import numpy as np
import pickle
import logging
import random
import os
import os.path
import errno
import pandas as pd
import sys
from timeit import default_timer as timer
from datetime import timedelta
import traceback
from statsmodels.distributions.empirical_distribution import ECDF
import ranking
from ranking import Ranking
from scipy.sparse import csr_matrix
import scipy.stats as stats
import scipy.optimize as opt
from sklearn.linear_model import LogisticRegression as LR
import cvxopt
"""
Some R-like functions to help port R code to python
"""
logger = logging.getLogger(__name__)
def set_seed(seed):
np.random.seed(seed)
random.seed(seed + 32767)
def matrix(d, nrow=None, ncol=None, byrow=False):
"""Returns the data as a 2-D matrix
A copy of the same matrix will be returned if input data dimensions are
same as output data dimensions. Else, a new matrix will be created
and returned.
Example:
d = np.reshape(range(12), (6, 2))
matrix(d[0:2, :], nrow=2, byrow=True)
    Args:
        d: np.ndarray or scipy.sparse.csr_matrix, input data
        nrow: int, number of output rows (inferred from ncol if None)
        ncol: int, number of output columns (inferred from nrow if None)
        byrow: bool, fill the output row-by-row instead of column-by-column
    Returns: np.ndarray (or csr_matrix if the input is sparse)
    """
if byrow:
# fill by row...in python 'C' fills by the last axis
# therefore, data gets populated one-row at a time
order = 'C'
else:
# fill by column...in python 'F' fills by the first axis
# therefore, data gets populated one-column at a time
order = 'F'
if len(d.shape) == 2:
d_rows, d_cols = d.shape
elif len(d.shape) == 1:
d_rows, d_cols = (1, d.shape[0])
else:
raise ValueError("Dimensions more than 2 are not supported")
if nrow is not None and ncol is None:
ncol = int(d_rows * d_cols / float(nrow))
elif ncol is not None and nrow is None:
nrow = int(d_rows * d_cols / float(ncol))
if len(d.shape) == 2 and d_rows == nrow and d_cols == ncol:
return d.copy()
if not d_rows * d_cols == nrow * ncol:
raise ValueError("input dimensions (%d, %d) not compatible with output dimensions (%d, %d)" %
(d_rows, d_cols, nrow, ncol))
if isinstance(d, csr_matrix):
return d.reshape((nrow, ncol), order=order)
else:
return np.reshape(d, (nrow, ncol), order=order)
# Ranks in decreasing order
def rank(x, ties_method="average"):
ox = np.argsort(-x)
sx = np.argsort(ox)
if ties_method == "average":
strategy = ranking.FRACTIONAL
else:
strategy = ranking.COMPETITION
r = Ranking(x[ox], strategy=strategy, start=1)
rnks = list(r.ranks())
return np.array(rnks)[sx]
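# Illustrative check of rank() (assuming the fractional-strategy semantics of
# the ranking package, ties share the mean of their positional ranks):
# rank(np.array([0.3, 0.1, 0.3])) should give array([1.5, 3. , 1.5]) -- the
# two 0.3's tie for positions 1 and 2 in the descending order.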
def nrow(x):
if len(x.shape) == 2:
return x.shape[0]
return None
def ncol(x):
if len(x.shape) == 2:
return x.shape[1]
return None
def rbind(m1, m2):
if m1 is None:
return np.copy(m2)
return np.append(m1, m2, axis=0)
def cbind(m1, m2):
if len(m1.shape) == 1 and len(m2.shape) == 1:
if len(m1) == len(m2):
mat = np.empty(shape=(len(m1), 2))
mat[:, 0] = m1
mat[:, 1] = m2
return mat
else:
raise ValueError("length of arrays differ: (%d, %d)" % (len(m1), len(m2)))
return np.append(m1, m2, axis=1)
def sample(x, n):
shuffle = np.array(x)
np.random.shuffle(shuffle)
return shuffle[0:n]
def append(a1, a2):
if isinstance(a1, np.ndarray) and len(a1.shape) == 1:
return np.append(a1, a2)
a = a1[:]
if isinstance(a2, list):
a.extend(a2)
else:
a.append(a2)
return a
def rep(val, n, dtype=float):
return np.ones(n, dtype=dtype) * val
def quantile(x, q):
return np.percentile(x, q)
def difftime(endtime, starttime, units="secs"):
    if units == "secs":
        t = timedelta(seconds=endtime - starttime)
    else:
        raise ValueError("units '%s' not supported!" % (units,))
    # total_seconds() keeps fractional seconds; .seconds would truncate to an
    # int and silently drop any whole-day component
    return t.total_seconds()
def order(x, decreasing=False):
if decreasing:
return np.argsort(-x)
else:
return np.argsort(x)
def runif(n, min=0.0, max=1.0):
    # scale is the interval width, so samples fall in [min, max] like R's runif
    return stats.uniform.rvs(loc=min, scale=max - min, size=n)
def rnorm(n, mean=0.0, sd=1.0):
return stats.norm.rvs(loc=mean, scale=sd, size=n)
def pnorm(x, mean=0.0, sd=1.0):
return stats.norm.cdf(x, loc=mean, scale=sd)
def ecdf(x):
return ECDF(x)
def matrix_rank(x):
return np.linalg.matrix_rank(x)
class LogisticRegressionClassifier(object):
"""
see:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
"""
def __init__(self):
self.lr = None
@staticmethod
def fit(x, y):
classifier = LogisticRegressionClassifier()
classifier.lr = LR(penalty='l2', dual=False, tol=0.0001, C=1,
fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None, solver='liblinear',
max_iter=100, multi_class='ovr', verbose=0)
classifier.lr.fit(x, y)
return classifier
def predict(self, x, type="response"):
if self.lr is None:
raise ValueError("classifier not initialized/trained...")
if type == "response":
y = self.lr.predict_proba(x)
else:
y = self.lr.predict(x)
return y
def predict_prob_for_class(self, x, cls):
if self.lr is None:
raise ValueError("classifier not initialized/trained...")
clsindex = np.where(self.lr.classes_ == cls)[0][0]
# logger.debug("class index: %d" % (clsindex,))
y = self.lr.predict_proba(x)[:, clsindex]
return y
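# Minimal usage sketch for LogisticRegressionClassifier on toy data; the
# feature values and 0/1 labels below are illustrative only.
def _demo_logistic_regression():
    x = np.array([[0.0], [0.2], [0.8], [1.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegressionClassifier.fit(x, y)
    # P(y=1 | x) for each row; should increase with the feature value
    return clf.predict_prob_for_class(x, 1)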
def read_csv(file, header=None, sep=','):
    """Loads data from a CSV
    Returns:
        pandas.DataFrame
    """
    if header is not None and header:
        header = 0  # first row is the header
    # pandas.read_csv replaces the deprecated/removed DataFrame.from_csv
    data_df = pd.read_csv(file, header=header, sep=sep, index_col=None)
    return data_df
def save(obj, filepath):
    # pickle streams are binary, so the file must be opened in 'wb'/'rb' mode
    with open(filepath, 'wb') as filehandler:
        pickle.dump(obj, filehandler)
    return obj
def load(filepath):
    with open(filepath, 'rb') as filehandler:
        return pickle.load(filehandler)
def dir_create(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def exception_to_string(exc):
exc_type, exc_value, exc_traceback = exc
return (str(exc_type) + os.linesep + str(exc_value)
+ os.linesep + str(traceback.extract_tb(exc_traceback)))
def configure_logger(args):
global logger
logger_format = "%(levelname)s [%(asctime)s]: %(message)s"
logger_level = logging.DEBUG if args.debug else logging.ERROR
if args.log_file is not None and args.log_file != "":
# print "configuring logger to file %s" % (args.log_file,)
logging.basicConfig(filename=args.log_file,
level=logger_level, format=logger_format,
filemode='w') # use filemode='a' for APPEND
else:
logging.basicConfig(level=logger_level, format=logger_format)
logger = logging.getLogger("default")
class Timer(object):
def __init__(self):
self.start_time = timer()
self.end_time = None
def start(self):
self.start_time = timer()
self.end_time = None
def end(self):
self.end_time = timer()
def elapsed(self):
etime = self.end_time
if etime is None:
etime = timer()
return difftime(etime, self.start_time, units="secs")
def message(self, msg):
if self.end_time is None:
self.end_time = timer()
tdiff = self.elapsed()
return "%s %f sec(s)" % (msg, tdiff)
def constr_optim(theta, f, grad=None, ui=None, ci=None, a=None, b=None,
hessian=None, bounds=None, method="BFGS",
outer_iterations=500, debug=False, args=None):
"""solve non-linear constraint optimization with scipy.optimize
problems have the form:
minimize f_0(x)
s.t.
ui * x >= ci --> Note: this is opposite of cvxopt
a * x = b --> Supported
#f_k(x) <= 0, k=1..m --> Not supported
:param theta: np.array
initial values. Must be in the domain of f()
:param f: function that is being minimized
returns the function evaluation
:param grad: function
returns the first derivative
    :param ui: np.ndarray
        inequality-constraint coefficient matrix
    :param ci: np.array
        inequality-constraint lower bounds
    :param a: np.ndarray
        equality-constraint coefficient matrix
    :param b: np.array
        equality-constraint values
    :param hessian: function returning the second derivative
    :param bounds: sequence of (min, max) pairs, or None
    :param method: scipy.optimize method name (constraints are honored only
        by methods such as "SLSQP", "COBYLA" and "trust-constr")
    :param outer_iterations: maximum number of iterations
    :param debug: print solver progress
    :param args: extra positional arguments forwarded to f
    :return: (x, success)
"""
x0 = np.array(theta)
# build the constraint set
cons = ()
if ui is not None:
for i in range(nrow(ui)):
# cons += ({'type': 'ineq', 'fun': lambda x: x.dot(u_) - c_},)
def fcons_ineq(x, i=i):
return x.dot(ui[i, :]) - ci[i]
cons += ({'type': 'ineq', 'fun': fcons_ineq},)
if a is not None:
for i in range(nrow(a)):
def fcons_eq(x, i=i):
return x.dot(a[i, :]) - b[i]
cons += ({'type': 'eq', 'fun': fcons_eq},)
res = opt.minimize(f, x0,
args=() if args is None else args,
method=method, jac=grad,
hess=hessian, hessp=None, bounds=bounds,
constraints=cons, tol=1e-6, callback=None,
#options={'gtol': 1e-6, 'maxiter': outer_iterations, 'disp': True}
options={'maxiter': outer_iterations, 'disp': debug}
)
if not res.success:
logger.debug("Optimization Failure:\nStatus: %d; Msg: %s" % (res.status, res.message))
return res.x, res.success
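# Illustrative call (a sketch, not from the original code): minimize a small
# quadratic subject to x0 + x1 >= 4, using the ui.x >= ci convention above.
# Note the explicit method="SLSQP" -- the default "BFGS" ignores constraints.
def _demo_constr_optim():
    f = lambda x: (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2
    g = lambda x: np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] - 2.0)])
    ui = np.array([[1.0, 1.0]])
    ci = np.array([4.0])
    # expected optimum is approximately (1.5, 2.5)
    return constr_optim(np.array([0.0, 0.0]), f, grad=g, ui=ui, ci=ci, method="SLSQP")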
def get_box_constraints(n, bounds=None):
box_lims = np.empty(shape=(n, 2))
    box_lims[:, 0] = -np.inf
    box_lims[:, 1] = np.inf
    if bounds is not None:
        for i in range(n):
            mn, mx = bounds[i]
            mn = -np.inf if mn is None else mn
            mx = np.inf if mx is None else mx
box_lims[i, 0] = mn
box_lims[i, 1] = mx
return box_lims
def get_kktsolver_no_equality_constraints(ui=None, fn=None, grad=None, hessian=None, debug=False):
""" Returns the kktsolver
:param ui: np.ndarray
ui = -G for CVXOPT
:param fn:
:param grad:
:param hessian:
:param debug:
:return:
"""
# Note that we negate ui because in other optimization
# APIs we follow the convention that G.x >= h whereas CVXOPT uses G.x <= h
G = cvxopt.matrix(-ui, ui.shape) if ui is not None else None
def kktsolver(x, z, W):
"""KKT solver for the specific case when there are no equality constraints
problem is:
minimize f(x)
s.t.
G.x <= h
where G = -ui
The KKT equations are solutions of:
[ H G_tilde' ] [ux] [bx]
[ ] [ ] = [ ]
[ G_tilde -W'W ] [uz] [bz]
G_tilde = [G']' = G (in case there are no non-linear constraints, like in AAD)
Simplifying:
[ H G' ] [ux] [bx]
[ ] [ ] = [ ]
[ G -W'W ] [uz] [bz]
Upon solution, the last component bz must be scaled, i.e.: bz := W.uz
To solve:
Let:
P = G'(W'W)^(-1)G
S = G'(W'W)^(-1)
Q = H + P
v = bx + S.bz
Q.ux = v
W.uz = (W')^(-1)(G.ux - bz)
"""
if debug:
logger.debug("Setup kkt solver")
logger.debug("W")
for key in W.keys():
logger.debug("key: %s" % (key,))
logger.debug(W[key])
H = hessian(x)
if debug:
logger.debug("diag H")
logger.debug(np.diag(H))
_H = cvxopt.spdiag(list(np.diag(H))) if H is not None else None
wdi = W["di"]
Wdi2 = cvxopt.spdiag(cvxopt.mul(wdi, wdi))
S = G.T * Wdi2
P = S * G
Q = _H + P
# now, do the cholesky decomposition of Q
cvxopt.lapack.potrf(Q)
if False and fn is not None:
logger.debug("At setup f(x) = %d" % (fn(np.array(list(x))),))
def f(x, y, z):
if False and fn is not None:
logger.debug("f(x) = %d" % (fn(np.array(list(x))),))
try:
# logger.debug("Compute x := S * z + x...")
cvxopt.blas.gemv(S, z, x, alpha=1.0, beta=1.0) # x = S * z + x
cvxopt.lapack.potrs(Q, x)
except BaseException as e:
logger.debug(exception_to_string(sys.exc_info()))
raise e
cvxopt.blas.gemv(G, x, z, alpha=1.0, beta=-1.0) # z = _G * x - z
z[:] = cvxopt.mul(wdi, z) # scaled z
# raise NotImplementedError("Method Not implemented yet")
return f
return kktsolver
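# Sketch of wiring the custom KKT solver into cvx_optim below (a hypothetical
# call; assumes the hessian really is diagonal, which the solver above
# exploits via cvxopt.spdiag):
# kkt = get_kktsolver_no_equality_constraints(ui=ui, fn=f, grad=grad, hessian=hessian)
# x, ok = cvx_optim(theta, f, grad=grad, ui=ui, ci=ci, hessian=hessian, kktsolver=kkt)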
def cvx_optim(theta, f, grad=None, ui=None, ci=None, a=None, b=None,
hessian=None, bounds=None, method="BFGS", kktsolver=None,
outer_iterations=100, debug=False, args=None):
"""Uses CVXOPT library for optimization
The general form of the optimization problem is:
minimize f(x)
    s.t.
        ui * x >= ci  <- This is different from the CVXOPT API. We will be switching the sign before calling CVXOPT
        a * x = b
        # f_k(x) <= 0, k=1,...,m  <-- not supported
:param theta: numpy.array
initial values
:param f: function
:param grad: function
:param ui: numpy.ndarray
:param ci: numpy.array
:param a: numpy.ndarray
:param b: numpy.array
:param method: string
:param kktsolver: string
:param hessian: function
:param outer_iterations:
:param debug: boolean
:param bounds: not supported
:param args:
:return:
"""
n = len(theta)
x0 = cvxopt.matrix(theta, (n, 1))
box_lims = get_box_constraints(n, bounds)
# logger.debug(box_lims)
def F(x=None, z=None):
if x is None:
return 0, x0
if bounds is not None:
for j in range(n):
v = x[j, 0]
if v < box_lims[j, 0] or v > box_lims[j, 1]:
return None
# convert the variable to numpy that will be understood by the caller.
m = x.size[0]
xx = np.array([x[j, 0] for j in range(m)])
Df = cvxopt.matrix(grad(xx), (1, n))
if z is None:
return f(xx), Df
if True:
H = z[0] * cvxopt.matrix(hessian(xx))
else:
# *ONLY* in case we are *sure* that the hessian is diagonal
_H = np.diag(hessian(xx))
H = z[0] * cvxopt.spdiag(list(_H))
return f(xx), Df, H
A = None
bx = None
G = None
h = None
if a is not None:
A = cvxopt.matrix(a, a.shape)
bx = cvxopt.matrix(b, (len(b), 1))
if ui is not None:
G = -cvxopt.matrix(ui, ui.shape)
h = -cvxopt.matrix(ci, (len(ci), 1))
if False:
logger.debug("A: %s" % ("" if b is None else str(A.size),))
# logger.debug(A)
logger.debug("b: %s" % ("" if b is None else str(b.size),))
# logger.debug(bx)
logger.debug("G: %s" % str(G.size))
logger.debug(G)
logger.debug("h: %s" % str(h.size))
logger.debug(h)
# cvxopt.solvers.options['show_progress'] = False
options = {'show_progress': False, 'maxiters': outer_iterations}
soln = cvxopt.solvers.cp(F, G=G, h=h, A=A, b=bx, options=options, kktsolver=kktsolver)
# logger.debug(soln.keys())
# logger.debug(soln['status'])
sx = soln['x']
x = np.array([sx[i, 0] for i in range(n)])
success = soln['status'] == 'optimal'
# if False and not success:
if False:
logger.debug("A:")
logger.debug(A)
logger.debug("b:")
logger.debug(bx)
logger.debug("G:")
logger.debug(G)
logger.debug("h:")
logger.debug(h)
fx, Dfx, hessx = F(sx, [1])
logger.debug("f(x)")
logger.debug(fx)
logger.debug("g(x)")
logger.debug(Dfx)
logger.debug("hessian(x)")
logger.debug(np.array(hessx))
logger.debug(soln.keys())
for key in soln.keys():
logger.debug("key: %s" % (key,))
logger.debug(soln[key])
return x, success
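# Illustrative call (a sketch): the CVXOPT path needs an explicit gradient and
# hessian; this minimizes ||x||^2 subject to x0 + x1 >= 1.
def _demo_cvx_optim():
    f = lambda x: float(np.sum(x ** 2))
    g = lambda x: 2.0 * x
    h = lambda x: 2.0 * np.eye(2)
    ui = np.array([[1.0, 1.0]])
    ci = np.array([1.0])
    # expected optimum is approximately (0.5, 0.5)
    return cvx_optim(np.array([1.0, 1.0]), f, grad=g, ui=ui, ci=ci, hessian=h)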
"""Test the minimum spanning tree function"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_
import numpy.testing as npt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def test_minimum_spanning_tree():
# Create a graph with two connected components.
graph = [[0,1,0,0,0],
[1,0,0,0,0],
[0,0,0,8,5],
[0,0,8,0,1],
[0,0,5,1,0]]
graph = np.asarray(graph)
# Create the expected spanning tree.
expected = [[0,1,0,0,0],
[0,0,0,0,0],
[0,0,0,0,5],
[0,0,0,0,1],
[0,0,0,0,0]]
expected = np.asarray(expected)
# Ensure minimum spanning tree code gives this expected output.
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
# Ensure that the original graph was not modified.
npt.assert_array_equal(csgraph.todense(), graph,
'Original graph was modified.')
# Now let the algorithm modify the csgraph in place.
mintree = minimum_spanning_tree(csgraph, overwrite=True)
npt.assert_array_equal(mintree.todense(), expected,
'Graph was not properly modified to contain MST.')
np.random.seed(1234)
for N in (5, 10, 15, 20):
# Create a random graph.
graph = 3 + np.random.random((N, N))
csgraph = csr_matrix(graph)
# The spanning tree has at most N - 1 edges.
mintree = minimum_spanning_tree(csgraph)
assert_(mintree.nnz < N)
# Set the sub diagonal to 1 to create a known spanning tree.
idx = np.arange(N-1)
graph[idx,idx+1] = 1
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
# We expect to see this pattern in the spanning tree and otherwise
# have this zero.
expected = np.zeros((N, N))
expected[idx, idx+1] = 1
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
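if __name__ == "__main__":
    # Convenience runner (a sketch added here, not part of the original scipy
    # test module): executes the test when the file is run directly.
    test_minimum_spanning_tree()
    print("minimum_spanning_tree tests passed")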
import pandas as pd
from sklearn import preprocessing
import numpy as np
def compute_amino_props():
amino_props = pd.DataFrame.from_dict({
'A': [1.28, 0.05, 1.00, 0.31, 6.11, 0.42, 0.23],
'G': [0.00, 0.00, 0.00, 0.00, 6.07, 0.13, 0.15],
'V': [3.67, 0.14, 3.00, 1.22, 6.02, 0.27, 0.49],
'L': [2.59, 0.19, 4.00, 1.70, 6.04, 0.39, 0.31],
'I': [4.19, 0.19, 4.00, 1.80, 6.04, 0.30, 0.45],
'F': [2.94, 0.29, 5.89, 1.79, 5.67, 0.30, 0.38],
'Y': [2.94, 0.30, 6.47, 0.96, 5.66, 0.25, 0.41],
'W': [3.21, 0.41, 8.08, 2.25, 5.94, 0.32, 0.42],
'T': [3.03, 0.11, 2.60, 0.26, 5.60, 0.21, 0.36],
'S': [1.31, 0.06, 1.60, -0.04, 5.70, 0.20, 0.28],
'R': [2.34, 0.29, 6.13, -1.01, 10.74, 0.36, 0.25],
'K': [1.89, 0.22, 4.77, -0.99, 9.99, 0.32, 0.27],
'H': [2.99, 0.23, 4.66, 0.13, 7.69, 0.27, 0.30],
'D': [1.60, 0.11, 2.78, -0.77, 2.95, 0.25, 0.20],
'E': [1.56, 0.15, 3.78, -0.64, 3.09, 0.42, 0.21],
'N': [1.60, 0.13, 2.95, -0.60, 6.52, 0.21, 0.22],
'Q': [1.56, 0.18, 3.95, -0.22, 5.65, 0.36, 0.25],
'M': [2.35, 0.22, 4.43, 1.23, 5.71, 0.38, 0.32],
'P': [2.67, 0.00, 2.72, 0.72, 6.80, 0.13, 0.34],
'C': [1.77, 0.13, 2.43, 1.54, 6.35, 0.17, 0.41],
'-': [0, 0, 0, 0, 0, 0, 0]
}, orient='index')
amino_props_np = amino_props.values
amino_props_np = preprocessing.StandardScaler().fit_transform(amino_props_np)
mean = amino_props_np.mean(axis = 0)
amino_props_df = pd.DataFrame(amino_props_np, index=amino_props.index)
amino_props_df.loc['X'] = mean
return amino_props_df # to get a value: amino_props.loc['C'].values
amino_props = compute_amino_props()
aminoacids = list(amino_props.index)
amino_to_index = { aa: i for (i, aa) in enumerate(aminoacids) }
aminoacids_len = len(aminoacids)
def amino_props_and_one_hot():
amino_props = compute_amino_props()
amino_props_np = amino_props.values
one_hot = np.eye(aminoacids_len)
props_and_one_hot = np.concatenate((amino_props_np, one_hot), axis = 1)
return pd.DataFrame(props_and_one_hot, index=amino_props.index)
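# Illustrative helper (a sketch, not in the original module): turn a peptide
# string into a (sequence_length, n_features) matrix by row lookup; 'X' falls
# back to the mean property vector added above.
def encode_sequence(seq, table=None):
    table = amino_props_and_one_hot() if table is None else table
    return np.stack([table.loc[aa].values for aa in seq])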
#! /usr/bin/env python
# coding=utf-8
import numpy as np
import tensorflow as tf
import core.utils as utils
import core.common as common
import core.backbone as backbone
from core.config import cfg
import time
class YOLOV3(object):
"""Implement tensoflow yolov3 here"""
def __init__(self, input_data, trainable, input_data_clean, defog_A=None, IcA=None):
self.trainable = trainable
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_class = len(self.classes)
self.strides = np.array(cfg.YOLO.STRIDES)
self.anchors = utils.get_anchors(cfg.YOLO.ANCHORS)
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH
self.upsample_method = cfg.YOLO.UPSAMPLE_METHOD
self.isp_flag = cfg.YOLO.ISP_FLAG
        try:
            self.conv_lbbox, self.conv_mbbox, self.conv_sbbox, self.recovery_loss = \
                self.__build_nework(input_data, self.isp_flag, input_data_clean, defog_A, IcA)
        except Exception as e:
            # keep the underlying error visible instead of swallowing it
            raise NotImplementedError("Can not build up yolov3 network! (%s)" % e)
with tf.variable_scope('pred_sbbox'):
self.pred_sbbox = self.decode(self.conv_sbbox, self.anchors[0], self.strides[0])
with tf.variable_scope('pred_mbbox'):
self.pred_mbbox = self.decode(self.conv_mbbox, self.anchors[1], self.strides[1])
with tf.variable_scope('pred_lbbox'):
self.pred_lbbox = self.decode(self.conv_lbbox, self.anchors[2], self.strides[2])
def __build_nework(self, input_data, isp_flag, input_data_clean, defog_A, IcA):
filtered_image_batch = input_data
self.filter_params = input_data
filter_imgs_series = []
if isp_flag:
# start_time = time.time()
with tf.variable_scope('extract_parameters_2'):
input_data = tf.image.resize_images(input_data, [256, 256], method=tf.image.ResizeMethod.BILINEAR)
filter_features = common.extract_parameters_2(input_data, cfg, self.trainable)
# filter_features = tf.random_normal([1, 15], 0.5, 0.1)
filters = cfg.filters
filters = [x(filtered_image_batch, cfg) for x in filters]
filter_parameters = []
for j, filter in enumerate(filters):
with tf.variable_scope('filter_%d' % j):
print(' creating filter:', j, 'name:', str(filter.__class__), 'abbr.',
filter.get_short_name())
print(' filter_features:', filter_features.shape)
filtered_image_batch, filter_parameter = filter.apply(
filtered_image_batch, filter_features, defog_A, IcA)
filter_parameters.append(filter_parameter)
filter_imgs_series.append(filtered_image_batch)
print(' output:', filtered_image_batch.shape)
self.filter_params = filter_parameters
# end_time = time.time()
            # print('time spent in filters:', end_time - start_time)
# input_data_shape = tf.shape(input_data)
# batch_size = input_data_shape[0]
recovery_loss = tf.reduce_sum(tf.pow(filtered_image_batch - input_data_clean, 2.0))#/(2.0 * batch_size)
self.image_isped = filtered_image_batch
self.filter_imgs_series = filter_imgs_series
input_data = filtered_image_batch
route_1, route_2, input_data = backbone.darknet53(input_data, self.trainable)
input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv52')
input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv53')
input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv54')
input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv55')
input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv56')
conv_lobj_branch = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, name='conv_lobj_branch')
conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 1024, 3*(self.num_class + 5)),
trainable=self.trainable, name='conv_lbbox', activate=False, bn=False)
input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv57')
input_data = common.upsample(input_data, name='upsample0', method=self.upsample_method)
with tf.variable_scope('route_1'):
input_data = tf.concat([input_data, route_2], axis=-1)
input_data = common.convolutional(input_data, (1, 1, 768, 256), self.trainable, 'conv58')
input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv59')
input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv60')
input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv61')
input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv62')
        conv_mobj_branch = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, name='conv_mobj_branch')
conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 512, 3*(self.num_class + 5)),
trainable=self.trainable, name='conv_mbbox', activate=False, bn=False)
input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv63')
input_data = common.upsample(input_data, name='upsample1', method=self.upsample_method)
with tf.variable_scope('route_2'):
input_data = tf.concat([input_data, route_1], axis=-1)
input_data = common.convolutional(input_data, (1, 1, 384, 128), self.trainable, 'conv64')
input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv65')
input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv66')
input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv67')
input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv68')
conv_sobj_branch = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, name='conv_sobj_branch')
conv_sbbox = common.convolutional(conv_sobj_branch, (1, 1, 256, 3*(self.num_class + 5)),
trainable=self.trainable, name='conv_sbbox', activate=False, bn=False)
return conv_lbbox, conv_mbbox, conv_sbbox, recovery_loss
def decode(self, conv_output, anchors, stride):
"""
return tensor of shape [batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes]
contains (x, y, w, h, score, probability)
"""
conv_shape = tf.shape(conv_output)
batch_size = conv_shape[0]
output_size = conv_shape[1]
anchor_per_scale = len(anchors)
conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, anchor_per_scale, 5 + self.num_class))
conv_raw_dxdy = conv_output[:, :, :, :, 0:2]
conv_raw_dwdh = conv_output[:, :, :, :, 2:4]
conv_raw_conf = conv_output[:, :, :, :, 4:5]
        conv_raw_prob = conv_output[:, :, :, :, 5:]
y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])
x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])
xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)
xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, anchor_per_scale, 1])
xy_grid = tf.cast(xy_grid, tf.float32)
pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * stride
pred_wh = (tf.exp(conv_raw_dwdh) * anchors) * stride
pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
pred_conf = tf.sigmoid(conv_raw_conf)
pred_prob = tf.sigmoid(conv_raw_prob)
return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
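    # Shape sketch (illustrative, assuming the standard strides [8, 16, 32]
    # from the config and a 416x416 input): at stride 32 the grid is 13x13,
    # so the decoded tensor is [batch, 13, 13, 3, 5 + num_class], with boxes
    # expressed in input-image pixel coordinates.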
def focal(self, target, actual, alpha=1, gamma=2):
focal_loss = alpha * tf.pow(tf.abs(target - actual), gamma)
return focal_loss
def bbox_giou(self, boxes1, boxes2):
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),
tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)
boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),
tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
iou = inter_area / union_area
enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])
enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])
enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
enclose_area = enclose[..., 0] * enclose[..., 1]
giou = iou - 1.0 * (enclose_area - union_area) / enclose_area
return giou
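    # Worked example (boxes given as center-x, center-y, w, h): for
    # boxes1 = (0.5, 0.5, 1, 1) and boxes2 = (2.5, 0.5, 1, 1) the boxes are
    # disjoint, so iou = 0; the enclosing box has area 3 and the union area 2,
    # giving giou = 0 - (3 - 2) / 3 = -1/3.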
def bbox_iou(self, boxes1, boxes2):
boxes1_area = boxes1[..., 2] * boxes1[..., 3]
boxes2_area = boxes2[..., 2] * boxes2[..., 3]
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
iou = 1.0 * inter_area / union_area
return iou
def loss_layer(self, conv, pred, label, bboxes, anchors, stride):
conv_shape = tf.shape(conv)
batch_size = conv_shape[0]
output_size = conv_shape[1]
input_size = stride * output_size
conv = tf.reshape(conv, (batch_size, output_size, output_size,
self.anchor_per_scale, 5 + self.num_class))
conv_raw_conf = conv[:, :, :, :, 4:5]
conv_raw_prob = conv[:, :, :, :, 5:]
pred_xywh = pred[:, :, :, :, 0:4]
pred_conf = pred[:, :, :, :, 4:5]
label_xywh = label[:, :, :, :, 0:4]
respond_bbox = label[:, :, :, :, 4:5]
label_prob = label[:, :, :, :, 5:]
giou = tf.expand_dims(self.bbox_giou(pred_xywh, label_xywh), axis=-1)
input_size = tf.cast(input_size, tf.float32)
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
        giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)
iou = self.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < self.iou_loss_thresh, tf.float32 )
conf_focal = self.focal(respond_bbox, pred_conf)
conf_loss = conf_focal * (
respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
+
respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
)
prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))
return giou_loss, conf_loss, prob_loss
def compute_loss(self, label_sbbox, label_mbbox, label_lbbox, true_sbbox, true_mbbox, true_lbbox):
with tf.name_scope('smaller_box_loss'):
loss_sbbox = self.loss_layer(self.conv_sbbox, self.pred_sbbox, label_sbbox, true_sbbox,
anchors = self.anchors[0], stride = self.strides[0])
with tf.name_scope('medium_box_loss'):
loss_mbbox = self.loss_layer(self.conv_mbbox, self.pred_mbbox, label_mbbox, true_mbbox,
anchors = self.anchors[1], stride = self.strides[1])
with tf.name_scope('bigger_box_loss'):
loss_lbbox = self.loss_layer(self.conv_lbbox, self.pred_lbbox, label_lbbox, true_lbbox,
anchors = self.anchors[2], stride = self.strides[2])
with tf.name_scope('giou_loss'):
giou_loss = loss_sbbox[0] + loss_mbbox[0] + loss_lbbox[0]
with tf.name_scope('conf_loss'):
conf_loss = loss_sbbox[1] + loss_mbbox[1] + loss_lbbox[1]
with tf.name_scope('prob_loss'):
prob_loss = loss_sbbox[2] + loss_mbbox[2] + loss_lbbox[2]
with tf.name_scope('recovery_loss'):
recovery_loss = self.recovery_loss
return giou_loss, conf_loss, prob_loss, recovery_loss
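# Minimal graph-construction sketch (illustrative, assuming TF 1.x graph mode
# and a configured core.config; the placeholder names are hypothetical):
# input_data = tf.placeholder(tf.float32, [None, 416, 416, 3], name='input_data')
# input_clean = tf.placeholder(tf.float32, [None, 416, 416, 3], name='input_clean')
# trainable = tf.placeholder(tf.bool, name='training')
# model = YOLOV3(input_data, trainable, input_clean)
# preds = (model.pred_sbbox, model.pred_mbbox, model.pred_lbbox)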
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import os
import sys
from contextlib import contextmanager
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model._theil_sen import _modified_weiszfeld_step
from sklearn.utils._testing import assert_almost_equal
@contextmanager
def no_stdout_stderr():
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    with open(os.devnull, 'w') as devnull:
        sys.stdout = devnull
        sys.stderr = devnull
        try:
            yield
            devnull.flush()
        finally:
            # Restore the real streams even if the wrapped code raises,
            # so a failing test does not silence all later output.
            sys.stdout = old_stdout
            sys.stderr = old_stderr
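# Usage sketch for the helper above (illustrative addition, not part of the
# original test file): anything printed inside the block goes to os.devnull.
def _demo_no_stdout_stderr():
    with no_stdout_stderr():
        print("this line is swallowed by os.devnull")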
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check the case where the start value is an element of X and already the solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
    # Check the case where the start value is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
    # Check the case where the start value is not the solution but is an element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
    # Check that the step is the identity for a single vector
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
    # Test a larger problem for which the exact 1d solution is known
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
    # Check that the median solves the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
    # Check that a warning is emitted when the maximum iteration count is exceeded
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert np.abs(lstq.coef_ - w) > 0.9
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert np.abs(lstq.coef_ - w - c) > 0.5
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
# non-regression test for #18104
theil_sen.score(X, y)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert norm(lstq.coef_ - w) > 1.0
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert np.abs(bp - 1 + 1 / (np.sqrt(2))) < 1.e-6
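# Worked form of the limit checked above (a sketch, assuming scikit-learn's
# breakdown-point definition): as n_samples -> infinity with k subsamples the
# breakdown point tends to 1 - 0.5 ** (1 / k); k = 2 gives 1 - 1 / sqrt(2).
def _asymptotic_breakdown_point(n_subsamples):
    return 1 - 0.5 ** (1 / n_subsamples)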
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(max_subpopulation=-1, random_state=0)
with pytest.raises(ValueError):
theil_sen.fit(X, y)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(n_subsamples=1, random_state=0)
with pytest.raises(ValueError):
theil_sen.fit(X, y)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
theil_sen = TheilSenRegressor(n_subsamples=101, random_state=0)
with pytest.raises(ValueError):
theil_sen.fit(X, y)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0)
with pytest.raises(ValueError):
theil_sen.fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert norm(lstq.coef_ - w) > 1.0
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=2,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| [
"numpy.sqrt",
"sklearn.linear_model.TheilSenRegressor",
"numpy.array",
"numpy.random.RandomState",
"numpy.testing.assert_array_less",
"numpy.testing.assert_warns",
"numpy.testing.assert_array_almost_equal",
"sklearn.linear_model._theil_sen._breakdown_point",
"numpy.dot",
"numpy.testing.assert_arra... | [((1094, 1118), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1115, 1118), True, 'import numpy as np\n'), ((1827, 1851), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1848, 1851), True, 'import numpy as np\n'), ((1982, 2003), 'numpy.array', 'np.array', (['[5.0, 10.0]'], {}), '([5.0, 10.0])\n', (1990, 2003), True, 'import numpy as np\n'), ((2341, 2365), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2362, 2365), True, 'import numpy as np\n'), ((2516, 2548), 'numpy.array', 'np.array', (['[5.0, 10.0, 42.0, 7.0]'], {}), '([5.0, 10.0, 42.0, 7.0])\n', (2524, 2548), True, 'import numpy as np\n'), ((2997, 3032), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'median'], {}), '(X, median)\n', (3021, 3032), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((3037, 3077), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['new_y', 'median'], {}), '(new_y, median)\n', (3062, 3077), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((3145, 3175), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'y'], {}), '(X, y)\n', (3169, 3175), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((3180, 3212), 'numpy.testing.assert_array_less', 'assert_array_less', (['median', 'new_y'], {}), '(median, new_y)\n', (3197, 3212), False, 'from numpy.testing import assert_array_equal, assert_array_less\n'), ((3217, 3244), 'numpy.testing.assert_array_less', 'assert_array_less', (['new_y', 'y'], {}), '(new_y, y)\n', (3234, 3244), False, 'from numpy.testing import assert_array_equal, assert_array_less\n'), ((3328, 3358), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'y'], {}), '(X, y)\n', (3352, 3358), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((3363, 3395), 'numpy.testing.assert_array_less', 'assert_array_less', (['median', 'new_y'], {}), '(median, new_y)\n', (3380, 3395), False, 'from numpy.testing import assert_array_equal, assert_array_less\n'), ((3400, 3427), 'numpy.testing.assert_array_less', 'assert_array_less', (['new_y', 'y'], {}), '(new_y, y)\n', (3417, 3427), False, 'from numpy.testing import assert_array_equal, assert_array_less\n'), ((3545, 3575), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'y'], {}), '(X, y)\n', (3569, 3575), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((3580, 3608), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y', 'new_y'], {}), '(y, new_y)\n', (3598, 3608), False, 'from numpy.testing import assert_array_equal, assert_array_less\n'), ((3709, 3729), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3717, 3729), True, 'import numpy as np\n'), ((3775, 3805), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'y'], {}), '(X, y)\n', (3799, 3805), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((3881, 3915), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'new_y'], {}), '(X, new_y)\n', (3905, 3915), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((4017, 4051), 'numpy.array', 'np.array', (['[0.21132505, 
0.78867497]'], {}), '([0.21132505, 0.78867497])\n', (4025, 4051), True, 'import numpy as np\n'), ((4064, 4094), 'sklearn.linear_model._theil_sen._modified_weiszfeld_step', '_modified_weiszfeld_step', (['X', 'y'], {}), '(X, y)\n', (4088, 4094), False, 'from sklearn.linear_model._theil_sen import _modified_weiszfeld_step\n'), ((4099, 4134), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['new_y', 'y'], {}), '(new_y, y)\n', (4124, 4134), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((4249, 4267), 'sklearn.linear_model._theil_sen._spatial_median', '_spatial_median', (['X'], {}), '(X)\n', (4264, 4267), False, 'from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point\n'), ((4272, 4318), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['median', 'true_median'], {}), '(median, true_median)\n', (4297, 4318), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((4398, 4422), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (4419, 4422), True, 'import numpy as np\n'), ((4528, 4546), 'sklearn.linear_model._theil_sen._spatial_median', '_spatial_median', (['X'], {}), '(X)\n', (4543, 4546), False, 'from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point\n'), ((4551, 4590), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['median', 'true_median'], {}), '(median, true_median)\n', (4569, 4590), False, 'from numpy.testing import assert_array_equal, assert_array_less\n'), ((4696, 4739), 'sklearn.linear_model._theil_sen._spatial_median', '_spatial_median', (['X'], {'max_iter': '(100)', 'tol': '(1e-06)'}), '(X, max_iter=100, tol=1e-06)\n', (4711, 4739), False, 'from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point\n'), ((4934, 4974), 'scipy.optimize.fmin_bfgs', 'fmin_bfgs', (['cost_func', 'median'], {'disp': '(False)'}), '(cost_func, median, disp=False)\n', (4943, 4974), False, 'from scipy.optimize import fmin_bfgs\n'), ((4979, 5026), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['median', 'fermat_weber'], {}), '(median, fermat_weber)\n', (5004, 5026), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((5099, 5173), 'numpy.testing.assert_warns', 'assert_warns', (['ConvergenceWarning', '_spatial_median', 'X'], {'max_iter': '(30)', 'tol': '(0.0)'}), '(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.0)\n', (5111, 5173), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((5452, 5500), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', 'w', '(1)'], {}), '(theil_sen.coef_, w, 1)\n', (5477, 5500), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((5505, 5558), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.intercept_', 'c', '(1)'], {}), '(theil_sen.intercept_, c, 1)\n', (5530, 5558), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((5944, 5996), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', '(w + c)', '(1)'], {}), '(theil_sen.coef_, w + c, 1)\n', (5969, 5996), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((6001, 6047), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['theil_sen.intercept_', '(0.0)'], {}), '(theil_sen.intercept_, 0.0)\n', (6020, 6047), False, 'from 
sklearn.utils._testing import assert_almost_equal\n'), ((6445, 6493), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', 'w', '(1)'], {}), '(theil_sen.coef_, w, 1)\n', (6470, 6493), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((6498, 6551), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.intercept_', 'c', '(1)'], {}), '(theil_sen.intercept_, c, 1)\n', (6523, 6551), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((6596, 6630), 'sklearn.linear_model._theil_sen._breakdown_point', '_breakdown_point', (['(10000000000.0)', '(2)'], {}), '(10000000000.0, 2)\n', (6612, 6630), False, 'from sklearn.linear_model._theil_sen import _spatial_median, _breakdown_point\n'), ((6781, 6836), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'max_subpopulation': '(-1)', 'random_state': '(0)'}), '(max_subpopulation=-1, random_state=0)\n', (6798, 6836), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((7004, 7053), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'n_subsamples': '(1)', 'random_state': '(0)'}), '(n_subsamples=1, random_state=0)\n', (7021, 7053), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((7221, 7272), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'n_subsamples': '(101)', 'random_state': '(0)'}), '(n_subsamples=101, random_state=0)\n', (7238, 7272), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((7428, 7452), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (7449, 7452), True, 'import numpy as np\n'), ((7606, 7655), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'n_subsamples': '(9)', 'random_state': '(0)'}), '(n_subsamples=9, random_state=0)\n', (7623, 7655), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((7907, 7955), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', 'w', '(1)'], {}), '(theil_sen.coef_, w, 1)\n', (7932, 7955), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((7960, 8013), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.intercept_', 'c', '(1)'], {}), '(theil_sen.intercept_, c, 1)\n', (7985, 8013), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((8296, 8353), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', 'lstq.coef_', '(9)'], {}), '(theil_sen.coef_, lstq.coef_, 9)\n', (8321, 8353), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((9077, 9125), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', 'w', '(1)'], {}), '(theil_sen.coef_, w, 1)\n', (9102, 9125), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((9130, 9183), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.intercept_', 'c', '(1)'], {}), '(theil_sen.intercept_, c, 1)\n', (9155, 9183), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((9244, 9268), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (9265, 9268), True, 'import numpy as np\n'), ((9662, 9720), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['theil_sen.coef_', 
'lstq.coef_', '(12)'], {}), '(theil_sen.coef_, lstq.coef_, 12)\n', (9687, 9720), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((9982, 10022), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_pred', 'y', '(12)'], {}), '(y_pred, y, 12)\n', (10007, 10022), False, 'from numpy.testing import assert_array_almost_equal, assert_warns\n'), ((3843, 3867), 'numpy.array', 'np.array', (['[1 / 3, 2 / 3]'], {}), '([1 / 3, 2 / 3])\n', (3851, 3867), True, 'import numpy as np\n'), ((3953, 3985), 'numpy.array', 'np.array', (['[0.2792408, 0.7207592]'], {}), '([0.2792408, 0.7207592])\n', (3961, 3985), True, 'import numpy as np\n'), ((4829, 4842), 'numpy.sum', 'np.sum', (['dists'], {}), '(dists)\n', (4835, 4842), True, 'import numpy as np\n'), ((5326, 5348), 'numpy.abs', 'np.abs', (['(lstq.coef_ - w)'], {}), '(lstq.coef_ - w)\n', (5332, 5348), True, 'import numpy as np\n'), ((5759, 5785), 'numpy.abs', 'np.abs', (['(lstq.coef_ - w - c)'], {}), '(lstq.coef_ - w - c)\n', (5765, 5785), True, 'import numpy as np\n'), ((6264, 6284), 'scipy.linalg.norm', 'norm', (['(lstq.coef_ - w)'], {}), '(lstq.coef_ - w)\n', (6268, 6284), False, 'from scipy.linalg import norm\n'), ((6847, 6872), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6860, 6872), False, 'import pytest\n'), ((7063, 7088), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7076, 7088), False, 'import pytest\n'), ((7282, 7307), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7295, 7307), False, 'import pytest\n'), ((7665, 7690), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7678, 7690), False, 'import pytest\n'), ((8852, 8872), 'scipy.linalg.norm', 'norm', (['(lstq.coef_ - w)'], {}), '(lstq.coef_ - w)\n', (8856, 8872), False, 'from scipy.linalg import norm\n'), ((2075, 2087), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (2081, 2087), True, 'import numpy as np\n'), ((2618, 2630), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (2624, 2630), True, 'import numpy as np\n'), ((2880, 2905), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2888, 2905), True, 'import numpy as np\n'), ((3481, 3506), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (3489, 3506), True, 'import numpy as np\n'), ((3652, 3692), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 0.0, 1.0])\n', (3660, 3692), True, 'import numpy as np\n'), ((4175, 4200), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (4183, 4200), True, 'import numpy as np\n'), ((4631, 4671), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 0.0, 1.0])\n', (4639, 4671), True, 'import numpy as np\n'), ((5286, 5304), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5302, 5304), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((5404, 5437), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (5421, 5437), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((5700, 5737), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (5716, 5737), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((5841, 5895), 
'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'fit_intercept': '(False)', 'random_state': '(0)'}), '(fit_intercept=False, random_state=0)\n', (5858, 5895), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((6224, 6242), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6240, 6242), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((6340, 6399), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'max_subpopulation': '(1000.0)', 'random_state': '(0)'}), '(max_subpopulation=1000.0, random_state=0)\n', (6357, 6399), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((7802, 7858), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'max_subpopulation': '(250)', 'random_state': '(0)'}), '(max_subpopulation=250, random_state=0)\n', (7819, 7858), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((8093, 8151), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'n_subsamples': 'X.shape[0]', 'random_state': '(0)'}), '(n_subsamples=X.shape[0], random_state=0)\n', (8110, 8151), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((8207, 8225), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8223, 8225), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((8812, 8830), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8828, 8830), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((8928, 8997), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'n_jobs': '(2)', 'random_state': '(0)', 'max_subpopulation': '(2000.0)'}), '(n_jobs=2, random_state=0, max_subpopulation=2000.0)\n', (8945, 8997), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((9500, 9554), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'fit_intercept': '(False)', 'random_state': '(0)'}), '(fit_intercept=False, random_state=0)\n', (9517, 9554), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((9610, 9647), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (9626, 9647), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((9880, 9933), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'fit_intercept': '(True)', 'random_state': '(0)'}), '(fit_intercept=True, random_state=0)\n', (9897, 9933), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((4789, 4800), 'scipy.linalg.norm', 'norm', (['(x - y)'], {}), '(x - y)\n', (4793, 4800), False, 'from scipy.linalg import norm\n'), ((8495, 8542), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'verbose': '(True)', 'random_state': '(0)'}), '(verbose=True, random_state=0)\n', (8512, 8542), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((8561, 8630), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {'verbose': '(True)', 'max_subpopulation': '(10)', 'random_state': '(0)'}), '(verbose=True, max_subpopulation=10, random_state=0)\n', (8578, 8630), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((6654, 6664), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', 
(6661, 6664), True, 'import numpy as np\n')] |
import os
import string
import sys
from random import randint
from enum import Enum
import numpy as np
import pandas as pd
from PySide2.QtWidgets import *
from GridCal.Gui.SigmaAnalysis.gui import *
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Simulations.SigmaAnalysis.sigma_analysis_driver import SigmaAnalysisResults
class PandasModel(QtCore.QAbstractTableModel):
"""
Class to populate a Qt table view with a pandas data frame
"""
def __init__(self, data, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = np.array(data.values)
self._cols = data.columns
self._index = data.index.values
self.r, self.c = np.shape(self._data)
self.isDate = False
if len(self._index) > 0:
if isinstance(self._index[0], np.datetime64):
self._index = pd.to_datetime(self._index)
self.isDate = True
self.formatter = lambda x: "%.2f" % x
def rowCount(self, parent=None):
return self.r
def columnCount(self, parent=None):
return self.c
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
# return self.formatter(self._data[index.row(), index.column()])
return str(self._data[index.row(), index.column()])
return None
def headerData(self, p_int, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self._cols[p_int]
elif orientation == QtCore.Qt.Vertical:
if self._index is None:
return p_int
else:
if self.isDate:
                        return self._index[p_int].strftime('%Y/%m/%d %H:%M:%S')
else:
return str(self._index[p_int])
return None
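# Minimal usage sketch for PandasModel (a hypothetical helper, not part of
# GridCal): with a QApplication running, the model plugs into a QTableView
# like any QAbstractTableModel.
def _demo_pandas_model(df: pd.DataFrame) -> QTableView:
    view = QTableView()
    view.setModel(PandasModel(df))
    return view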
def get_list_model(iterable):
"""
get Qt list model from a simple iterable
:param iterable:
:return: List model
"""
list_model = QtGui.QStandardItemModel()
if iterable is not None:
for val in iterable:
# for the list model
item = QtGui.QStandardItem(val)
item.setEditable(False)
list_model.appendRow(item)
return list_model
class SigmaAnalysisGUI(QtWidgets.QMainWindow):
def __init__(self, parent=None, results: SigmaAnalysisResults = None, bus_names=None, use_native_dialogues=True):
"""
:param parent:
:param results:
"""
QtWidgets.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle('HELM-Sigma analysis dialogue')
self.use_native_dialogues = use_native_dialogues
self.results = results
if results is not None:
ax = self.ui.plotwidget.get_axis()
fig = self.ui.plotwidget.get_figure()
self.results.plot(ax)
fig.tight_layout()
n = len(bus_names)
self.mdl = self.results.mdl(result_type=ResultTypes.SigmaPlusDistances,
indices=np.arange(n),
names=bus_names)
self.ui.tableView.setModel(self.mdl)
else:
self.mdl = None
self.ui.actionCopy_to_clipboard.triggered.connect(self.copy_to_clipboard)
self.ui.actionSave.triggered.connect(self.save)
def msg(self, text, title="Warning"):
"""
Message box
:param text: Text to display
:param title: Name of the window
"""
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(text)
# msg.setInformativeText("This is additional information")
msg.setWindowTitle(title)
# msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok)
        msg.exec_()
def copy_to_clipboard(self):
"""
Copy data to clipboard
"""
if self.mdl is not None:
self.mdl.copy_to_clipboard()
def save(self):
"""
:return:
"""
if self.mdl is not None:
options = QFileDialog.Options()
if self.use_native_dialogues:
options |= QFileDialog.DontUseNativeDialog
file, filter = QFileDialog.getSaveFileName(self, "Export results", '',
filter="CSV (*.csv);;Excel files (*.xlsx)",
options=options)
if file != '':
if 'xlsx' in filter:
f = file
if not f.endswith('.xlsx'):
f += '.xlsx'
self.mdl.save_to_excel(f, mode='real')
print('Saved!')
if 'csv' in filter:
f = file
if not f.endswith('.csv'):
f += '.csv'
self.mdl.save_to_csv(f, mode='real')
print('Saved!')
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = SigmaAnalysisGUI()
    window.resize(int(1.61 * 700), 600)  # golden ratio; Qt geometry takes integer pixels
window.show()
sys.exit(app.exec_())
| [
"numpy.array",
"numpy.shape",
"pandas.to_datetime",
"numpy.arange"
] | [((610, 631), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (618, 631), True, 'import numpy as np\n'), ((731, 751), 'numpy.shape', 'np.shape', (['self._data'], {}), '(self._data)\n', (739, 751), True, 'import numpy as np\n'), ((902, 929), 'pandas.to_datetime', 'pd.to_datetime', (['self._index'], {}), '(self._index)\n', (916, 929), True, 'import pandas as pd\n'), ((3269, 3281), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3278, 3281), True, 'import numpy as np\n')] |
import numpy as np
from unittest import TestCase
from aspire.basis.fb_2d import FBBasis2D
import os.path
DATA_DIR = os.path.join(os.path.dirname(__file__), 'saved_test_data')
class FBBasis2DTestCase(TestCase):
def setUp(self):
self.basis = FBBasis2D((8, 8))
def tearDown(self):
pass
def testFBBasis2DIndices(self):
indices = self.basis.indices()
self.assertTrue(np.allclose(
indices['ells'],
[
0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 2., 2., 2., 2., 2., 2., 3., 3.,
3., 3., 4., 4., 4., 4., 5., 5., 5., 5., 6., 6., 7., 7., 8., 8.
]
))
self.assertTrue(np.allclose(
indices['ks'],
[
0., 1., 2., 3., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1.,
0., 1., 0., 1., 0., 1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0.
]
))
self.assertTrue(np.allclose(
indices['sgns'],
[
1., 1., 1., 1., 1., 1., 1., -1., -1., -1., 1., 1., 1., -1., -1., -1., 1., 1.,
-1., -1., 1., 1., -1., -1., 1., 1., -1., -1., 1., -1., 1., -1., 1., -1.
]
))
def testFBBasis2DNorms(self):
norms = self.basis.norms()
self.assertTrue(np.allclose(
norms,
[
3.68065992303471, 2.41241466684800, 1.92454669738088, 1.64809729313301, 2.01913617828263,
1.50455726188833, 1.25183461029289, 1.70284654929000, 1.36051054373844, 1.16529703804363,
1.49532071137207, 1.25039038364830, 1.34537533748304, 1.16245357319190, 1.23042467443861,
1.09002083501080, 1.13867113286781, 1.06324777330476, 0.999841586390824
]
))
def testFBBasis2DEvaluate(self):
coeffs = np.array(
[
1.07338590e-01, 1.23690941e-01, 6.44482039e-03, -5.40484306e-02,
-4.85304586e-02, 1.09852144e-02, 3.87838396e-02, 3.43796455e-02,
-6.43284705e-03, -2.86677145e-02, -1.42313328e-02, -2.25684091e-03,
-3.31840727e-02, -2.59706174e-03, -5.91919887e-04, -9.97433028e-03,
9.19123928e-04, 1.19891589e-03, 7.49154982e-03, 6.18865229e-03,
-8.13265715e-04, -1.30715655e-02, -1.44160603e-02, 2.90379956e-03,
2.37066082e-02, 4.88805735e-03, 1.47870707e-03, 7.63376018e-03,
-5.60619559e-03, 1.05165081e-02, 3.30510143e-03, -3.48652120e-03,
-4.23228797e-04, 1.40484061e-02
]
)
result = self.basis.evaluate(coeffs)
self.assertTrue(np.allclose(
result,
np.load(os.path.join(DATA_DIR, 'fbbasis_evaluation_8_8.npy'))
))
def testFBBasis2DEvaluate_t(self):
v = np.load(os.path.join(DATA_DIR, 'fbbasis_coefficients_8_8.npy'))
result = self.basis.evaluate_t(v)
self.assertTrue(np.allclose(
result,
[
0.10761825, 0.12291151, 0.00836345, -0.0619454, -0.0483326, 0.01053718,
0.03977641, 0.03420101, -0.0060131, -0.02970658, -0.0151334, -0.00017575,
-0.03987446, -0.00257069, -0.0006621, -0.00975174, 0.00108047, 0.00072022,
0.00753342, 0.00604493, 0.00024362, -0.01711248, -0.01387371, 0.00112805,
0.02407385, 0.00376325, 0.00081128, 0.00951368, -0.00557536, 0.01087579,
0.00255393, -0.00525156, -0.00839695, 0.00802198
]
))
def testFBBasis2DExpand(self):
v = np.load(os.path.join(DATA_DIR, 'fbbasis_coefficients_8_8.npy'))
result = self.basis.expand(v)
self.assertTrue(np.allclose(
result,
[
0.10733859, 0.12369094, 0.00644482, -0.05404843, -0.04853046, 0.01098521,
0.03878384, 0.03437965, -0.00643285, -0.02866771, -0.01423133, -0.00225684,
-0.03318407, -0.00259706, -0.00059192, -0.00997433, 0.00091912, 0.00119892,
0.00749155, 0.00618865, -0.00081327, -0.01307157, -0.01441606, 0.00290380,
0.02370661, 0.00488806, 0.00147871, 0.00763376, -0.00560620, 0.01051651,
0.00330510, -0.00348652, -0.00042323, 0.01404841
]
))
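    # Round-trip sketch (an illustrative addition, not part of the original
    # suite, and prefixed with "_" so unittest does not collect it): assuming
    # expand() least-squares-inverts evaluate(), re-evaluating the expanded
    # coefficients yields an image of the same 8x8 shape, close to the input
    # up to basis truncation.
    def _roundtrip_sketch(self):
        v = np.load(os.path.join(DATA_DIR, 'fbbasis_coefficients_8_8.npy'))
        coeffs = self.basis.expand(v)
        v2 = self.basis.evaluate(coeffs)
        self.assertEqual(v.shape, v2.shape)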
def testFBBasis2DExpand_t(self):
v = np.array(
[
0.10733859, 0.12369094, 0.00644482, -0.05404843, -0.04853046, 0.01098521,
0.03878384, 0.03437965, -0.00643285, -0.02866771, -0.01423133, -0.00225684,
-0.03318407, -0.00259706, -0.00059192, -0.00997433, 0.00091912, 0.00119892,
0.00749155, 0.00618865, -0.00081327, -0.01307157, -0.01441606, 0.00290380,
0.02370661, 0.00488806, 0.00147871, 0.00763376, -0.00560620, 0.01051651,
0.00330510, -0.00348652, -0.00042323, 0.01404841
]
)
result = self.basis.expand_t(v)
self.assertTrue(np.allclose(
result,
np.array(
[
[0.00000000, 0.00000000, 0.00000000, 0.00000000, -0.00000000, 0.00000000, 0.00000000, 0.00000000],
[0.00000000, 0.00000000, -0.00918277, -0.00432375, -0.01197145, -0.00617931, 0.01026610, 0.00000000],
[0.00000000, 0.00476273, -0.00726280, 0.00956327, 0.01926635, 0.03723446, 0.01192624, -0.00633172],
[0.00000000, -0.00299080, -0.00930410, 0.04758995, 0.03910046, 0.06438156, 0.00533650, -0.00611883],
[0.00000000, -0.00366876, -0.01245707, 0.08397084, 0.05411559, 0.06951585, 0.01889731, 0.00489068],
[0.00000000, -0.00068456, -0.03255324, 0.04313602, 0.05831975, 0.04459346, 0.00363614, 0.00692364],
[0.00000000, -0.00104448, -0.02563514, -0.04045771, -0.01424875, -0.01740960, -0.01906915, 0.00817263],
[0.00000000, 0.00000000, 0.00799977, -0.01398406, -0.01052898, -0.01299636, -0.01446617, 0.00000000]
]
)
)) | [
"numpy.array",
"numpy.allclose",
"aspire.basis.fb_2d.FBBasis2D"
] | [((256, 273), 'aspire.basis.fb_2d.FBBasis2D', 'FBBasis2D', (['(8, 8)'], {}), '((8, 8))\n', (265, 273), False, 'from aspire.basis.fb_2d import FBBasis2D\n'), ((1926, 2480), 'numpy.array', 'np.array', (['[0.10733859, 0.123690941, 0.00644482039, -0.0540484306, -0.0485304586, \n 0.0109852144, 0.0387838396, 0.0343796455, -0.00643284705, -0.0286677145,\n -0.0142313328, -0.00225684091, -0.0331840727, -0.00259706174, -\n 0.000591919887, -0.00997433028, 0.000919123928, 0.00119891589, \n 0.00749154982, 0.00618865229, -0.000813265715, -0.0130715655, -\n 0.0144160603, 0.00290379956, 0.0237066082, 0.00488805735, 0.00147870707,\n 0.00763376018, -0.00560619559, 0.0105165081, 0.00330510143, -\n 0.0034865212, -0.000423228797, 0.0140484061]'], {}), '([0.10733859, 0.123690941, 0.00644482039, -0.0540484306, -\n 0.0485304586, 0.0109852144, 0.0387838396, 0.0343796455, -0.00643284705,\n -0.0286677145, -0.0142313328, -0.00225684091, -0.0331840727, -\n 0.00259706174, -0.000591919887, -0.00997433028, 0.000919123928, \n 0.00119891589, 0.00749154982, 0.00618865229, -0.000813265715, -\n 0.0130715655, -0.0144160603, 0.00290379956, 0.0237066082, 0.00488805735,\n 0.00147870707, 0.00763376018, -0.00560619559, 0.0105165081, \n 0.00330510143, -0.0034865212, -0.000423228797, 0.0140484061])\n', (1934, 2480), True, 'import numpy as np\n'), ((4542, 5000), 'numpy.array', 'np.array', (['[0.10733859, 0.12369094, 0.00644482, -0.05404843, -0.04853046, 0.01098521, \n 0.03878384, 0.03437965, -0.00643285, -0.02866771, -0.01423133, -\n 0.00225684, -0.03318407, -0.00259706, -0.00059192, -0.00997433, \n 0.00091912, 0.00119892, 0.00749155, 0.00618865, -0.00081327, -\n 0.01307157, -0.01441606, 0.0029038, 0.02370661, 0.00488806, 0.00147871,\n 0.00763376, -0.0056062, 0.01051651, 0.0033051, -0.00348652, -0.00042323,\n 0.01404841]'], {}), '([0.10733859, 0.12369094, 0.00644482, -0.05404843, -0.04853046, \n 0.01098521, 0.03878384, 0.03437965, -0.00643285, -0.02866771, -\n 0.01423133, -0.00225684, -0.03318407, -0.00259706, -0.00059192, -\n 0.00997433, 0.00091912, 0.00119892, 0.00749155, 0.00618865, -0.00081327,\n -0.01307157, -0.01441606, 0.0029038, 0.02370661, 0.00488806, 0.00147871,\n 0.00763376, -0.0056062, 0.01051651, 0.0033051, -0.00348652, -0.00042323,\n 0.01404841])\n', (4550, 5000), True, 'import numpy as np\n'), ((413, 623), 'numpy.allclose', 'np.allclose', (["indices['ells']", '[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0,\n 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 6.0, \n 6.0, 7.0, 7.0, 8.0, 8.0]'], {}), "(indices['ells'], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, \n 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, \n 4.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0])\n", (424, 623), True, 'import numpy as np\n'), ((717, 925), 'numpy.allclose', 'np.allclose', (["indices['ks']", '[0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0,\n 2.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), "(indices['ks'], [0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 0.0, 1.0, \n 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n", (728, 925), True, 'import numpy as np\n'), ((1020, 1244), 'numpy.allclose', 'np.allclose', (["indices['sgns']", '[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, -1.0, \n -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0,\n -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 
-1.0]'], {}), "(indices['sgns'], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, -1.0,\n -1.0, 1.0, 1.0, 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, \n -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0])\n", (1031, 1244), True, 'import numpy as np\n'), ((1394, 1768), 'numpy.allclose', 'np.allclose', (['norms', '[3.68065992303471, 2.412414666848, 1.92454669738088, 1.64809729313301, \n 2.01913617828263, 1.50455726188833, 1.25183461029289, 1.70284654929, \n 1.36051054373844, 1.16529703804363, 1.49532071137207, 1.2503903836483, \n 1.34537533748304, 1.1624535731919, 1.23042467443861, 1.0900208350108, \n 1.13867113286781, 1.06324777330476, 0.999841586390824]'], {}), '(norms, [3.68065992303471, 2.412414666848, 1.92454669738088, \n 1.64809729313301, 2.01913617828263, 1.50455726188833, 1.25183461029289,\n 1.70284654929, 1.36051054373844, 1.16529703804363, 1.49532071137207, \n 1.2503903836483, 1.34537533748304, 1.1624535731919, 1.23042467443861, \n 1.0900208350108, 1.13867113286781, 1.06324777330476, 0.999841586390824])\n', (1405, 1768), True, 'import numpy as np\n'), ((3094, 3561), 'numpy.allclose', 'np.allclose', (['result', '[0.10761825, 0.12291151, 0.00836345, -0.0619454, -0.0483326, 0.01053718, \n 0.03977641, 0.03420101, -0.0060131, -0.02970658, -0.0151334, -\n 0.00017575, -0.03987446, -0.00257069, -0.0006621, -0.00975174, \n 0.00108047, 0.00072022, 0.00753342, 0.00604493, 0.00024362, -0.01711248,\n -0.01387371, 0.00112805, 0.02407385, 0.00376325, 0.00081128, 0.00951368,\n -0.00557536, 0.01087579, 0.00255393, -0.00525156, -0.00839695, 0.00802198]'], {}), '(result, [0.10761825, 0.12291151, 0.00836345, -0.0619454, -\n 0.0483326, 0.01053718, 0.03977641, 0.03420101, -0.0060131, -0.02970658,\n -0.0151334, -0.00017575, -0.03987446, -0.00257069, -0.0006621, -\n 0.00975174, 0.00108047, 0.00072022, 0.00753342, 0.00604493, 0.00024362,\n -0.01711248, -0.01387371, 0.00112805, 0.02407385, 0.00376325, \n 0.00081128, 0.00951368, -0.00557536, 0.01087579, 0.00255393, -\n 0.00525156, -0.00839695, 0.00802198])\n', (3105, 3561), True, 'import numpy as np\n'), ((3878, 4349), 'numpy.allclose', 'np.allclose', (['result', '[0.10733859, 0.12369094, 0.00644482, -0.05404843, -0.04853046, 0.01098521, \n 0.03878384, 0.03437965, -0.00643285, -0.02866771, -0.01423133, -\n 0.00225684, -0.03318407, -0.00259706, -0.00059192, -0.00997433, \n 0.00091912, 0.00119892, 0.00749155, 0.00618865, -0.00081327, -\n 0.01307157, -0.01441606, 0.0029038, 0.02370661, 0.00488806, 0.00147871,\n 0.00763376, -0.0056062, 0.01051651, 0.0033051, -0.00348652, -0.00042323,\n 0.01404841]'], {}), '(result, [0.10733859, 0.12369094, 0.00644482, -0.05404843, -\n 0.04853046, 0.01098521, 0.03878384, 0.03437965, -0.00643285, -\n 0.02866771, -0.01423133, -0.00225684, -0.03318407, -0.00259706, -\n 0.00059192, -0.00997433, 0.00091912, 0.00119892, 0.00749155, 0.00618865,\n -0.00081327, -0.01307157, -0.01441606, 0.0029038, 0.02370661, \n 0.00488806, 0.00147871, 0.00763376, -0.0056062, 0.01051651, 0.0033051, \n -0.00348652, -0.00042323, 0.01404841])\n', (3889, 4349), True, 'import numpy as np\n'), ((5241, 5962), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0], [0.0, 0.0, -0.00918277, -\n 0.00432375, -0.01197145, -0.00617931, 0.0102661, 0.0], [0.0, 0.00476273,\n -0.0072628, 0.00956327, 0.01926635, 0.03723446, 0.01192624, -0.00633172\n ], [0.0, -0.0029908, -0.0093041, 0.04758995, 0.03910046, 0.06438156, \n 0.0053365, -0.00611883], [0.0, -0.00366876, -0.01245707, 0.08397084, \n 0.05411559, 0.06951585, 0.01889731, 
0.00489068], [0.0, -0.00068456, -\n 0.03255324, 0.04313602, 0.05831975, 0.04459346, 0.00363614, 0.00692364],\n [0.0, -0.00104448, -0.02563514, -0.04045771, -0.01424875, -0.0174096, -\n 0.01906915, 0.00817263], [0.0, 0.0, 0.00799977, -0.01398406, -\n 0.01052898, -0.01299636, -0.01446617, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0], [0.0, 0.0, -0.00918277,\n -0.00432375, -0.01197145, -0.00617931, 0.0102661, 0.0], [0.0, \n 0.00476273, -0.0072628, 0.00956327, 0.01926635, 0.03723446, 0.01192624,\n -0.00633172], [0.0, -0.0029908, -0.0093041, 0.04758995, 0.03910046, \n 0.06438156, 0.0053365, -0.00611883], [0.0, -0.00366876, -0.01245707, \n 0.08397084, 0.05411559, 0.06951585, 0.01889731, 0.00489068], [0.0, -\n 0.00068456, -0.03255324, 0.04313602, 0.05831975, 0.04459346, 0.00363614,\n 0.00692364], [0.0, -0.00104448, -0.02563514, -0.04045771, -0.01424875, \n -0.0174096, -0.01906915, 0.00817263], [0.0, 0.0, 0.00799977, -\n 0.01398406, -0.01052898, -0.01299636, -0.01446617, 0.0]])\n', (5249, 5962), True, 'import numpy as np\n')] |
import operator
import pickle
import sys
from contextlib import suppress
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.ufuncs as xu
from xarray import DataArray, Dataset, Variable
from xarray.core import duck_array_ops
from xarray.core.pycompat import dask_version
from xarray.testing import assert_chunks_equal
from xarray.tests import mock
from ..core.duck_array_ops import lazy_array_equiv
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_frame_equal,
assert_identical,
raise_if_dask_computes,
requires_pint_0_15,
requires_scipy_or_netCDF4,
)
from .test_backends import create_tmp_file
dask = pytest.importorskip("dask")
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")
ON_WINDOWS = sys.platform == "win32"
def test_raise_if_dask_computes():
data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2))
with pytest.raises(RuntimeError, match=r"Too many computes"):
with raise_if_dask_computes():
data.compute()
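# Converse sketch of the guard above (illustrative, not part of the original
# file): building a dask graph under raise_if_dask_computes() is fine; only
# an actual compute (e.g. .compute() or np.asarray) trips the error.
def _lazy_graph_is_allowed():
    data = da.zeros((4, 6), chunks=(2, 2))
    with raise_if_dask_computes():
        data = data + 1  # graph construction only, nothing is computed
    return data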
class DaskTestCase:
def assertLazyAnd(self, expected, actual, test):
with dask.config.set(scheduler="synchronous"):
test(actual, expected)
if isinstance(actual, Dataset):
for k, v in actual.variables.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, DataArray):
assert isinstance(actual.data, da.Array)
for k, v in actual.coords.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, Variable):
assert isinstance(actual.data, da.Array)
else:
assert False
class TestVariable(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
@pytest.fixture(autouse=True)
def setUp(self):
self.values = np.random.RandomState(0).randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_var = Variable(("x", "y"), self.values)
self.lazy_var = Variable(("x", "y"), self.data)
def test_basics(self):
v = self.lazy_var
assert self.data is v.data
assert self.data.chunks == v.chunks
assert_array_equal(self.values, v)
def test_copy(self):
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True))
def test_chunk(self):
for chunks, expected in [
({}, ((2, 2), (2, 2, 2))),
(3, ((3, 1), (3, 3))),
({"x": 3, "y": 3}, ((3, 1), (3, 3))),
({"x": 3}, ((3, 1), (2, 2, 2))),
({"x": (3, 1)}, ((3, 1), (2, 2, 2))),
]:
rechunked = self.lazy_var.chunk(chunks)
assert rechunked.chunks == expected
self.assertLazyAndIdentical(self.eager_var, rechunked)
def test_indexing(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0], v[0])
self.assertLazyAndIdentical(u[:1], v[:1])
self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
@pytest.mark.skipif(dask_version < "2021.04.1", reason="Requires dask >= 2021.04.1")
@pytest.mark.parametrize(
"expected_data, index",
[
(da.array([99, 2, 3, 4]), 0),
(da.array([99, 99, 99, 4]), slice(2, None, -1)),
(da.array([99, 99, 3, 99]), [0, -1, 1]),
(da.array([99, 99, 99, 4]), np.arange(3)),
(da.array([1, 99, 99, 99]), [False, True, True, True]),
(da.array([1, 99, 99, 99]), np.arange(4) > 0),
            (da.array([99, 99, 99, 99]), Variable("x", da.array([1, 2, 3, 4])) > 0),
],
)
def test_setitem_dask_array(self, expected_data, index):
arr = Variable(("x"), da.array([1, 2, 3, 4]))
expected = Variable(("x"), expected_data)
arr[index] = 99
assert_identical(arr, expected)
@pytest.mark.skipif(dask_version >= "2021.04.1", reason="Requires dask < 2021.04.1")
def test_setitem_dask_array_error(self):
with pytest.raises(TypeError, match=r"stored in a dask array"):
v = self.lazy_var
v[:1] = 0
def test_squeeze(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())
def test_equals(self):
v = self.lazy_var
assert v.equals(v)
assert isinstance(v.data, da.Array)
assert v.identical(v)
assert isinstance(v.data, da.Array)
def test_transpose(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.T, v.T)
def test_shift(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
assert v.data.chunks == v.shift(x=1).data.chunks
def test_roll(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
assert v.data.chunks == v.roll(x=1).data.chunks
def test_unary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(-u, -v)
self.assertLazyAndIdentical(abs(u), abs(v))
self.assertLazyAndIdentical(u.round(), v.round())
def test_binary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(2 * u, 2 * v)
self.assertLazyAndIdentical(u + u, v + v)
self.assertLazyAndIdentical(u[0] + u, v[0] + v)
def test_repr(self):
expected = dedent(
"""\
<xarray.Variable (x: 4, y: 6)>
{!r}""".format(
self.lazy_var.data
)
)
assert expected == repr(self.lazy_var)
def test_pickle(self):
# Test that pickling/unpickling does not convert the dask
# backend to numpy
a1 = Variable(["x"], build_dask_array("x"))
a1.compute()
assert not a1._in_memory
assert kernel_call_count == 1
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 1
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
def test_reduce(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(u.std(), v.std())
with raise_if_dask_computes():
actual = v.argmax(dim="x")
self.assertLazyAndAllClose(u.argmax(dim="x"), actual)
with raise_if_dask_computes():
actual = v.argmin(dim="x")
self.assertLazyAndAllClose(u.argmin(dim="x"), actual)
self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x"))
with pytest.raises(NotImplementedError, match=r"only works along an axis"):
v.median()
with pytest.raises(NotImplementedError, match=r"only works along an axis"):
v.median(v.dims)
with raise_if_dask_computes():
v.reduce(duck_array_ops.mean)
def test_missing_values(self):
values = np.array([0, 1, np.nan, 3])
data = da.from_array(values, chunks=(2,))
eager_var = Variable("x", values)
lazy_var = Variable("x", data)
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
self.assertLazyAndIdentical(Variable("x", range(4)), lazy_var.fillna(2))
self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
def test_concat(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], "x"))
self.assertLazyAndIdentical(
u[:3], Variable.concat([v[[0, 2]], v[[1]]], "x", positions=[[0, 2], [1]])
)
def test_missing_methods(self):
v = self.lazy_var
try:
v.argsort()
except NotImplementedError as err:
assert "dask" in str(err)
try:
v[0].item()
except NotImplementedError as err:
assert "dask" in str(err)
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_univariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_bivariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))
def test_compute(self):
u = self.eager_var
v = self.lazy_var
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
def test_persist(self):
u = self.eager_var
v = self.lazy_var + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
@requires_pint_0_15(reason="Need __dask_tokenize__")
def test_tokenize_duck_dask_array(self):
import pint
unit_registry = pint.UnitRegistry()
q = unit_registry.Quantity(self.data, "meter")
variable = xr.Variable(("x", "y"), q)
token = dask.base.tokenize(variable)
post_op = variable + 5 * unit_registry.meter
assert dask.base.tokenize(variable) != dask.base.tokenize(post_op)
# Immutability check
assert dask.base.tokenize(variable) == token
class TestDataArrayAndDataset(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
def assertLazyAndEqual(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_equal)
@pytest.fixture(autouse=True)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_array = DataArray(
self.values, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
self.lazy_array = DataArray(
self.data, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
def test_rechunk(self):
chunked = self.eager_array.chunk({"x": 2}).chunk({"y": 2})
assert chunked.chunks == ((2,) * 2, (2,) * 3)
self.assertLazyAndIdentical(self.lazy_array, chunked)
def test_new_chunk(self):
chunked = self.eager_array.chunk()
assert chunked.data.name.startswith("xarray-<this-array>")
def test_lazy_dataset(self):
lazy_ds = Dataset({"foo": (("x", "y"), self.data)})
assert isinstance(lazy_ds.foo.variable.data, da.Array)
def test_lazy_array(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(u, v)
self.assertLazyAndAllClose(-u, -v)
self.assertLazyAndAllClose(u.T, v.T)
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(1 + u, 1 + v)
actual = xr.concat([v[:2], v[2:]], "x")
self.assertLazyAndAllClose(u, actual)
def test_compute(self):
u = self.eager_array
v = self.lazy_array
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
def test_persist(self):
u = self.eager_array
v = self.lazy_array + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
def test_concat_loads_variables(self):
# Test that concat() computes not-in-memory variables at most once
# and loads them in the output, while leaving the input unaltered.
d1 = build_dask_array("d1")
c1 = build_dask_array("c1")
d2 = build_dask_array("d2")
c2 = build_dask_array("c2")
d3 = build_dask_array("d3")
c3 = build_dask_array("c3")
# Note: c is a non-index coord.
# Index coords are loaded by IndexVariable.__init__.
ds1 = Dataset(data_vars={"d": ("x", d1)}, coords={"c": ("x", c1)})
ds2 = Dataset(data_vars={"d": ("x", d2)}, coords={"c": ("x", c2)})
ds3 = Dataset(data_vars={"d": ("x", d3)}, coords={"c": ("x", c3)})
assert kernel_call_count == 0
out = xr.concat(
[ds1, ds2, ds3], dim="n", data_vars="different", coords="different"
)
# each kernel is computed exactly once
assert kernel_call_count == 6
# variables are loaded in the output
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars="all", coords="all")
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=["d"], coords=["c"])
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=[], coords=[])
        # variables are loaded once as we are validating that they're identical
assert kernel_call_count == 12
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
out = xr.concat(
[ds1, ds2, ds3],
dim="n",
data_vars="different",
coords="different",
compat="identical",
)
# compat=identical doesn't do any more kernel calls than compat=equals
assert kernel_call_count == 18
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
        # When the check for "different" turns true halfway through,
        # stop computing variables, as doing so would bring no benefit
ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])})
out = xr.concat(
[ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different"
)
        # the variables of ds1 and ds2 were computed, but those of ds3 were not
assert kernel_call_count == 22
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
# the data of ds1 and ds2 was loaded into numpy and then
# concatenated to the data of ds3. Thus, only ds3 is computed now.
out.compute()
assert kernel_call_count == 24
# Finally, test that originals are unaltered
assert ds1["d"].data is d1
assert ds1["c"].data is c1
assert ds2["d"].data is d2
assert ds2["c"].data is c2
assert ds3["d"].data is d3
assert ds3["c"].data is c3
# now check that concat() is correctly using dask name equality to skip loads
out = xr.concat(
[ds1, ds1, ds1], dim="n", data_vars="different", coords="different"
)
assert kernel_call_count == 24
# variables are not loaded in the output
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat(
[ds1, ds1, ds1], dim="n", data_vars=[], coords=[], compat="identical"
)
assert kernel_call_count == 24
# variables are not loaded in the output
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat(
[ds1, ds2.compute(), ds3],
dim="n",
data_vars="all",
coords="different",
compat="identical",
)
        # c1, c3 must be computed for comparison since c2 is numpy;
        # d2 is computed too
assert kernel_call_count == 28
out = xr.concat(
[ds1, ds2.compute(), ds3],
dim="n",
data_vars="all",
coords="all",
compat="identical",
)
# no extra computes
assert kernel_call_count == 30
# Finally, test that originals are unaltered
assert ds1["d"].data is d1
assert ds1["c"].data is c1
assert ds2["d"].data is d2
assert ds2["c"].data is c2
assert ds3["d"].data is d3
assert ds3["c"].data is c3
def test_groupby(self):
u = self.eager_array
v = self.lazy_array
expected = u.groupby("x").mean(...)
with raise_if_dask_computes():
actual = v.groupby("x").mean(...)
self.assertLazyAndAllClose(expected, actual)
def test_rolling(self):
u = self.eager_array
v = self.lazy_array
expected = u.rolling(x=2).mean()
with raise_if_dask_computes():
actual = v.rolling(x=2).mean()
self.assertLazyAndAllClose(expected, actual)
def test_groupby_first(self):
u = self.eager_array
v = self.lazy_array
for coords in [u.coords, v.coords]:
coords["ab"] = ("x", ["a", "a", "b", "b"])
with pytest.raises(NotImplementedError, match=r"dask"):
v.groupby("ab").first()
expected = u.groupby("ab").first()
with raise_if_dask_computes():
actual = v.groupby("ab").first(skipna=False)
self.assertLazyAndAllClose(expected, actual)
def test_reindex(self):
u = self.eager_array.assign_coords(y=range(6))
v = self.lazy_array.assign_coords(y=range(6))
for kwargs in [
{"x": [2, 3, 4]},
{"x": [1, 100, 2, 101, 3]},
{"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
]:
expected = u.reindex(**kwargs)
actual = v.reindex(**kwargs)
self.assertLazyAndAllClose(expected, actual)
def test_to_dataset_roundtrip(self):
u = self.eager_array
v = self.lazy_array
expected = u.assign_coords(x=u["x"])
self.assertLazyAndEqual(expected, v.to_dataset("x").to_array("x"))
def test_merge(self):
def duplicate_and_merge(array):
return xr.merge([array, array.rename("bar")]).to_array()
expected = duplicate_and_merge(self.eager_array)
actual = duplicate_and_merge(self.lazy_array)
self.assertLazyAndEqual(expected, actual)
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_where_dispatching(self):
a = np.arange(10)
b = a > 3
x = da.from_array(a, 5)
y = da.from_array(b, 5)
expected = DataArray(a).where(b)
self.assertLazyAndEqual(expected, DataArray(a).where(y))
self.assertLazyAndEqual(expected, DataArray(x).where(b))
self.assertLazyAndEqual(expected, DataArray(x).where(y))
def test_simultaneous_compute(self):
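        # xarray batches all lazy variables of a Dataset into one scheduler
        # call on load(); the counting scheduler below observes exactly one
        # get() for both variables.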
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
count = [0]
def counting_get(*args, **kwargs):
count[0] += 1
return dask.get(*args, **kwargs)
ds.load(scheduler=counting_get)
assert count[0] == 1
def test_stack(self):
data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))
arr = DataArray(data, dims=("w", "x", "y"))
stacked = arr.stack(z=("x", "y"))
z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"])
expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"])
assert stacked.data.chunks == expected.data.chunks
self.assertLazyAndEqual(expected, stacked)
def test_dot(self):
eager = self.eager_array.dot(self.eager_array[0])
lazy = self.lazy_array.dot(self.lazy_array[0])
self.assertLazyAndAllClose(eager, lazy)
def test_dataarray_repr(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.DataArray 'data' (x: 1)>
{!r}
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x""".format(
data
)
)
assert expected == repr(a)
assert kernel_call_count == 0 # should not evaluate dask array
def test_dataset_repr(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.Dataset>
Dimensions: (x: 1)
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x
Data variables:
a (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>"""
)
assert expected == repr(ds)
assert kernel_call_count == 0 # should not evaluate dask array
def test_dataarray_pickle(self):
        # Test that pickling/unpickling does not convert the dask backend
        # to numpy in either the data variable or the non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a1 = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
a1.compute()
assert not a1._in_memory
assert not a1.coords["y"]._in_memory
assert kernel_call_count == 2
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 2
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
assert not a1.coords["y"]._in_memory
assert not a2.coords["y"]._in_memory
def test_dataset_pickle(self):
        # Test that pickling/unpickling does not convert the dask backend
        # to numpy in either the data variables or the non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds1 = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
ds1.compute()
assert not ds1["a"]._in_memory
assert not ds1["y"]._in_memory
assert kernel_call_count == 2
ds2 = pickle.loads(pickle.dumps(ds1))
assert kernel_call_count == 2
assert_identical(ds1, ds2)
assert not ds1["a"]._in_memory
assert not ds2["a"]._in_memory
assert not ds1["y"]._in_memory
assert not ds2["y"]._in_memory
def test_dataarray_getattr(self):
        # ipython/jupyter does a long list of getattr() calls when trying to
# represent an object.
# Make sure we're not accidentally computing dask variables.
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
with suppress(AttributeError):
getattr(a, "NOTEXIST")
assert kernel_call_count == 0
def test_dataset_getattr(self):
        # As for the DataArray case above: make sure attribute access on a
        # Dataset does not accidentally compute dask variables.
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
with suppress(AttributeError):
getattr(ds, "NOTEXIST")
assert kernel_call_count == 0
def test_values(self):
# Test that invoking the values property does not convert the dask
# backend to numpy
a = DataArray([1, 2]).chunk()
assert not a._in_memory
assert a.values.tolist() == [1, 2]
assert not a._in_memory
def test_from_dask_variable(self):
# Test array creation from Variable with dask backend.
# This is used e.g. in broadcast()
a = DataArray(self.lazy_array.variable, coords={"x": range(4)}, name="foo")
self.assertLazyAndIdentical(self.lazy_array, a)
@requires_pint_0_15(reason="Need __dask_tokenize__")
def test_tokenize_duck_dask_array(self):
import pint
unit_registry = pint.UnitRegistry()
q = unit_registry.Quantity(self.data, unit_registry.meter)
data_array = xr.DataArray(
data=q, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
token = dask.base.tokenize(data_array)
post_op = data_array + 5 * unit_registry.meter
assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op)
# Immutability check
assert dask.base.tokenize(data_array) == token
class TestToDaskDataFrame:
def test_to_dask_dataframe(self):
# Test conversion of Datasets to dask DataFrames
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset(
{"a": ("t", da.from_array(x, chunks=4)), "b": ("t", y), "t": ("t", t)}
)
expected_pd = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
# test if 1-D index is correctly set up
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
# test if we have dask dataframes
assert isinstance(actual, dd.DataFrame)
        # use pandas' assert_frame_equal to check the dataframes are equivalent
assert_frame_equal(expected.compute(), actual.compute())
# test if no index is given
expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_2D(self):
# Test if 2-D dataset is supplied
w = np.random.randn(2, 3)
ds = Dataset({"w": (("x", "y"), da.from_array(w, chunks=(1, 2)))})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
        # dask dataframes do not (yet) support multiindex,
        # but when they do, this would be the expected index:
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
)
expected = pd.DataFrame({"w": w.reshape(-1)}, index=exp_index)
# so for now, reset the index
expected = expected.reset_index(drop=False)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
@pytest.mark.xfail(raises=NotImplementedError)
def test_to_dask_dataframe_2D_set_index(self):
# This will fail until dask implements MultiIndex support
w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
ds = Dataset({"w": (("x", "y"), w)})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_coordinates(self):
# Test if coordinate is also a dask array
x = np.random.randn(10)
t = np.arange(10) * 2
ds = Dataset(
{
"a": ("t", da.from_array(x, chunks=4)),
"t": ("t", da.from_array(t, chunks=4)),
}
)
expected_pd = pd.DataFrame({"a": x}, index=pd.Index(t, name="t"))
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_not_daskarray(self):
# Test if DataArray is not a dask array
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
expected = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_no_coordinate(self):
x = da.from_array(np.random.randn(10), chunks=4)
ds = Dataset({"x": ("dim_0", x)})
expected = ds.compute().to_dataframe().reset_index()
actual = ds.to_dask_dataframe()
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_dim_order(self):
values = np.array([[1, 2], [3, 4]], dtype=np.int64)
ds = Dataset({"w": (("x", "y"), values)}).chunk(1)
expected = ds["w"].to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["x", "y"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds["w"].T.to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["y", "x"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
with pytest.raises(ValueError, match=r"does not match the set of dimensions"):
ds.to_dask_dataframe(dim_order=["x"])
@pytest.mark.parametrize("method", ["load", "compute"])
def test_dask_kwargs_variable(method):
x = Variable("y", da.from_array(np.arange(3), chunks=(2,)))
    # kwargs should be passed on to da.Array.compute()
with mock.patch.object(
da.Array, "compute", return_value=np.arange(3)
) as mock_compute:
getattr(x, method)(foo="bar")
mock_compute.assert_called_with(foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataarray(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = DataArray(data)
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataset(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = Dataset({"x": (("y"), data)})
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
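def _demo_compute_kwargs_forwarding():
    # Hedged sketch (not part of the original suite): the tests above patch
    # dask to observe that load()/compute()/persist() forward their keyword
    # arguments; in plain usage that means e.g. choosing a scheduler per call.
    arr = DataArray(da.from_array(np.arange(3), chunks=(2,)))
    arr.compute(scheduler="synchronous")  # kwarg reaches the dask compute call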
kernel_call_count = 0
def kernel(name):
"""Dask kernel to test pickling/unpickling and __repr__.
Must be global to make it pickleable.
"""
global kernel_call_count
kernel_call_count += 1
return np.ones(1, dtype=np.int64)
def build_dask_array(name):
global kernel_call_count
kernel_call_count = 0
return dask.array.Array(
dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64
)
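def _demo_kernel_counting():
    # Hedged sketch (not in the original suite): how the counting helpers
    # above work together. build_dask_array resets the global counter and
    # returns a one-chunk array whose only graph task calls `kernel`, so a
    # single compute() bumps kernel_call_count exactly once.
    arr = build_dask_array("demo")
    assert kernel_call_count == 0  # graph built lazily, nothing computed yet
    arr.compute()
    assert kernel_call_count == 1  # one chunk -> one kernel invocation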
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_Dataset(persist):
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
ds = ds + 1
n = len(ds.foo.data.dask)
ds2 = persist(ds)
assert len(ds2.foo.data.dask) == 1
assert len(ds.foo.data.dask) == n # doesn't mutate in place
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_DataArray(persist):
x = da.arange(10, chunks=(5,))
y = DataArray(x)
z = y + 1
n = len(z.data.dask)
zz = persist(z)
assert len(z.data.dask) == n
assert len(zz.data.dask) == zz.data.npartitions
def test_dataarray_with_dask_coords():
import toolz
x = xr.Variable("x", da.arange(8, chunks=(4,)))
y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2)
data = da.random.random((8, 8), chunks=(4, 4)) + 1
array = xr.DataArray(data, dims=["x", "y"])
array.coords["xx"] = x
array.coords["yy"] = y
assert dict(array.__dask_graph__()) == toolz.merge(
data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__()
)
(array2,) = dask.compute(array)
assert not dask.is_dask_collection(array2)
assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
def test_basic_compute():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2})
for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]:
with dask.config.set(scheduler=get):
ds.compute()
ds.foo.compute()
ds.foo.variable.compute()
def test_dask_layers_and_dependencies():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
x = dask.delayed(ds)
assert set(x.__dask_graph__().dependencies).issuperset(
ds.__dask_graph__().dependencies
)
assert set(x.foo.__dask_graph__().dependencies).issuperset(
ds.__dask_graph__().dependencies
)
def make_da():
da = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(100, 120)},
name="a",
).chunk({"x": 4, "y": 5})
da.x.attrs["long_name"] = "x"
da.attrs["test"] = "test"
da.coords["c2"] = 0.5
da.coords["ndcoord"] = da.x * 2
da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5})
return da
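# Hedged note on the builder above: "a" has shape (x: 10, y: 20) chunked as
# ((4, 4, 2), (5, 5, 5, 5)); the chunk assertions in the unify_chunks and
# map_blocks tests below rely on exactly this layout.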
def make_ds():
map_ds = xr.Dataset()
map_ds["a"] = make_da()
map_ds["b"] = map_ds.a + 50
map_ds["c"] = map_ds.x + 20
map_ds = map_ds.chunk({"x": 4, "y": 5})
map_ds["d"] = ("z", [1, 1, 1, 1])
map_ds["z"] = [0, 1, 2, 3]
map_ds["e"] = map_ds.x + map_ds.y
map_ds.coords["c1"] = 0.5
map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x)))
map_ds.coords["cx"].attrs["test2"] = "test2"
map_ds.attrs["test"] = "test"
map_ds.coords["xx"] = map_ds["a"] * map_ds.y
map_ds.x.attrs["long_name"] = "x"
map_ds.y.attrs["long_name"] = "y"
return map_ds
# fixtures cannot be used in parametrize statements;
# use this workaround instead:
# https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
@pytest.fixture
def map_da():
return make_da()
@pytest.fixture
def map_ds():
return make_ds()
def test_unify_chunks(map_ds):
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
ds_copy.chunks
expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)}
with raise_if_dask_computes():
actual_chunks = ds_copy.unify_chunks().chunks
assert actual_chunks == expected_chunks
assert_identical(map_ds, ds_copy.unify_chunks())
out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy"))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == expected_chunks
# Test unordered dims
da = ds_copy["cxy"]
out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1}))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2))
# Test mismatch
with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"):
xr.unify_chunks(da, da.isel(x=slice(2)))
@pytest.mark.parametrize("obj", [make_ds(), make_da()])
@pytest.mark.parametrize(
"transform", [lambda x: x.compute(), lambda x: x.unify_chunks()]
)
def test_unify_chunks_shallow_copy(obj, transform):
obj = transform(obj)
unified = obj.unify_chunks()
    assert_identical(obj, unified)
    assert obj is not unified  # unify_chunks returns a shallow copy
@pytest.mark.parametrize("obj", [make_da()])
def test_auto_chunk_da(obj):
actual = obj.chunk("auto").data
expected = obj.data.rechunk("auto")
np.testing.assert_array_equal(actual, expected)
assert actual.chunks == expected.chunks
def test_map_blocks_error(map_da, map_ds):
def bad_func(darray):
return (darray * darray.x + 5 * darray.y)[:1, :1]
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(bad_func, map_da).compute()
def returns_numpy(darray):
return (darray * darray.x + 5 * darray.y).values
with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"):
xr.map_blocks(returns_numpy, map_da)
with pytest.raises(TypeError, match=r"args must be"):
xr.map_blocks(operator.add, map_da, args=10)
with pytest.raises(TypeError, match=r"kwargs must be"):
xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])
def really_bad_func(darray):
raise ValueError("couldn't do anything.")
with pytest.raises(Exception, match=r"Cannot infer"):
xr.map_blocks(really_bad_func, map_da)
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
xr.map_blocks(bad_func, ds_copy)
with pytest.raises(TypeError, match=r"Cannot pass dask collections"):
xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk()))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj)
expected = func(obj)
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_convert_args_to_list(obj):
expected = obj + 10
with raise_if_dask_computes():
actual = xr.map_blocks(operator.add, obj, [10])
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_dask_args():
da1 = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(20)},
).chunk({"x": 5, "y": 4})
# check that block shapes are the same
def sumda(da1, da2):
assert da1.shape == da2.shape
return da1 + da2
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(sumda, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# one dimension in common
da2 = (da1 + 1).isel(x=1, drop=True)
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# test that everything works when dimension names are different
da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"})
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"):
xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})])
with pytest.raises(ValueError, match=r"indexes along dimension 'x' are not equal"):
xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))])
# reduction
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2])
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
# reduction with template
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(
lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x")
)
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_add_attrs(obj):
def add_attrs(obj):
obj = obj.copy(deep=True)
obj.attrs["new"] = "new"
obj.cxy.attrs["new2"] = "new2"
return obj
expected = add_attrs(obj)
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj)
assert_identical(actual, expected)
# when template is specified, attrs are copied from template, not set by function
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj, template=obj)
assert_identical(actual, obj)
def test_map_blocks_change_name(map_da):
def change_name(obj):
obj = obj.copy(deep=True)
obj.name = "new"
return obj
expected = change_name(map_da)
with raise_if_dask_computes():
actual = xr.map_blocks(change_name, map_da)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_kwargs(obj):
expected = xr.full_like(obj, fill_value=np.nan)
with raise_if_dask_computes():
actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan))
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_to_array(map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(lambda x: x.to_array(), map_ds)
# to_array does not preserve name, so cannot use assert_identical
assert_equal(actual, map_ds.to_array())
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.to_dataset(),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)),
lambda x: x.astype(np.int32),
lambda x: x.x,
],
)
def test_map_blocks_da_transformations(func, map_da):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_da)
assert_identical(actual, func(map_da))
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.drop_vars("cxy"),
lambda x: x.drop_vars("a"),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.rename({"a": "new1", "b": "new2"}),
lambda x: x.x,
],
)
def test_map_blocks_ds_transformations(func, map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_ds)
assert_identical(actual, func(map_ds))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_da_ds_with_template(obj):
func = lambda x: x.isel(x=[1])
template = obj.isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj, template=template)
assert_identical(actual, template)
with raise_if_dask_computes():
actual = obj.map_blocks(func, template=template)
assert_identical(actual, template)
def test_map_blocks_template_convert_object():
da = make_da()
func = lambda x: x.to_dataset().isel(x=[1])
template = da.to_dataset().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, da, template=template)
assert_identical(actual, template)
ds = da.to_dataset()
func = lambda x: x.to_array().isel(x=[1])
template = ds.to_array().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, ds, template=template)
assert_identical(actual, template)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_errors_bad_template(obj):
with pytest.raises(ValueError, match=r"unexpected coordinate variables"):
xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"does not contain coordinate variables"):
xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Dimensions {'x'} missing"):
xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()
with pytest.raises(TypeError, match=r"must be a DataArray"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute()
with pytest.raises(ValueError, match=r"map_blocks requires that one block"):
xr.map_blocks(
lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1])
).compute()
with pytest.raises(ValueError, match=r"Expected index 'x' to be"):
xr.map_blocks(
lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values
obj,
template=obj.isel(x=[1, 5, 9]),
).compute()
def test_map_blocks_errors_bad_template_2(map_ds):
with pytest.raises(ValueError, match=r"unexpected data variables {'xyz'}"):
xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute()
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_object_method(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
expected = xr.map_blocks(func, obj)
actual = obj.map_blocks(func)
assert_identical(expected, actual)
def test_map_blocks_hlg_layers():
# regression test for #3599
ds = xr.Dataset(
{
"x": (("a",), dask.array.ones(10, chunks=(5,))),
"z": (("b",), dask.array.ones(10, chunks=(5,))),
}
)
mapped = ds.map_blocks(lambda x: x)
xr.testing.assert_equal(mapped, ds)
def test_make_meta(map_ds):
from ..core.parallel import make_meta
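    # make_meta builds a zero-sized template of the input Dataset; map_blocks
    # uses such templates to infer output structure without computing (a
    # hedged reading of the helper's role).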
meta = make_meta(map_ds)
for variable in map_ds._coord_names:
assert variable in meta._coord_names
assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim
for variable in map_ds.data_vars:
assert variable in meta.data_vars
assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim
def test_identical_coords_no_computes():
lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
a = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
b = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
with raise_if_dask_computes():
c = a + b
assert_identical(c, a)
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
@pytest.mark.parametrize(
"transform",
[
lambda x: x.reset_coords(),
lambda x: x.reset_coords(drop=True),
lambda x: x.isel(x=1),
lambda x: x.attrs.update(new_attrs=1),
lambda x: x.assign_coords(cxy=1),
lambda x: x.rename({"x": "xnew"}),
lambda x: x.rename({"cxy": "cxynew"}),
],
)
def test_token_changes_on_transform(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj))
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
def test_token_changes_when_data_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
# Change data_var
if isinstance(obj, DataArray):
obj *= 2
else:
obj["a"] *= 2
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
# Change non-index coord
obj.coords["ndcoord"] *= 2
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
# Change IndexVariable
obj = obj.assign_coords(x=obj.x * 2)
with raise_if_dask_computes():
t4 = dask.base.tokenize(obj)
assert t4 != t3
@pytest.mark.parametrize("obj", [make_da().compute(), make_ds().compute()])
def test_token_changes_when_buffer_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
if isinstance(obj, DataArray):
obj[0, 0] = 123
else:
obj["a"][0, 0] = 123
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
obj.coords["ndcoord"][0] = 123
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
@pytest.mark.parametrize(
"transform",
[lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]])
def test_token_identical(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))
assert dask.base.tokenize(obj.compute()) == dask.base.tokenize(
transform(obj.compute())
)
def test_recursive_token():
"""Test that tokenization is invoked recursively, and doesn't just rely on the
output of str()
"""
a = np.ones(10000)
b = np.ones(10000)
b[5000] = 2
assert str(a) == str(b)
assert dask.base.tokenize(a) != dask.base.tokenize(b)
# Test DataArray and Variable
da_a = DataArray(a)
da_b = DataArray(b)
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
# Test Dataset
ds_a = da_a.to_dataset(name="x")
ds_b = da_b.to_dataset(name="x")
assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b)
# Test IndexVariable
da_a = DataArray(a, dims=["x"], coords={"x": a})
da_b = DataArray(a, dims=["x"], coords={"x": b})
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
@requires_scipy_or_netCDF4
def test_normalize_token_with_backend(map_ds):
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
map_ds.to_netcdf(tmp_file)
read = xr.open_dataset(tmp_file)
assert not dask.base.tokenize(map_ds) == dask.base.tokenize(read)
read.close()
@pytest.mark.parametrize(
"compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_variables(compat):
var1 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
var2 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
var3 = xr.Variable(("y", "x"), da.zeros((20, 10), chunks=2))
with raise_if_dask_computes():
assert getattr(var1, compat)(var2, equiv=lazy_array_equiv)
    # values are actually equal, but we don't know that until we compute, so return None
with raise_if_dask_computes():
assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None
    # shapes are not equal, so return False without computing
with raise_if_dask_computes():
assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False
# if one or both arrays are numpy, return None
assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None
assert (
getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None
)
with raise_if_dask_computes():
assert getattr(var1, compat)(var2.transpose("y", "x"))
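def _demo_lazy_array_equiv_tristate():
    # Hedged sketch of the tri-state contract exercised above, assuming
    # lazy_array_equiv is the helper imported at module level: True when the
    # comparison is decidable lazily and equal, False when decidably unequal,
    # None when only a compute could tell.
    a = da.zeros((4, 4), chunks=2)
    assert lazy_array_equiv(a, a) is True  # identical object
    assert lazy_array_equiv(a, a[:2]) is False  # shape mismatch
    assert lazy_array_equiv(a, a + 0) is None  # equal values, unknown lazily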
@pytest.mark.parametrize(
"compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_merge(compat):
da1 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
da2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
da3 = xr.DataArray(da.ones((20, 10), chunks=2), dims=("y", "x"))
with raise_if_dask_computes():
xr.merge([da1, da2], compat=compat)
# shapes are not equal; no computes necessary
with raise_if_dask_computes(max_computes=0):
with pytest.raises(ValueError):
xr.merge([da1, da3], compat=compat)
with raise_if_dask_computes(max_computes=2):
xr.merge([da1, da2 / 2], compat=compat)
@pytest.mark.filterwarnings("ignore::FutureWarning") # transpose_coords
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
@pytest.mark.parametrize(
"transform",
[
lambda a: a.assign_attrs(new_attr="anew"),
lambda a: a.assign_coords(cxy=a.cxy),
lambda a: a.copy(),
lambda a: a.isel(x=np.arange(a.sizes["x"])),
lambda a: a.isel(x=slice(None)),
lambda a: a.loc[dict(x=slice(None))],
lambda a: a.loc[dict(x=np.arange(a.sizes["x"]))],
lambda a: a.loc[dict(x=a.x)],
lambda a: a.sel(x=a.x),
lambda a: a.sel(x=a.x.values),
lambda a: a.transpose(...),
lambda a: a.squeeze(), # no dimensions to squeeze
lambda a: a.sortby("x"), # "x" is already sorted
lambda a: a.reindex(x=a.x),
lambda a: a.reindex_like(a),
lambda a: a.rename({"cxy": "cnew"}).rename({"cnew": "cxy"}),
lambda a: a.pipe(lambda x: x),
lambda a: xr.align(a, xr.zeros_like(a))[0],
# assign
# swap_dims
# set_index / reset_index
],
)
def test_transforms_pass_lazy_array_equiv(obj, transform):
with raise_if_dask_computes():
assert_equal(obj, transform(obj))
def test_more_transforms_pass_lazy_array_equiv(map_da, map_ds):
with raise_if_dask_computes():
assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy)
assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy)
assert_equal(map_ds.map(lambda x: x), map_ds)
assert_equal(map_ds.set_coords("a").reset_coords("a"), map_ds)
assert_equal(map_ds.update({"a": map_ds.a}), map_ds)
# fails because of index error
# assert_equal(
# map_ds.rename_dims({"x": "xnew"}).rename_dims({"xnew": "x"}), map_ds
# )
assert_equal(
map_ds.rename_vars({"cxy": "cnew"}).rename_vars({"cnew": "cxy"}), map_ds
)
assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da)
assert_equal(map_da.astype(map_da.dtype), map_da)
assert_equal(map_da.transpose("y", "x", transpose_coords=False).cxy, map_da.cxy)
def test_optimize():
# https://github.com/pydata/xarray/issues/3698
a = dask.array.ones((10, 4), chunks=(5, 2))
arr = xr.DataArray(a).chunk(5)
(arr2,) = dask.optimize(arr)
arr2.compute()
# The graph_manipulation module has been in dask since 2021.2, but it became
# usable with xarray only since 2021.3
@pytest.mark.skipif(dask_version <= "2021.02.0", reason="new module")
def test_graph_manipulation():
"""dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder
    function returned by __dask_postpersist__; also, the dsk passed to the rebuilder is
    a HighLevelGraph, whereas with dask.persist() and dask.optimize() it's a plain dict.
"""
import dask.graph_manipulation as gm
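    # Build a small multi-layer graph: two rechunks plus a multiply give
    # clone() several HighLevelGraph layers to rename (hedged reading).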
v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2
da = DataArray(v)
ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])})
v2, da2, ds2 = gm.clone(v, da, ds)
assert_equal(v2, v)
assert_equal(da2, da)
assert_equal(ds2, ds)
for a, b in ((v, v2), (da, da2), (ds, ds2)):
assert a.__dask_layers__() != b.__dask_layers__()
assert len(a.__dask_layers__()) == len(b.__dask_layers__())
assert a.__dask_graph__().keys() != b.__dask_graph__().keys()
assert len(a.__dask_graph__()) == len(b.__dask_graph__())
assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()
assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)
# Above we performed a slice operation; adding the two slices back together creates
# a diamond-shaped dependency graph, which in turn will trigger a collision in layer
# names if we were to use HighLevelGraph.cull() instead of
# HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().
assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2)
((52198, 52223), 'xarray.open_dataset', 'xr.open_dataset', (['tmp_file'], {}), '(tmp_file)\n', (52213, 52223), True, 'import xarray as xr\n'), ((53872, 53907), 'xarray.merge', 'xr.merge', (['[da1, da2]'], {'compat': 'compat'}), '([da1, da2], compat=compat)\n', (53880, 53907), True, 'import xarray as xr\n'), ((54152, 54191), 'xarray.merge', 'xr.merge', (['[da1, da2 / 2]'], {'compat': 'compat'}), '([da1, da2 / 2], compat=compat)\n', (54160, 54191), True, 'import xarray as xr\n'), ((4627, 4683), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""stored in a dask array"""'}), "(TypeError, match='stored in a dask array')\n", (4640, 4683), False, 'import pytest\n'), ((6696, 6712), 'pickle.dumps', 'pickle.dumps', (['a1'], {}), '(a1)\n', (6708, 6712), False, 'import pickle\n'), ((7469, 7537), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""only works along an axis"""'}), "(NotImplementedError, match='only works along an axis')\n", (7482, 7537), False, 'import pytest\n'), ((7576, 7644), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""only works along an axis"""'}), "(NotImplementedError, match='only works along an axis')\n", (7589, 7644), False, 'import pytest\n'), ((8318, 8354), 'xarray.Variable.concat', 'Variable.concat', (['[v[:2], v[2:]]', '"""x"""'], {}), "([v[:2], v[2:]], 'x')\n", (8333, 8354), False, 'from xarray import DataArray, Dataset, Variable\n'), ((8399, 8433), 'xarray.Variable.concat', 'Variable.concat', (['[v[0], v[1]]', '"""x"""'], {}), "([v[0], v[1]], 'x')\n", (8414, 8433), False, 'from xarray import DataArray, Dataset, Variable\n'), ((8478, 8512), 'xarray.Variable.concat', 'Variable.concat', (['[u[0], v[1]]', '"""x"""'], {}), "([u[0], v[1]], 'x')\n", (8493, 8512), False, 'from xarray import DataArray, Dataset, Variable\n'), ((8557, 8591), 'xarray.Variable.concat', 'Variable.concat', (['[v[0], u[1]]', '"""x"""'], {}), "([v[0], u[1]], 'x')\n", (8572, 8591), False, 'from xarray import DataArray, Dataset, Variable\n'), ((8649, 8715), 'xarray.Variable.concat', 'Variable.concat', (['[v[[0, 2]], v[[1]]]', '"""x"""'], {'positions': '[[0, 2], [1]]'}), "([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]])\n", (8664, 8715), False, 'from xarray import DataArray, Dataset, Variable\n'), ((9220, 9229), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (9226, 9229), True, 'import numpy as np\n'), ((9231, 9240), 'xarray.ufuncs.sin', 'xu.sin', (['v'], {}), '(v)\n', (9237, 9240), True, 'import xarray.ufuncs as xu\n'), ((9436, 9452), 'numpy.maximum', 'np.maximum', (['u', '(0)'], {}), '(u, 0)\n', (9446, 9452), True, 'import numpy as np\n'), ((9454, 9470), 'xarray.ufuncs.maximum', 'xu.maximum', (['v', '(0)'], {}), '(v, 0)\n', (9464, 9470), True, 'import xarray.ufuncs as xu\n'), ((9507, 9523), 'numpy.maximum', 'np.maximum', (['u', '(0)'], {}), '(u, 0)\n', (9517, 9523), True, 'import numpy as np\n'), ((9525, 9541), 'xarray.ufuncs.maximum', 'xu.maximum', (['(0)', 'v'], {}), '(0, v)\n', (9535, 9541), True, 'import xarray.ufuncs as xu\n'), ((18928, 18976), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""dask"""'}), "(NotImplementedError, match='dask')\n", (18941, 18976), False, 'import pytest\n'), ((20351, 20360), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (20357, 20360), True, 'import numpy as np\n'), ((20362, 20371), 'xarray.ufuncs.sin', 'xu.sin', (['v'], {}), '(v)\n', (20368, 20371), True, 'import xarray.ufuncs as xu\n'), ((23559, 23575), 'pickle.dumps', 'pickle.dumps', (['a1'], {}), '(a1)\n', (23571, 23575), False, 
'import pickle\n'), ((24325, 24342), 'pickle.dumps', 'pickle.dumps', (['ds1'], {}), '(ds1)\n', (24337, 24342), False, 'import pickle\n'), ((24973, 24997), 'contextlib.suppress', 'suppress', (['AttributeError'], {}), '(AttributeError)\n', (24981, 24997), False, 'from contextlib import suppress\n'), ((25441, 25465), 'contextlib.suppress', 'suppress', (['AttributeError'], {}), '(AttributeError)\n', (25449, 25465), False, 'from contextlib import suppress\n'), ((28047, 28073), 'numpy.array', 'np.array', (['[0, 1]', 'np.int64'], {}), '([0, 1], np.int64)\n', (28055, 28073), True, 'import numpy as np\n'), ((28889, 28910), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (28904, 28910), True, 'import numpy as np\n'), ((28996, 29022), 'numpy.array', 'np.array', (['[0, 1]', 'np.int64'], {}), '([0, 1], np.int64)\n', (29004, 29022), True, 'import numpy as np\n'), ((29411, 29424), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (29420, 29424), True, 'import numpy as np\n'), ((30495, 30514), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (30510, 30514), True, 'import numpy as np\n'), ((31596, 31667), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""does not match the set of dimensions"""'}), "(ValueError, match='does not match the set of dimensions')\n", (31609, 31667), False, 'import pytest\n'), ((31853, 31865), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (31862, 31865), True, 'import numpy as np\n'), ((54020, 54045), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (54033, 54045), False, 'import pytest\n'), ((54059, 54094), 'xarray.merge', 'xr.merge', (['[da1, da3]'], {'compat': 'compat'}), '([da1, da3], compat=compat)\n', (54067, 54094), True, 'import xarray as xr\n'), ((56479, 56494), 'xarray.DataArray', 'xr.DataArray', (['a'], {}), '(a)\n', (56491, 56494), True, 'import xarray as xr\n'), ((936, 960), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (957, 960), True, 'import numpy as np\n'), ((2353, 2377), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2374, 2377), True, 'import numpy as np\n'), ((4004, 4016), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (4013, 4016), True, 'import numpy as np\n'), ((20539, 20551), 'xarray.DataArray', 'DataArray', (['a'], {}), '(a)\n', (20548, 20551), False, 'from xarray import DataArray, Dataset, Variable\n'), ((21311, 21323), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (21320, 21323), True, 'import numpy as np\n'), ((21325, 21337), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (21334, 21337), True, 'import numpy as np\n'), ((25683, 25700), 'xarray.DataArray', 'DataArray', (['[1, 2]'], {}), '([1, 2])\n', (25692, 25700), False, 'from xarray import DataArray, Dataset, Variable\n'), ((27124, 27145), 'pandas.Index', 'pd.Index', (['t'], {'name': '"""t"""'}), "(t, name='t')\n", (27132, 27145), True, 'import pandas as pd\n'), ((29654, 29675), 'pandas.Index', 'pd.Index', (['t'], {'name': '"""t"""'}), "(t, name='t')\n", (29662, 29675), True, 'import pandas as pd\n'), ((30235, 30256), 'pandas.Index', 'pd.Index', (['t'], {'name': '"""t"""'}), "(t, name='t')\n", (30243, 30256), True, 'import pandas as pd\n'), ((31100, 31136), 'xarray.Dataset', 'Dataset', (["{'w': (('x', 'y'), values)}"], {}), "({'w': (('x', 'y'), values)})\n", (31107, 31136), False, 'from xarray import DataArray, Dataset, Variable\n'), ((32004, 32016), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (32013, 32016), True, 
'import numpy as np\n'), ((35665, 35682), 'numpy.ones', 'np.ones', (['(10, 20)'], {}), '((10, 20))\n', (35672, 35682), True, 'import numpy as np\n'), ((38699, 38730), 'xarray.map_blocks', 'xr.map_blocks', (['bad_func', 'map_da'], {}), '(bad_func, map_da)\n', (38712, 38730), True, 'import xarray as xr\n'), ((40460, 40477), 'numpy.ones', 'np.ones', (['(10, 20)'], {}), '((10, 20))\n', (40467, 40477), True, 'import numpy as np\n'), ((55600, 55636), 'xarray.broadcast', 'xr.broadcast', (['map_ds.cxy', 'map_ds.cxy'], {}), '(map_ds.cxy, map_ds.cxy)\n', (55612, 55636), True, 'import xarray as xr\n'), ((4127, 4139), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (4136, 4139), True, 'import numpy as np\n'), ((20603, 20615), 'xarray.DataArray', 'DataArray', (['a'], {}), '(a)\n', (20612, 20615), False, 'from xarray import DataArray, Dataset, Variable\n'), ((20668, 20680), 'xarray.DataArray', 'DataArray', (['x'], {}), '(x)\n', (20677, 20680), False, 'from xarray import DataArray, Dataset, Variable\n'), ((20733, 20745), 'xarray.DataArray', 'DataArray', (['x'], {}), '(x)\n', (20742, 20745), False, 'from xarray import DataArray, Dataset, Variable\n'), ((54524, 54547), 'numpy.arange', 'np.arange', (["a.sizes['x']"], {}), "(a.sizes['x'])\n", (54533, 54547), True, 'import numpy as np\n'), ((55168, 55184), 'xarray.zeros_like', 'xr.zeros_like', (['a'], {}), '(a)\n', (55181, 55184), True, 'import xarray as xr\n'), ((35730, 35743), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (35739, 35743), True, 'import numpy as np\n'), ((35750, 35769), 'numpy.arange', 'np.arange', (['(100)', '(120)'], {}), '(100, 120)\n', (35759, 35769), True, 'import numpy as np\n'), ((40525, 40538), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (40534, 40538), True, 'import numpy as np\n'), ((40545, 40558), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (40554, 40558), True, 'import numpy as np\n'), ((54668, 54691), 'numpy.arange', 'np.arange', (["a.sizes['x']"], {}), "(a.sizes['x'])\n", (54677, 54691), True, 'import numpy as np\n'), ((57089, 57112), 'xarray.Variable', 'Variable', (["['x']", '[1, 2]'], {}), "(['x'], [1, 2])\n", (57097, 57112), False, 'from xarray import DataArray, Dataset, Variable\n'), ((41666, 41679), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (41675, 41679), True, 'import numpy as np\n')] |
import numpy as np
import os
import sys
import getopt
import code # For development: code.interact(local=locals())
import time
day_per_year = 365.0
class pft_bc_type:
def __init__(self):
# Initialize a dictionary of parameters for any pft
self.pft_bc_dic = {}
def DailyCFromUnitGPPAR(leaf_area,AGB):
# -----------------------------------------------------------------------------------
# This routine estimates Net Daily Carbon Gains (GPP-AR) by estimating
# a mean canopy GPP per leaf area per year, and by estimating
# a mean autotrophic respiration per kilogram per year, both from literature.
# Thus to scale to a plant, the plant's leaf area and total biomass are needed.
#
    # These numbers are taken from Chambers et al. 2004
# from ZF2 Manaus Brazil
# -----------------------------------------------------------------------------------
kg_per_Mg = 1000.0
m2_per_ha = 10000.0
site_AGB = 151.35 # MgC/ha
site_NPP = 9.0 # MgC/ha/yr
site_AR = 21.0 # MgC/ha/yr
site_LAI = 4.7 # m2/m2
#site_Rleaf = 9.8 # MgC/ha/yr
#site_Rwood = 4.2 # MgC/ha/yr
#site_Rroot = 5.5 # MgC/ha/yr
GPP_per_larea_yr = kg_per_Mg * (site_NPP + site_AR) / \
site_LAI / m2_per_ha
AR_per_kg_yr = kg_per_Mg * site_AR / site_AGB / \
m2_per_ha
GPP = 100.8*GPP_per_larea_yr * leaf_area / day_per_year
AR = AR_per_kg_yr * AGB / day_per_year
NetDailyC = GPP - AR
return NetDailyC
def DailyCFromCArea(presc_npp_p1,c_area,phen_type,leaf_status):
# -----------------------------------------------------------------------------------
    # This method was provided by <NAME> via his inferences from the PPA
    # literature. Here, net daily carbon [kg] is based on one of two mutually
    # exclusive parameters (NPP per crown area per year), for plants that are either in
# the upper canopy (access to sunlight) or in the understory (low sunlight)
#
# c_area, footprint of the crown area [m2].
# presc_npp_p1, npp generated per crown area [kgC/m2/yr]
# -----------------------------------------------------------------------------------
if( (phen_type == 1) or (leaf_status ==2)):
NetDailyC = presc_npp_p1 * c_area / day_per_year
else:
NetDailyC = 0.0
return NetDailyC
def DailyCNPFromCArea(presc_npp_p1,presc_nflux_p1, \
presc_pflux_p1,c_area,phen_type,leaf_status):
# -----------------------------------------------------------------------------------
    # This method was provided by <NAME> via his inferences from the PPA
    # literature. Here, net daily carbon [kg] is based on one of two mutually
    # exclusive parameters (NPP per crown area per year), for plants that are either in
# the upper canopy (access to sunlight) or in the understory (low sunlight)
#
# c_area, footprint of the crown area [m2].
# presc_npp_canopy, npp generated per crown area in canopy [kgC/m2/yr]
# presc_npp_understory, npp generated per crown area in understory [kgC/m2/yr]
# presc_nflux_p1, Nitrogen flux per crown area [kgN/m2/yr]
# presc_pflux_p1, Phosphorus flux per crown area [kgP/m2/yr]
# -----------------------------------------------------------------------------------
if( (phen_type == 1) or (leaf_status ==2)):
NetDailyC = presc_npp_p1 * c_area / day_per_year
NetDailyN = presc_nflux_p1 * c_area / day_per_year
NetDailyP = presc_pflux_p1 * c_area / day_per_year
else:
NetDailyC = 0.0
NetDailyN = 0.0
NetDailyP = 0.0
return NetDailyC, NetDailyN, NetDailyP
def DailyCNPFromStorageSinWave(doy,store_c,presc_npp_p1, \
presc_nflux_p1,presc_pflux_p1,c_area,presc_npp_amp, \
phen_type, leaf_status):
    # This method is supposed to simulate a seasonal cycle of NPP.
    # In some cases we pass negative daily carbon gain to the allocation model;
    # however, we have to be careful to not make negative gains larger
    # than available storage in those cases.  This is not necessarily the most
    # realistic model, but it's important to test that the parteh algorithms can handle
    # these stressful negative gain conditions.
doy0=0.0
sin_func = np.sin( (doy-doy0)/366.0 * 2.0 * np.pi )
#if (sin_func>0.0):
# NetDailyC = sin_func * presc_npp_p1 * c_area / day_per_year
#else:
# NetDailyC = -np.minimum( -neg_store_frac * sin_func * presc_npp_p1* c_area / day_per_year, 0.98* np.float(store_c))
NetDailyC = (presc_npp_amp * sin_func * presc_npp_p1 + presc_npp_p1) * c_area/day_per_year
    # This is a fail-safe: a large negative gain can't be larger than storage
if (NetDailyC < 0.0):
NetDailyC = -np.minimum(-NetDailyC,0.98* np.float(store_c))
#print("sin_func: {}, NetDailyC: {}, store_c: {}, c_area :{}".format(sin_func,NetDailyC,store_c,c_area))
if( (phen_type == 1) or (leaf_status ==2)):
NetDailyN = presc_nflux_p1 * c_area / day_per_year
NetDailyP = presc_pflux_p1 * c_area / day_per_year
else:
NetDailyN = 0.0
NetDailyP = 0.0
NetDailyC = 0.0
return NetDailyC, NetDailyN, NetDailyP
def DeciduousPhenology(doy, target_leaf_c, store_c, phen_type):
# Time leaf-on with rising NPP
leaf_on_doy = np.int(366.0 * 0.01)
leaf_off_doy = np.int(366.0 * 0.55)
if ( doy==leaf_on_doy):
flush_c = np.minimum(store_c,target_leaf_c * 0.5)
else:
flush_c = 0.0
if ( doy==leaf_off_doy):
drop_frac_c = 1.0
else:
drop_frac_c = 0.0
if(doy>=leaf_on_doy and doy<leaf_off_doy):
leaf_status = 2 # Leaves are on
else:
leaf_status = 1 # Leaves are off
if(phen_type==1):
flush_c = 0.0
drop_frac_c = 0.0
leaf_status = 2
return flush_c, drop_frac_c, leaf_status
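

if __name__ == "__main__":
    # A minimal usage sketch with hypothetical plant values (a 5 m2 crown,
    # 0.4 kgC/m2/yr prescribed NPP, deciduous phenology, phen_type=2).
    # None of these numbers come from the functions above; they only
    # exercise the leaf-on/leaf-off switching across a year.
    for doy in (1, 100, 300):
        flush_c, drop_frac_c, leaf_status = DeciduousPhenology(
            doy, target_leaf_c=0.5, store_c=1.0, phen_type=2)
        net_c = DailyCFromCArea(
            presc_npp_p1=0.4, c_area=5.0, phen_type=2, leaf_status=leaf_status)
        print("doy={:3d} leaf_status={} NetDailyC={:.5f} kgC/day".format(
            doy, leaf_status, net_c))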
| [
"numpy.sin",
"numpy.float",
"numpy.int",
"numpy.minimum"
] | [((4462, 4504), 'numpy.sin', 'np.sin', (['((doy - doy0) / 366.0 * 2.0 * np.pi)'], {}), '((doy - doy0) / 366.0 * 2.0 * np.pi)\n', (4468, 4504), True, 'import numpy as np\n'), ((5537, 5557), 'numpy.int', 'np.int', (['(366.0 * 0.01)'], {}), '(366.0 * 0.01)\n', (5543, 5557), True, 'import numpy as np\n'), ((5578, 5598), 'numpy.int', 'np.int', (['(366.0 * 0.55)'], {}), '(366.0 * 0.55)\n', (5584, 5598), True, 'import numpy as np\n'), ((5646, 5686), 'numpy.minimum', 'np.minimum', (['store_c', '(target_leaf_c * 0.5)'], {}), '(store_c, target_leaf_c * 0.5)\n', (5656, 5686), True, 'import numpy as np\n'), ((4990, 5007), 'numpy.float', 'np.float', (['store_c'], {}), '(store_c)\n', (4998, 5007), True, 'import numpy as np\n')] |
import numpy as np
import keras.backend as K
import keras
from keras.callbacks import EarlyStopping
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import RMSprop
import os
import sys
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, here)
from load_data import load_data
def r2(y_true, y_pred):
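    # Coefficient of determination, computed per output column and then
    # averaged: R^2 = 1 - SS_res / SS_tot. keras.backend.epsilon() guards
    # against division by zero when a column of y_true is constant.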
SS_res = keras.backend.sum(keras.backend.square(y_true - y_pred), axis=0)
SS_tot = keras.backend.sum(
keras.backend.square(y_true - keras.backend.mean(y_true, axis=0)), axis=0
)
output_scores = 1 - SS_res / (SS_tot + keras.backend.epsilon())
r2 = keras.backend.mean(output_scores)
return r2
HISTORY = None
def run(point):
global HISTORY
(x_train, y_train), (x_valid, y_valid) = load_data()
model = Sequential()
model.add(Dense(
point['units'],
activation=point['activation'],
input_shape=tuple(np.shape(x_train)[1:])))
model.add(Dense(1))
model.summary()
model.compile(loss='mse', optimizer=RMSprop(lr=point['lr']), metrics=[r2])
history = model.fit(x_train, y_train,
batch_size=64,
epochs=1000,
verbose=1,
callbacks=[EarlyStopping(
monitor='val_r2',
mode='max',
verbose=1,
patience=10
)],
validation_data=(x_valid, y_valid))
HISTORY = history.history
return history.history['val_r2'][-1]
if __name__ == '__main__':
point = {
'activation': 'relu',
'lr': 0.8820413612862609,
'units': 21
}
objective = run(point)
print('objective: ', objective)
import matplotlib.pyplot as plt
plt.plot(HISTORY['val_r2'])
plt.xlabel('Epochs')
plt.ylabel('Objective: $R^2$')
plt.grid()
plt.show()
| [
"sys.path.insert",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"keras.backend.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.backend.square",
"keras.models.Sequential",
"keras.layers.Dense",
"load_data.load_data",
"keras.callbacks.EarlyStopping",
"os.path.abspath... | [((277, 301), 'sys.path.insert', 'sys.path.insert', (['(0)', 'here'], {}), '(0, here)\n', (292, 301), False, 'import sys\n'), ((250, 275), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (265, 275), False, 'import os\n'), ((635, 668), 'keras.backend.mean', 'keras.backend.mean', (['output_scores'], {}), '(output_scores)\n', (653, 668), False, 'import keras\n'), ((782, 793), 'load_data.load_data', 'load_data', ([], {}), '()\n', (791, 793), False, 'from load_data import load_data\n'), ((807, 819), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (817, 819), False, 'from keras.models import Sequential\n'), ((1847, 1874), 'matplotlib.pyplot.plot', 'plt.plot', (["HISTORY['val_r2']"], {}), "(HISTORY['val_r2'])\n", (1855, 1874), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1899), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (1889, 1899), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Objective: $R^2$"""'], {}), "('Objective: $R^2$')\n", (1914, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1949), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1947, 1949), True, 'import matplotlib.pyplot as plt\n'), ((1954, 1964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1962, 1964), True, 'import matplotlib.pyplot as plt\n'), ((391, 428), 'keras.backend.square', 'keras.backend.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (411, 428), False, 'import keras\n'), ((970, 978), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (975, 978), False, 'from keras.layers import Dense\n'), ((1042, 1065), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': "point['lr']"}), "(lr=point['lr'])\n", (1049, 1065), False, 'from keras.optimizers import RMSprop\n'), ((508, 542), 'keras.backend.mean', 'keras.backend.mean', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (526, 542), False, 'import keras\n'), ((601, 624), 'keras.backend.epsilon', 'keras.backend.epsilon', ([], {}), '()\n', (622, 624), False, 'import keras\n'), ((1270, 1337), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_r2"""', 'mode': '"""max"""', 'verbose': '(1)', 'patience': '(10)'}), "(monitor='val_r2', mode='max', verbose=1, patience=10)\n", (1283, 1337), False, 'from keras.callbacks import EarlyStopping\n'), ((931, 948), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (939, 948), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import matplotlib
matplotlib.rc('xtick', labelsize=6)
matplotlib.rc('ytick', labelsize=6)
from numpy import arange
class small_multiples_plot(object):
def __init__(self, fig=None, *args, **kwargs):
if fig is None:
raise AssertionError("A valid figure must be passed in.")
# fig = figure()
self.fig = fig
self.fig.subplots_adjust(bottom=0.20, left = 0.1, right=0.9, top=0.9)
self.colorbar_ax = fig.add_axes((0.1, 0.1, 0.8, 0.05))
self.multiples = small_multiples(self.fig, **kwargs)
def label_edges(self, bool_val):
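        """Toggle tick labels on the outer edges only: the left-column
        y-axes and the bottom-row x-axes are set to `bool_val`."""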
m = self.multiples
leftside = m[:,0]
for ax in leftside:
ax.yaxis.tick_left()
ax.yaxis.set_visible(bool_val)
#last row
bottomedge = m[-1,:]
for ax in bottomedge:
ax.xaxis.tick_bottom()
ax.xaxis.set_visible(bool_val)
def small_multiples(f, rows=4, columns=5, margin=(0.0,0.0), zoom_together=True):
""" Given a figure f, create linked subplots with given number of rows and columns.
Returns an object array of axes instances [rows, columns], with top left being [0,0].
"""
# rows = 4 #number in y direction
# columns = 5 #number in x direction
f.subplots_adjust(wspace=margin[0], hspace=margin[1])
# should use N.empty((rows,columns),dtype=object)
# and attribute name should perhaps be changed
multiples = arange(rows*columns, dtype=object)
multiples.shape=(rows, columns)
# No axis defined to start with
commonaxis=None
for row in range(rows):
for column in range(columns):
nth_plot = row*columns + column
ax = f.add_subplot(rows, columns, nth_plot + 1, sharex=commonaxis, sharey=commonaxis)
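            # The first axes created becomes the reference; every later
            # axes shares its x/y limits with it, so zooming one panel
            # zooms them all (when zoom_together is True).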
if not commonaxis and zoom_together:
commonaxis = ax
# leaves axes frame, but turns off axis labels and ticks
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
multiples[row, column] = ax
# ax.plot(range(10), range(10))
# ax.text(1,1,'%i, %i, %i' % (row, column, nth_plot))
# print row, column, nth_plot
return multiples
if __name__ == '__main__':
from pylab import figure, show #, subplot, show
f=figure()
m=small_multiples(f)
#first column
leftside = m[:,0]
for ax in leftside:
ax.yaxis.set_visible(True)
#last row
bottomedge = m[-1,:]
for ax in bottomedge:
ax.xaxis.set_visible(True)
show()
| [
"pylab.figure",
"matplotlib.rc",
"numpy.arange",
"pylab.show"
] | [((58, 93), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': '(6)'}), "('xtick', labelsize=6)\n", (71, 93), False, 'import matplotlib\n'), ((94, 129), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': '(6)'}), "('ytick', labelsize=6)\n", (107, 129), False, 'import matplotlib\n'), ((1506, 1542), 'numpy.arange', 'arange', (['(rows * columns)'], {'dtype': 'object'}), '(rows * columns, dtype=object)\n', (1512, 1542), False, 'from numpy import arange\n'), ((2393, 2401), 'pylab.figure', 'figure', ([], {}), '()\n', (2399, 2401), False, 'from pylab import figure, show\n'), ((2641, 2647), 'pylab.show', 'show', ([], {}), '()\n', (2645, 2647), False, 'from pylab import figure, show\n')] |
"""
Created on Sat Nov 18 23:12:08 2017
@author: <NAME> - github.com/utkuozbulak
"""
import os
import cv2
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from src.misc_functions import preprocess_image, recreate_image
class CNNLayerVisualization():
"""
Produces an image that minimizes the loss of a convolution
operation for a specific layer and filter
"""
def __init__(self, model, selected_layer, selected_filter):
self.model = model
self.model.eval()
self.selected_layer = selected_layer
self.selected_filter = selected_filter
self.conv_output = 0
# Generate a random image
self.created_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))
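        # Starting from a narrow band of light-gray noise gives the
        # optimizer a nearly featureless image to sculpt patterns into.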
# Create the folder to export images if not exists
if not os.path.exists('generated'):
os.makedirs('generated')
def hook_layer(self):
def hook_function(module, grad_in, grad_out):
# Gets the conv output of the selected filter (from selected layer)
self.conv_output = grad_out[0, self.selected_filter]
# Hook the selected layer
self.model[self.selected_layer].register_forward_hook(hook_function)
def visualise_layer_with_hooks(self):
# Hook the selected layer
self.hook_layer()
# Process image and return variable
self.processed_image = preprocess_image(self.created_image)
# Define optimizer for the image
# Earlier layers need higher learning rates to visualize whereas later layers need less
optimizer = SGD([self.processed_image], lr=5, weight_decay=1e-6)
for i in range(1, 51):
optimizer.zero_grad()
# Assign create image to a variable to move forward in the model
x = self.processed_image
for index, layer in enumerate(self.model):
# Forward pass layer by layer
# x is not used after this point because it is only needed to trigger
# the forward hook function
x = layer(x)
# Only need to forward until the selected layer is reached
if index == self.selected_layer:
# (forward hook function triggered)
break
# Loss function is the mean of the output of the selected layer/filter
# We try to minimize the mean of the output of that specific filter
loss = torch.mean(self.conv_output)
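            # Note: on PyTorch >= 0.4 the scalar would be read with
            # loss.item() rather than loss.data.numpy()[0].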
print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()[0]))
# Backward
loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
# Save image
if i % 5 == 0:
cv2.imwrite('generated/layer_vis_l' + str(self.selected_layer) +
'_f' + str(self.selected_filter) + '_iter'+str(i)+'.jpg',
self.created_image)
def visualise_layer_without_hooks(self):
# Process image and return variable
self.processed_image = preprocess_image(self.created_image)
# Define optimizer for the image
# Earlier layers need higher learning rates to visualize whereas later layers need less
optimizer = SGD([self.processed_image], lr=5, weight_decay=1e-6)
for i in range(1, 51):
optimizer.zero_grad()
# Assign create image to a variable to move forward in the model
x = self.processed_image
for index, layer in enumerate(self.model):
# Forward pass layer by layer
x = layer(x)
if index == self.selected_layer:
# Only need to forward until the selected layer is reached
# Now, x is the output of the selected layer
break
# Here, we get the specific filter from the output of the convolution operation
            # x is a tensor of shape 1x512x28x28 (for layer 17)
# So there are 512 unique filter outputs
# Following line selects a filter from 512 filters so self.conv_output will become
# a tensor of shape 28x28
self.conv_output = x[0, self.selected_filter]
# Loss function is the mean of the output of the selected layer/filter
# We try to minimize the mean of the output of that specific filter
loss = torch.mean(self.conv_output)
print('Iteration:', str(i), 'Loss:', "{0:.2f}".format(loss.data.numpy()[0]))
# Backward
loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
# Save image
if i % 5 == 0:
cv2.imwrite('generated/layer_vis_l' + str(self.selected_layer) +
'_f' + str(self.selected_filter) + '_iter'+str(i)+'.jpg',
self.created_image)
if __name__ == '__main__':
cnn_layer = 17
filter_pos = 0
# Fully connected layer is not needed
pretrained_model = models.vgg16(pretrained=True).features
layer_vis = CNNLayerVisualization(pretrained_model, cnn_layer, filter_pos)
# Layer visualization with pytorch hooks
# layer_vis.visualise_layer_with_hooks()
# Layer visualization without pytorch hooks
layer_vis.visualise_layer_without_hooks()
| [
"os.path.exists",
"torch.optim.SGD",
"os.makedirs",
"torch.mean",
"src.misc_functions.preprocess_image",
"src.misc_functions.recreate_image",
"numpy.random.uniform",
"torchvision.models.vgg16"
] | [((1440, 1476), 'src.misc_functions.preprocess_image', 'preprocess_image', (['self.created_image'], {}), '(self.created_image)\n', (1456, 1476), False, 'from src.misc_functions import preprocess_image, recreate_image\n'), ((1634, 1687), 'torch.optim.SGD', 'SGD', (['[self.processed_image]'], {'lr': '(5)', 'weight_decay': '(1e-06)'}), '([self.processed_image], lr=5, weight_decay=1e-06)\n', (1637, 1687), False, 'from torch.optim import SGD\n'), ((3226, 3262), 'src.misc_functions.preprocess_image', 'preprocess_image', (['self.created_image'], {}), '(self.created_image)\n', (3242, 3262), False, 'from src.misc_functions import preprocess_image, recreate_image\n'), ((3420, 3473), 'torch.optim.SGD', 'SGD', (['[self.processed_image]'], {'lr': '(5)', 'weight_decay': '(1e-06)'}), '([self.processed_image], lr=5, weight_decay=1e-06)\n', (3423, 3473), False, 'from torch.optim import SGD\n'), ((5306, 5335), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (5318, 5335), False, 'from torchvision import models\n'), ((740, 782), 'numpy.random.uniform', 'np.random.uniform', (['(150)', '(180)', '(224, 224, 3)'], {}), '(150, 180, (224, 224, 3))\n', (757, 782), True, 'import numpy as np\n'), ((858, 885), 'os.path.exists', 'os.path.exists', (['"""generated"""'], {}), "('generated')\n", (872, 885), False, 'import os\n'), ((899, 923), 'os.makedirs', 'os.makedirs', (['"""generated"""'], {}), "('generated')\n", (910, 923), False, 'import os\n'), ((2514, 2542), 'torch.mean', 'torch.mean', (['self.conv_output'], {}), '(self.conv_output)\n', (2524, 2542), False, 'import torch\n'), ((2801, 2837), 'src.misc_functions.recreate_image', 'recreate_image', (['self.processed_image'], {}), '(self.processed_image)\n', (2815, 2837), False, 'from src.misc_functions import preprocess_image, recreate_image\n'), ((4583, 4611), 'torch.mean', 'torch.mean', (['self.conv_output'], {}), '(self.conv_output)\n', (4593, 4611), False, 'import torch\n'), ((4870, 4906), 'src.misc_functions.recreate_image', 'recreate_image', (['self.processed_image'], {}), '(self.processed_image)\n', (4884, 4906), False, 'from src.misc_functions import preprocess_image, recreate_image\n')] |
import numpy as np
size_A = list(map(int, input().strip().split(' ')))
A = []
for i in range(size_A[0]):
in_A = list(map(int, input().strip().split(' ')))
A.append(in_A)
B = []
for i in range(size_A[1]):
in_B = list(map(int, input().strip().split(' ')))
B.append(in_B)
print(np.concatenate([A, B]))
# -- another answer
n, m, p = map(int, input().split())
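# p (the shared column count) is parsed but not otherwise needed here;
# np.array infers each row's width from the split input lines.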
A = np.array([input().split() for _ in range(n)], int)
B = np.array([input().split() for _ in range(m)], int)
print(np.concatenate([A, B], axis=0)) | [
"numpy.concatenate"
] | [((295, 317), 'numpy.concatenate', 'np.concatenate', (['[A, B]'], {}), '([A, B])\n', (309, 317), True, 'import numpy as np\n'), ((493, 523), 'numpy.concatenate', 'np.concatenate', (['[A, B]'], {'axis': '(0)'}), '([A, B], axis=0)\n', (507, 523), True, 'import numpy as np\n')] |
import logging
from os.path import exists
import cv2
import numpy as np
from pdf2image import convert_from_path
def collect_contours(image):
    """ Sub function used by the scraper.\n
    @param image: an opencv image\n
    @return an ordered list of contours found in the image.\n
This function was heavily influenced by its source.\n
@source: https://medium.com/coinmonks/a-box-detection-algorithm-for-any-image-containing-boxes-756c15d7ed26
"""
debug_index = 0
# Grab absolute thresh of image
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(
image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
invert = 255 - thresh
if (logging.getLogger().level <= logging.DEBUG):
while exists("debugOutput/scrapper/{ind}1invert.jpg".format(ind=debug_index)):
debug_index += 1
cv2.imwrite(
"debugOutput/scrapper/{ind}1invert.jpg".format(ind=debug_index), invert)
#######################################
# Defining kernels for line detection #
#######################################
kernel_length = np.array(image).shape[1]//80
verticle_kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (1, kernel_length))  # kernel for finding all vertical lines
# kernel for finding all horizontal lines
hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) # 3x3 kernel
    # Collecting Vertical Lines
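    # Erosion with the tall 1xN kernel wipes out everything except long
    # vertical runs of ink; the matching dilation restores their original
    # thickness (the two steps together form a morphological opening).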
verticle_lines = cv2.erode(invert, verticle_kernel, iterations=3)
verticle_lines = cv2.dilate(verticle_lines, verticle_kernel, iterations=3)
verticle_lines = cv2.threshold(
verticle_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/scrapper/{ind}2verticleLines.jpg".format(ind=debug_index), verticle_lines)
# Collecting Horizontal Lines
horizontal_lines = cv2.erode(invert, hori_kernel, iterations=3)
horizontal_lines = cv2.dilate(horizontal_lines, hori_kernel, iterations=3)
horizontal_lines = cv2.threshold(
horizontal_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/scrapper/{ind}3horizontalLines.jpg".format(
ind=debug_index), horizontal_lines)
    # Weighting parameters: these decide how much of each image is
    # blended when the two line images are combined into a new one.
alpha = 0.5
beta = 1.0 - alpha
    # Combining vertical and horizontal lines gives us an empty table, so
    # that letters don't become boxes
blank_table = cv2.addWeighted(
verticle_lines, alpha, horizontal_lines, beta, 0.0)
blank_table = cv2.erode(~blank_table, kernel, iterations=2)
blank_table = cv2.threshold(blank_table, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[
1] # sharpening new table
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/scrapper/{ind}4blankTable.jpg".format(ind=debug_index), blank_table)
# Detecting all contours, which gives me all box positions
contours = cv2.findContours(
blank_table, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
# Organizing contours
    # we got our boxes, but it's mostly to sort the contours
bboxes = [cv2.boundingRect(c) for c in contours]
# Sort all the contours in ascending order
contours, bboxes = zip(
*sorted(zip(contours, bboxes), key=lambda b: b[1][1], reverse=False))
return contours
# Generator
# PHASE 1: manipulate image to clearly show tabs
def image_scraper(file, output_array=None):
    """This function is phase 1 of the process. It starts by taking the image/pdf
    of the signin sheet and breaks the table apart to isolate each value in the exact
    order that they came in.\n
    @param file: the image/pdf that needs to be scraped into its values.\n
    @param output_array: a parameter passed by reference due to the nature
    of tkinter's buttons. If the param is not filled, it will just return the result.\n
    @return a multidimensional array of images that contains the values of all the slots in the table.
"""
images = []
sheets = [] # an array with each index containing the output per page
debug_index = 0
if not (file.split(".")[1] in ["jpg", "jpeg", "png", "pdf"]):
return
elif not exists(file):
raise FileNotFoundError("File given does not exist.")
if file.split(".")[1] == "pdf":
for image in convert_from_path(file):
image = np.array(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
images.append(image)
else:
# , cv2.IMREAD_GRAYSCALE)
images.append(cv2.imread(file, cv2.COLOR_RGB2BGR))
for image in images:
contours = collect_contours(image)
        # This is to tell which boxes correlate to the date
        # and which big box is the signin table.
        ###############################
        # Phase 1: Finding Main Boxes #
        ###############################
main_boxes = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
if ((h, w, 3) == image.shape):
continue
for m in main_boxes:
if (x > m[0] and w < m[2]) or (y > m[1] and h < m[3]):
break
elif(x <= m[0] and w >= m[2] and y <= m[1] and h >= m[3]):
main_boxes.remove(m)
main_boxes.append([x, y, w, h])
else:
main_boxes.append([x, y, w, h])
table = main_boxes[0] # img that contains whole table
for x, y, w, h in main_boxes:
if((w - x > table[2] - table[0]) or (h - y > table[3] - table[1])):
table = [x, y, w, h]
main_boxes.remove(table)
# making images for date and day
sheets.append([[], []])
for x, y, w, h in main_boxes:
sheets[-1][0].append(image[y:y+h, x:x+w])
# Checking if dates are text and not random images
for i in range(len(sheets[-1][0]) - 1, -1, -1):
date = sheets[-1][0][i]
temp_date = cv2.cvtColor(date, cv2.COLOR_BGR2GRAY)
temp_date = cv2.threshold(
temp_date, 230, 255, cv2.THRESH_BINARY_INV)[1]
black_pixel = cv2.countNonZero(temp_date)
total_pixel = temp_date.shape[0] * temp_date.shape[1]
# if the space filled is not between 1%-20%, then its a dud
if(black_pixel/total_pixel <= 0.01 or black_pixel/total_pixel >= 0.20):
sheets[-1][0].pop(i)
#########################################
# Phase 2: Collecting pairs for mapping #
#########################################
# Collecting contours collected from table
table = image[table[1]-5:table[1]+table[3] +
5, table[0]-5:table[0]+table[2]+5]
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/scrapper/mainTable{image}.jpg".format(image=debug_index), table)
debug_index += 1
        # Grabbing vertical and horizontal images of table for better scraping
table_compute = cv2.cvtColor(table, cv2.COLOR_BGR2GRAY)
table_compute = cv2.threshold(
table_compute, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
table_invert = 255 - table_compute
t_kernel_length = np.array(table_compute).shape[1]//80
t_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
#############################
        # Collecting Vertical Pairs #
#############################
verticle_points = []
verticle_pairs = []
# Creating verticle kernel lines
t_kernel_verticle = cv2.getStructuringElement(
cv2.MORPH_RECT, (1, t_kernel_length))
t_verticle_lines = cv2.erode(
table_invert, t_kernel_verticle, iterations=3)
t_verticle_lines = cv2.dilate(
t_verticle_lines, t_kernel_verticle, iterations=3)
t_verticle_lines = cv2.threshold(
t_verticle_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/scrapper/table{}VertLines.jpg".format(debug_index), t_verticle_lines)
        # Collecting vertical contours
contours = cv2.findContours(
t_verticle_lines, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
        # Figuring out the length that relates to the majority of the table
        # (aka, longer lengths relate to the length of the table rather than random lines)
max_length = 0
table_height_pair = () # empty tuple for checking later
for c in contours:
x, y, w, h = cv2.boundingRect(c)
if(h >= table.shape[0] * 0.9): # (y, h) == tableHeightPair):
verticle_points.append(x)
verticle_points.append(x + w)
verticle_points.sort()
# this is the gap before the table from the left side
verticle_points.pop(0)
# this is the gap after the table from the right side
verticle_points.pop(-1)
# taking points and making pairs
for i in range(0, len(verticle_points), 2):
verticle_pairs.append((verticle_points[i], verticle_points[i + 1]))
logging.debug("VerticlePairs: %s", verticle_pairs)
if (logging.getLogger().level <= logging.DEBUG):
debug_img = cv2.cvtColor(t_verticle_lines, cv2.COLOR_GRAY2BGR)
for v in verticle_pairs:
cv2.line(debug_img, (v[0], 0),
(v[0], debug_img.shape[0]), (0, 0, 255))
cv2.line(debug_img, (v[1], 0),
(v[1], debug_img.shape[0]), (0, 0, 255))
cv2.imwrite(
"debugOutput/scrapper/table{}VertContours.jpg".format(debug_index), debug_img)
###############################
# Collecting Horizontal Pairs #
###############################
horizontal_pairs = []
horizontal_points = []
# Creating horizontal kernel lines
t_kernel_horizontal = cv2.getStructuringElement(
cv2.MORPH_RECT, (t_kernel_length, 1))
t_horizontal_lines = cv2.erode(
table_invert, t_kernel_horizontal, iterations=3)
t_horizontal_lines = cv2.dilate(
t_horizontal_lines, t_kernel_horizontal, iterations=3)
t_horizontal_lines = cv2.threshold(
t_horizontal_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/scrapper/table{}HorLines.jpg".format(debug_index), t_horizontal_lines)
# Collecting Horizontal contours
contours = cv2.findContours(
t_horizontal_lines, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
        # Figuring out the length that relates to the majority of the table
        # (aka, longer lengths relate to the length of the table rather than random lines)
max_length = 0
table_width_pair = () # empty tuple for checking later
for c in contours:
x, y, w, h = cv2.boundingRect(c)
# (x, w) == tableWidthPair or w >= tHorizontalLines.shape[1] * 0.9):
if(w >= t_horizontal_lines.shape[1] * 0.9):
horizontal_points.append(y)
horizontal_points.append(y + h)
horizontal_points.sort()
logging.debug("HorizontalPoints: %s", horizontal_points)
# this is the gap before the table from the top
horizontal_points.pop(0)
# this is the gap after the table from the bottom
horizontal_points.pop(-1)
# Building pairs from points
for i in range(0, len(horizontal_points), 2):
horizontal_pairs.append(
(horizontal_points[i], horizontal_points[i + 1]))
logging.debug("HorizontalPairs: %s", horizontal_pairs)
if (logging.getLogger().level <= logging.DEBUG):
debug_img = cv2.cvtColor(t_horizontal_lines, cv2.COLOR_GRAY2BGR)
for h in horizontal_pairs:
cv2.line(debug_img, (0, h[0]),
(debug_img.shape[1], h[0]), (0, 0, 255))
cv2.line(debug_img, (0, h[1]),
(debug_img.shape[1], h[1]), (0, 0, 255))
cv2.imwrite(
"debugOutput/scrapper/table{}HorContours.jpg".format(debug_index), debug_img)
#####################################
# Phase 3: Time for actual Scraping #
#####################################
# the dictionary thatll hold all our information
dict_row = 0
for row in horizontal_pairs:
sheets[-1][1].append([])
for col in verticle_pairs:
sheets[-1][1][dict_row].append(table[row[0]:row[1], col[0]:col[1]])
if (logging.getLogger().level <= logging.DEBUG):
cv2.imwrite(
"debugOutput/dictionary/raw/table{}{}.jpg".format(
dict_row, col[1]-col[0]), table[row[0]:row[1], col[0]:col[1]])
dict_row += 1
    if output_array is None:
        return sheets
    else:
        # Fill the caller-supplied list in place; rebinding a module-level
        # name here would never reach the caller that passed the list in.
        output_array[:] = sheets
        return
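

if __name__ == "__main__":
    # A minimal usage sketch; "signin_sheet.pdf" is a hypothetical input
    # file name, not something shipped with this module.
    logging.basicConfig(level=logging.INFO)
    pages = image_scraper("signin_sheet.pdf")
    for page_index, (date_boxes, rows) in enumerate(pages):
        print("page {}: {} date box(es), {} table row(s)".format(
            page_index, len(date_boxes), len(rows)))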
| [
"logging.getLogger",
"os.path.exists",
"cv2.imread",
"cv2.countNonZero",
"logging.debug",
"cv2.threshold",
"cv2.erode",
"cv2.line",
"cv2.addWeighted",
"numpy.array",
"cv2.cvtColor",
"cv2.findContours",
"cv2.dilate",
"pdf2image.convert_from_path",
"cv2.getStructuringElement",
"cv2.bound... | [((537, 576), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (549, 576), False, 'import cv2\n'), ((1175, 1236), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(1, kernel_length)'], {}), '(cv2.MORPH_RECT, (1, kernel_length))\n', (1200, 1236), False, 'import cv2\n'), ((1351, 1412), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(kernel_length, 1)'], {}), '(cv2.MORPH_RECT, (kernel_length, 1))\n', (1376, 1412), False, 'import cv2\n'), ((1426, 1475), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (1451, 1475), False, 'import cv2\n'), ((1544, 1592), 'cv2.erode', 'cv2.erode', (['invert', 'verticle_kernel'], {'iterations': '(3)'}), '(invert, verticle_kernel, iterations=3)\n', (1553, 1592), False, 'import cv2\n'), ((1614, 1671), 'cv2.dilate', 'cv2.dilate', (['verticle_lines', 'verticle_kernel'], {'iterations': '(3)'}), '(verticle_lines, verticle_kernel, iterations=3)\n', (1624, 1671), False, 'import cv2\n'), ((2015, 2059), 'cv2.erode', 'cv2.erode', (['invert', 'hori_kernel'], {'iterations': '(3)'}), '(invert, hori_kernel, iterations=3)\n', (2024, 2059), False, 'import cv2\n'), ((2083, 2138), 'cv2.dilate', 'cv2.dilate', (['horizontal_lines', 'hori_kernel'], {'iterations': '(3)'}), '(horizontal_lines, hori_kernel, iterations=3)\n', (2093, 2138), False, 'import cv2\n'), ((2733, 2800), 'cv2.addWeighted', 'cv2.addWeighted', (['verticle_lines', 'alpha', 'horizontal_lines', 'beta', '(0.0)'], {}), '(verticle_lines, alpha, horizontal_lines, beta, 0.0)\n', (2748, 2800), False, 'import cv2\n'), ((2828, 2873), 'cv2.erode', 'cv2.erode', (['(~blank_table)', 'kernel'], {'iterations': '(2)'}), '(~blank_table, kernel, iterations=2)\n', (2837, 2873), False, 'import cv2\n'), ((590, 657), 'cv2.threshold', 'cv2.threshold', (['image', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (603, 657), False, 'import cv2\n'), ((1693, 1769), 'cv2.threshold', 'cv2.threshold', (['verticle_lines', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(verticle_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (1706, 1769), False, 'import cv2\n'), ((2162, 2240), 'cv2.threshold', 'cv2.threshold', (['horizontal_lines', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(horizontal_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (2175, 2240), False, 'import cv2\n'), ((2892, 2965), 'cv2.threshold', 'cv2.threshold', (['blank_table', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(blank_table, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (2905, 2965), False, 'import cv2\n'), ((3249, 3318), 'cv2.findContours', 'cv2.findContours', (['blank_table', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(blank_table, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3265, 3318), False, 'import cv2\n'), ((3432, 3451), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (3448, 3451), False, 'import cv2\n'), ((4635, 4658), 'pdf2image.convert_from_path', 'convert_from_path', (['file'], {}), '(file)\n', (4652, 4658), False, 'from pdf2image import convert_from_path\n'), ((7350, 7389), 'cv2.cvtColor', 'cv2.cvtColor', (['table', 'cv2.COLOR_BGR2GRAY'], {}), '(table, cv2.COLOR_BGR2GRAY)\n', (7362, 7389), False, 'import cv2\n'), ((7631, 7680), 'cv2.getStructuringElement', 
'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (7656, 7680), False, 'import cv2\n'), ((7922, 7985), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(1, t_kernel_length)'], {}), '(cv2.MORPH_RECT, (1, t_kernel_length))\n', (7947, 7985), False, 'import cv2\n'), ((8026, 8082), 'cv2.erode', 'cv2.erode', (['table_invert', 't_kernel_verticle'], {'iterations': '(3)'}), '(table_invert, t_kernel_verticle, iterations=3)\n', (8035, 8082), False, 'import cv2\n'), ((8123, 8184), 'cv2.dilate', 'cv2.dilate', (['t_verticle_lines', 't_kernel_verticle'], {'iterations': '(3)'}), '(t_verticle_lines, t_kernel_verticle, iterations=3)\n', (8133, 8184), False, 'import cv2\n'), ((9534, 9584), 'logging.debug', 'logging.debug', (['"""VerticlePairs: %s"""', 'verticle_pairs'], {}), "('VerticlePairs: %s', verticle_pairs)\n", (9547, 9584), False, 'import logging\n'), ((10356, 10419), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(t_kernel_length, 1)'], {}), '(cv2.MORPH_RECT, (t_kernel_length, 1))\n', (10381, 10419), False, 'import cv2\n'), ((10462, 10520), 'cv2.erode', 'cv2.erode', (['table_invert', 't_kernel_horizontal'], {'iterations': '(3)'}), '(table_invert, t_kernel_horizontal, iterations=3)\n', (10471, 10520), False, 'import cv2\n'), ((10563, 10628), 'cv2.dilate', 'cv2.dilate', (['t_horizontal_lines', 't_kernel_horizontal'], {'iterations': '(3)'}), '(t_horizontal_lines, t_kernel_horizontal, iterations=3)\n', (10573, 10628), False, 'import cv2\n'), ((11694, 11750), 'logging.debug', 'logging.debug', (['"""HorizontalPoints: %s"""', 'horizontal_points'], {}), "('HorizontalPoints: %s', horizontal_points)\n", (11707, 11750), False, 'import logging\n'), ((12136, 12190), 'logging.debug', 'logging.debug', (['"""HorizontalPairs: %s"""', 'horizontal_pairs'], {}), "('HorizontalPairs: %s', horizontal_pairs)\n", (12149, 12190), False, 'import logging\n'), ((705, 724), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (722, 724), False, 'import logging\n'), ((1791, 1810), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1808, 1810), False, 'import logging\n'), ((2262, 2281), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2279, 2281), False, 'import logging\n'), ((3011, 3030), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3028, 3030), False, 'import logging\n'), ((4502, 4514), 'os.path.exists', 'exists', (['file'], {}), '(file)\n', (4508, 4514), False, 'from os.path import exists\n'), ((4680, 4695), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4688, 4695), True, 'import numpy as np\n'), ((4716, 4754), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (4728, 4754), False, 'import cv2\n'), ((4854, 4889), 'cv2.imread', 'cv2.imread', (['file', 'cv2.COLOR_RGB2BGR'], {}), '(file, cv2.COLOR_RGB2BGR)\n', (4864, 4889), False, 'import cv2\n'), ((5226, 5245), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (5242, 5245), False, 'import cv2\n'), ((6273, 6311), 'cv2.cvtColor', 'cv2.cvtColor', (['date', 'cv2.COLOR_BGR2GRAY'], {}), '(date, cv2.COLOR_BGR2GRAY)\n', (6285, 6311), False, 'import cv2\n'), ((6440, 6467), 'cv2.countNonZero', 'cv2.countNonZero', (['temp_date'], {}), '(temp_date)\n', (6456, 6467), False, 'import cv2\n'), ((7414, 7489), 'cv2.threshold', 'cv2.threshold', (['table_compute', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(table_compute, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (7427, 7489), False, 'import cv2\n'), ((8225, 8303), 'cv2.threshold', 'cv2.threshold', (['t_verticle_lines', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(t_verticle_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (8238, 8303), False, 'import cv2\n'), ((8559, 8633), 'cv2.findContours', 'cv2.findContours', (['t_verticle_lines', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(t_verticle_lines, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (8575, 8633), False, 'import cv2\n'), ((8951, 8970), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (8967, 8970), False, 'import cv2\n'), ((9667, 9717), 'cv2.cvtColor', 'cv2.cvtColor', (['t_verticle_lines', 'cv2.COLOR_GRAY2BGR'], {}), '(t_verticle_lines, cv2.COLOR_GRAY2BGR)\n', (9679, 9717), False, 'import cv2\n'), ((10671, 10756), 'cv2.threshold', 'cv2.threshold', (['t_horizontal_lines', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(t_horizontal_lines, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n    )\n', (10684, 10756), False, 'import cv2\n'), ((11010, 11086), 'cv2.findContours', 'cv2.findContours', (['t_horizontal_lines', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(t_horizontal_lines, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (11026, 11086), False, 'import cv2\n'), ((11404, 11423), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (11420, 11423), False, 'import cv2\n'), ((12273, 12325), 'cv2.cvtColor', 'cv2.cvtColor', (['t_horizontal_lines', 'cv2.COLOR_GRAY2BGR'], {}), '(t_horizontal_lines, cv2.COLOR_GRAY2BGR)\n', (12285, 12325), False, 'import cv2\n'), ((1124, 1139), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1132, 1139), True, 'import numpy as np\n'), ((6336, 6393), 'cv2.threshold', 'cv2.threshold', (['temp_date', '(230)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(temp_date, 230, 255, cv2.THRESH_BINARY_INV)\n', (6349, 6393), False, 'import cv2\n'), ((7053, 7072), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7070, 7072), False, 'import logging\n'), ((8332, 8351), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8349, 8351), False, 'import logging\n'), ((9598, 9617), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (9615, 9617), False, 'import logging\n'), ((9771, 9842), 'cv2.line', 'cv2.line', (['debug_img', '(v[0], 0)', '(v[0], debug_img.shape[0])', '(0, 0, 255)'], {}), '(debug_img, (v[0], 0), (v[0], debug_img.shape[0]), (0, 0, 255))\n', (9779, 9842), False, 'import cv2\n'), ((9884, 9955), 'cv2.line', 'cv2.line', (['debug_img', '(v[1], 0)', '(v[1], debug_img.shape[0])', '(0, 0, 255)'], {}), '(debug_img, (v[1], 0), (v[1], debug_img.shape[0]), (0, 0, 255))\n', (9892, 9955), False, 'import cv2\n'), ((10780, 10799), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (10797, 10799), False, 'import logging\n'), ((12204, 12223), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (12221, 12223), False, 'import logging\n'), ((12381, 12452), 'cv2.line', 'cv2.line', (['debug_img', '(0, h[0])', '(debug_img.shape[1], h[0])', '(0, 0, 255)'], {}), '(debug_img, (0, h[0]), (debug_img.shape[1], h[0]), (0, 0, 255))\n', (12389, 12452), False, 'import cv2\n'), ((12494, 12565), 'cv2.line', 'cv2.line', (['debug_img', '(0, h[1])', '(debug_img.shape[1], h[1])', '(0, 0, 255)'], {}), '(debug_img, (0, h[1]), (debug_img.shape[1], h[1]), (0, 0, 255))\n', (12502, 12565), False, 'import cv2\n'), ((7575, 7598), 'numpy.array', 'np.array', (['table_compute'], {}), '(table_compute)\n', (7583, 7598), True, 'import numpy as np\n'), ((13145, 13164), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (13162, 13164), False, 'import logging\n')] |
from molsysmt._private_tools.exceptions import *
import numpy as np
from .groups.aminoacid import name as aminoacid_names
from .groups.water import name as water_names
from .groups.ion import name as ion_names
from .groups.nucleotide import name as nucleotide_names
from .groups.lipid import name as lipid_names
from .groups.cosolute import name as cosolute_names
types = ['water', 'ion', 'cosolute', 'small molecule', 'aminoacid', 'nucleotide', 'lipid']
def name_to_type(name):
tmp_type = None
if _name_is_type_water(name):
tmp_type = 'water'
elif _name_is_type_ion(name):
tmp_type = 'ion'
elif _name_is_type_cosolute(name):
tmp_type = 'cosolute'
elif _name_is_type_small_molecule(name):
tmp_type = 'small molecule'
elif _name_is_type_aminoacid(name):
tmp_type = 'aminoacid'
elif _name_is_type_nucleotide(name):
        tmp_type = 'nucleotide'
elif _name_is_type_lipid(name):
tmp_type = 'lipid'
else:
tmp_type = 'small molecule'
return tmp_type
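# Hedged usage sketch of `name_to_type` (assumption: the residue names 'HOH'
# and 'ALA' appear in the imported water/aminoacid name tables; check the
# actual `groups` data files before relying on these exact keys):
#   >>> name_to_type('HOH')
#   'water'
#   >>> name_to_type('ALA')
#   'aminoacid'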
def _name_is_type_water(name):
return (name in water_names)
def _name_is_type_ion(name):
return (name in ion_names)
def _name_is_type_cosolute(name):
return (name in cosolute_names)
def _name_is_type_small_molecule(name):
return False
def _name_is_type_aminoacid(name):
return (name in aminoacid_names)
def _name_is_type_nucleotide(name):
return (name in nucleotide_names)
def _name_is_type_lipid(name):
return (name in lipid_names)
def group_type_from_group(item, indices='all'):
from molsysmt import get
group_names = get(item, target='group', indices=indices, group_name=True)
    output = np.vectorize(name_to_type)(group_names).astype('object')
return output
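# Minimal usage sketch (the variable `molsys` and the index list below are
# illustrative only; any item accepted by molsysmt's `get` would work):
#   group_types = group_type_from_group(molsys, indices=[0, 1, 2])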
| [
"numpy.vectorize",
"molsysmt.get"
] | [((1609, 1668), 'molsysmt.get', 'get', (['item'], {'target': '"""group"""', 'indices': 'indices', 'group_name': '(True)'}), "(item, target='group', indices=indices, group_name=True)\n", (1612, 1668), False, 'from molsysmt import get\n'), ((1681, 1707), 'numpy.vectorize', 'np.vectorize', (['name_to_type'], {}), '(name_to_type)\n', (1693, 1707), True, 'import numpy as np\n')] |
"""Editor related to `Shapes`.
As you can easily assumes, `editor` is a high-level api, so
* This sub-module can call other more premitive api freely.
* On contrary, the more premitive sub-modules should not call this.
"""
import numpy as np
import _ctypes
from pywintypes import com_error
from fairypptx import constants
from fairypptx.shape import Shape, Shapes
from fairypptx.shape import Box
from fairypptx.table import Table
from fairypptx.object_utils import is_object
from typing import Sequence
def _to_shapes(arg):
"""Convert to `Shapes`."""
if isinstance(arg, Shapes):
return arg
elif isinstance(arg, Sequence):
return Shapes(arg)
elif isinstance(arg, Shape):
return Shapes([arg])
elif is_object(arg, "Shapes"):
return Shapes(arg)
elif is_object(arg, "Shape"):
return Shapes(arg)
raise ValueError(f"Cannot interpret `{arg}`.")
class ShapesEncloser:
"""Enclose the specified shapes.
"""
def __init__(self,
line=3,
fill=None,
linecolor=(0, 0, 0),
*,
margin=0.10,
left_margin=None,
top_margin=None,
right_margin=None,
bottom_margin=None,
):
self.line = line
self.fill = fill
self.linecolor = linecolor
self.margin = margin
self.left_margin = left_margin
self.top_margin = top_margin
self.right_margin = right_margin
self.bottom_margin = bottom_margin
def _margin_solver(self, c_box):
"""Solves the margin of
it returns the actual pixel(float) of margin. (i.e. not ratio)
(left_margin, top_margin, right_margin, bottom_margin).
"""
def _to_pixel(margin, length):
if isinstance(margin, float) and abs(margin) < 1.0:
return length * margin
else:
return margin
def _solve_margin(first_value, length):
value = first_value
if value is None:
value = self.margin
assert value is not None
return _to_pixel(value, length)
left_margin = _solve_margin(self.left_margin, c_box.x_length)
top_margin = _solve_margin(self.top_margin, c_box.y_length)
right_margin = _solve_margin(self.right_margin, c_box.x_length)
bottom_margin = _solve_margin(self.bottom_margin, c_box.y_length)
return (left_margin, top_margin, right_margin, bottom_margin)
def __call__(self, shapes):
if not shapes:
return None
shapes = _to_shapes(shapes)
c_box = shapes.circumscribed_box
left_margin, top_margin, right_margin, bottom_margin = self._margin_solver(c_box)
width = c_box.width + (left_margin + right_margin)
height = c_box.height + (top_margin + bottom_margin)
shape = Shape.make(1)
shape.api.Top = c_box.top - top_margin
shape.api.Left = c_box.left - left_margin
shape.api.Width = width
shape.api.Height = height
shape.line = self.line
shape.fill = self.fill
if self.linecolor:
shape.line = self.linecolor
shape.api.Zorder(constants.msoSendToBack)
return Shapes(list(shapes) + [shape])
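# Hedged usage sketch for `ShapesEncloser` (requires a live PowerPoint session
# driven by fairypptx; `selected` stands in for any Shapes-like value):
#   framed = ShapesEncloser(line=2, fill=None, margin=0.15)(selected)
#   # `framed` contains the original shapes plus the new enclosing rectangle.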
class TitleProvider:
def __init__(self,
title,
fontsize=None,
fontcolor=(0, 0, 0),
fill=None,
line=None,
bold=True,
underline=False,
):
self.title = title
self.fontsize = fontsize
self.fontcolor = fontcolor
self.fill = fill
self.line = line
self.bold = bold
self.underline = underline
def __call__(self, shapes):
shapes = _to_shapes(shapes)
c_box = shapes.circumscribed_box
shape = Shape.make(1)
shape.fill = self.fill
shape.line = self.line
shape.textrange.text = self.title
shape.textrange.font.bold = self.bold
shape.textrange.font.underline = self.underline
shape.textrange.font.size = self._yield_fontsize(self.fontsize, shapes)
shape.textrange.font.color = self.fontcolor
shape.tighten()
shape.api.Top = c_box.top - shape.height
shape.api.Left = c_box.left
return shape
def _yield_fontsize(self, fontsize, shapes):
if fontsize is not None:
return fontsize
        fontsizes = []
for shape in shapes:
fontsizes.append(shape.textrange.font.size)
if fontsizes:
return max(fontsizes)
else:
return 18
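# Hedged usage sketch for `TitleProvider` (same live-PowerPoint assumption;
# the title text and styling below are arbitrary):
#   title_shape = TitleProvider("Results", bold=True, underline=False)(selected)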
class ShapesResizer:
"""Shapes Resizer.
    This class resizes each of the given shapes to the same size.
    Related Class
    -------------
    `shapes.BoundingResizer`: resizes the bounding box of the shapes instead.
"""
def __init__(self, size="max"):
self.size = size
def _yield_size(self, shapes):
"""Determine the return of size,
based on the given parameters.
"""
size = self.size
if isinstance(size, (list, tuple)):
width, height = size
elif isinstance(size, Shape):
shape = size
width, height = shape.width, shape.height
elif isinstance(size, str):
if size == "max":
width = max(shape.width for shape in shapes)
height = max(shape.height for shape in shapes)
else:
                raise NotImplementedError("Unsupported size string {!r}; it should be rejected in `__init__`.".format(size))
return width, height
def __call__(self, shapes):
width, height = self._yield_size(shapes)
for shape in shapes:
shape.width = width
shape.height = height
return shapes
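# Hedged usage sketch for `ShapesResizer`: with size="max" (the default),
# every shape grows to the largest width and height found among them.
#   resized = ShapesResizer(size="max")(selected)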
class BoundingResizer:
"""Resize the bounding box of `Shapes`.
Args:
size: 2-tuple. (width, height).
The expected width and height.
fontsize: (float)
            The expected minimum fontsize over the shapes.
"""
def __init__(self, size=None, *, fontsize=None):
self.size = size
self.fontsize = fontsize
def _to_minimum_fontsize(self, textrange):
fontsizes = set()
for run in textrange.runs:
if run.text:
fontsizes.add(run.font.size)
if fontsizes:
return min(fontsizes)
else:
return None
def _get_minimum_fontsize(self, shapes):
fontsizes = set()
for shape in shapes:
if shape.is_table():
table = Table(shape)
for row in table.rows:
for cell in row:
textrange = cell.shape.textrange
fontsize = self._to_minimum_fontsize(textrange)
if fontsize:
fontsizes.add(fontsize)
else:
try:
fontsize = self._to_minimum_fontsize(shape.textrange)
except com_error as e:
pass
else:
if fontsize:
fontsizes.add(fontsize)
if fontsizes:
return min(fontsizes)
else:
return None
def _set_fontsize(self, textrange, ratio):
for run in textrange.runs:
run.api.Font.Size = round(run.font.size * ratio)
def _yield_size(self, shapes):
"""Determine the the return of `size`.
* Priority
1. `fontsize`
2. `size`.
"""
size = self.size
fontsize = self.fontsize
# For fallback.
if size is None and fontsize is None:
fontsize = self._get_minimum_fontsize(shapes.slide.shapes)
if fontsize is None:
fontsize = 12
if fontsize is not None:
c_box = shapes.circumscribed_box
c_fontsize = self._get_minimum_fontsize(shapes)
ratio = fontsize / c_fontsize
size = ratio
        if isinstance(size, (int, float)):
c_box = shapes.circumscribed_box
c_width = c_box.x_length
c_height = c_box.y_length
n_width = c_width * size
n_height = c_height * size
elif isinstance(size, (list, tuple)):
n_width, n_height = size
else:
raise ValueError("Invalid size.", size)
return n_width, n_height
def __call__(self, shapes):
"""Perform `resize` for all the shapes.
        Not only does it change the size of each `Shape`,
        it also changes the size of `Font` proportionally.
Note:
It works only for shapes whose rotation is 0.
"""
        # If the given value is a `Shape`, then a `Shape` is returned.
if isinstance(shapes, Shape):
is_shape = True
else:
is_shape = False
if not shapes:
return shapes
shapes = _to_shapes(shapes)
n_width, n_height = self._yield_size(shapes)
c_box = shapes.circumscribed_box
width, height = c_box.x_length, c_box.y_length
pivot = (c_box.top, c_box.left) # [y_min, x_min]
ratios = (n_height / height, n_width / width)
ratio = np.mean(ratios)
for shape in shapes:
# Processings for all the shapes.
shape.api.Left = (shape.api.Left - pivot[1]) * ratios[1] + pivot[1]
shape.api.Width = shape.api.Width * ratios[1]
shape.api.Top = (shape.api.Top - pivot[0]) * ratios[0] + pivot[0]
shape.api.Height = shape.api.Height * ratios[0]
# For Table.
if shape.is_table():
table = Table(shape)
for row in table.rows:
for cell in row:
self._set_fontsize(cell.shape.textrange, ratio)
else:
try:
self._set_fontsize(shape.textrange, ratio)
except com_error as e:
pass
if not is_shape:
return Shapes(shapes)
else:
return shapes[0]
if __name__ == "__main__":
pass
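    # Hedged demo, left commented out because it needs an open PowerPoint
    # document (`Shape.make(1)` and the offsets below are illustrative):
    # s1 = Shape.make(1)
    # s2 = Shape.make(1)
    # s2.api.Left = s1.api.Left + 200
    # framed = ShapesEncloser(line=2, margin=0.1)(Shapes([s1, s2]))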
| [
"numpy.mean",
"fairypptx.table.Table",
"fairypptx.shape.Shapes",
"fairypptx.shape.Shape.make",
"fairypptx.object_utils.is_object"
] | [((3004, 3017), 'fairypptx.shape.Shape.make', 'Shape.make', (['(1)'], {}), '(1)\n', (3014, 3017), False, 'from fairypptx.shape import Shape, Shapes\n'), ((4017, 4030), 'fairypptx.shape.Shape.make', 'Shape.make', (['(1)'], {}), '(1)\n', (4027, 4030), False, 'from fairypptx.shape import Shape, Shapes\n'), ((9486, 9501), 'numpy.mean', 'np.mean', (['ratios'], {}), '(ratios)\n', (9493, 9501), True, 'import numpy as np\n'), ((710, 721), 'fairypptx.shape.Shapes', 'Shapes', (['arg'], {}), '(arg)\n', (716, 721), False, 'from fairypptx.shape import Shape, Shapes\n'), ((10310, 10324), 'fairypptx.shape.Shapes', 'Shapes', (['shapes'], {}), '(shapes)\n', (10316, 10324), False, 'from fairypptx.shape import Shape, Shapes\n'), ((770, 783), 'fairypptx.shape.Shapes', 'Shapes', (['[arg]'], {}), '([arg])\n', (776, 783), False, 'from fairypptx.shape import Shape, Shapes\n'), ((793, 817), 'fairypptx.object_utils.is_object', 'is_object', (['arg', '"""Shapes"""'], {}), "(arg, 'Shapes')\n", (802, 817), False, 'from fairypptx.object_utils import is_object\n'), ((6789, 6801), 'fairypptx.table.Table', 'Table', (['shape'], {}), '(shape)\n', (6794, 6801), False, 'from fairypptx.table import Table\n'), ((9938, 9950), 'fairypptx.table.Table', 'Table', (['shape'], {}), '(shape)\n', (9943, 9950), False, 'from fairypptx.table import Table\n'), ((834, 845), 'fairypptx.shape.Shapes', 'Shapes', (['arg'], {}), '(arg)\n', (840, 845), False, 'from fairypptx.shape import Shape, Shapes\n'), ((855, 878), 'fairypptx.object_utils.is_object', 'is_object', (['arg', '"""Shape"""'], {}), "(arg, 'Shape')\n", (864, 878), False, 'from fairypptx.object_utils import is_object\n'), ((895, 906), 'fairypptx.shape.Shapes', 'Shapes', (['arg'], {}), '(arg)\n', (901, 906), False, 'from fairypptx.shape import Shape, Shapes\n')] |
# Purpose: This script calculates StreetConnectivity (3 plus leg intersections per km2)
# It outputs PFI, 3-plus-leg intersection counts, and street connectivity to an SQL database.
# Buffer area is referenced in SQL table nh1600m
# Author: <NAME>
import arcpy
import os
import sys
import time
import psycopg2
import numpy as np
from progressor import progressor
from script_running_log import script_running_log
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read(os.path.join(sys.path[0],'config.ini'))
# simple timer for log file
start = time.time()
script = os.path.basename(sys.argv[0])
task = "Calculate StreetConnectivity (3 plus leg intersections per km2)"
def basename(filePath):
    '''Strip a path to the basename of the file, without the extension. Requires the os module.'''
try:
return os.path.basename(os.path.normpath(filePath)).split(".",1)[0]
except:
print('Return basename failed. Did you import os?')
# INPUT PARAMETERS
folderPath = parser.get('data', 'folderPath')
urbanGDB = os.path.join(folderPath,parser.get('data', 'workspace'))
arcpy.env.workspace = urbanGDB
arcpy.env.overwriteOutput = True
sde_connection = parser.get('postgresql', 'sde_connection')
srid = int(parser.get('workspace', 'srid'))
## specify locations
points = parser.get('parcels','parcel_dwellings')
denominator = int(arcpy.GetCount_management(points).getOutput(0))
# specify the unique location identifier
pointsID = parser.get('parcels', 'parcel_id')
intersections = basename(os.path.join(folderPath,parser.get('roads', 'intersections')))
arcpy.MakeFeatureLayer_management(intersections, "intersections")
intersection_count = int(arcpy.GetCount_management("intersections").getOutput(0))
distance = int(parser.get('network', 'distance'))
sausage_buffer_table = "sausagebuffer_{}".format(distance)
fields = [pointsID]
# SQL Settings - storing passwords in plain text is obviously not ideal
sqlDBName = parser.get('postgresql', 'database')
sqlUserName = parser.get('postgresql', 'user')
sqlPWD = parser.get('postgresql', 'password')
# output tables
intersections_table = "intersections_3plus_way"
street_connectivity_table = "street_connectivity"
# Size of tuple chunk sent to postgresql
sqlChunkify = 1000
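# Rows are buffered in memory and flushed to PostgreSQL whenever the buffer
# reaches `sqlChunkify` VALUES tuples; one multi-row INSERT per batch keeps
# round trips and statement sizes manageable (see the export loops below).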
# Define query to create table
createTable_intersections = '''
CREATE TABLE IF NOT EXISTS {0}
(OBJECTID bigint PRIMARY KEY,
geom geometry NOT NULL);
'''.format(intersections_table)
queryPartA_intersections = '''
INSERT INTO {} VALUES
'''.format(intersections_table)
createTable_sc = '''
CREATE TABLE IF NOT EXISTS {0}
({1} varchar PRIMARY KEY,
intersection_count integer NOT NULL,
area_sqkm double precision NOT NULL,
sc_nh1600m double precision NOT NULL
);
'''.format(street_connectivity_table,pointsID.lower())
sc_query_A = '''
INSERT INTO {0} ({1},intersection_count,area_sqkm,sc_nh1600m)
  (SELECT {1}, COALESCE(COUNT({2}),0) AS intersection_count,area_sqkm, COALESCE(COUNT({2}),0)/area_sqkm AS sc_nh1600m
FROM {2}
LEFT JOIN
(SELECT {3}.{1},area_sqkm,geom FROM nh1600m LEFT JOIN {3} ON nh1600m.{1} = {3}.{1})
AS sp_temp
ON ST_Intersects(sp_temp.geom, {2}.geom)
WHERE {1} IN
'''.format(street_connectivity_table,pointsID.lower(),intersections_table,sausage_buffer_table)
sc_query_C = '''
GROUP BY {},area_sqkm) ON CONFLICT DO NOTHING;
'''.format(pointsID.lower())
def unique_values(table, field):
data = arcpy.da.TableToNumPyArray(table, [field])
return np.unique(data[field])
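# Hedged example of `unique_values` (the arguments are illustrative; any
# attribute field of the parcel feature class works the same way):
#   parcel_ids = unique_values(points, pointsID)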
# OUTPUT PROCESS
# Connect to postgreSQL server
try:
conn = psycopg2.connect(database=sqlDBName, user=sqlUserName, password=sqlPWD)
curs = conn.cursor()
print("Connection to SQL success {}".format(time.strftime("%Y%m%d-%H%M%S")) )
# drop table if it already exists
print("create table {}... ".format(intersections_table)),
subTaskStart = time.time()
curs.execute(createTable_intersections)
conn.commit()
print("{:4.2f} mins.".format((time.time() - start)/60))
except:
print("SQL connection error {}".format(time.strftime("%Y%m%d-%H%M%S")) )
print(sys.exc_info()[0])
print(sys.exc_info()[1])
raise
# export intersections to PostGIS feature
intersections_to_postgis = time.time()
count = 0
chunkedLines = list()
progressor(count,intersection_count,intersections_to_postgis,"Exporting intersections: {}".format(count))
with arcpy.da.SearchCursor("intersections", ["OBJECTID", "SHAPE@WKT"]) as cursor:
for row in cursor:
count += 1
wkt = row[1].encode('utf-8').replace(' NAN','').replace(' M ','')
chunkedLines.append("({0},ST_GeometryFromText('{1}', {2}))".format(row[0],wkt,srid))
if (count % sqlChunkify == 0) :
curs.execute(queryPartA_intersections + ','.join(rowOfChunk for rowOfChunk in chunkedLines)+' ON CONFLICT DO NOTHING')
conn.commit()
chunkedLines = list()
progressor(count,intersection_count,intersections_to_postgis,"Exporting intersections: {}".format(count))
if(count % sqlChunkify != 0):
curs.execute(queryPartA_intersections + ','.join(rowOfChunk for rowOfChunk in chunkedLines)+' ON CONFLICT DO NOTHING')
conn.commit()
progressor(count,intersection_count,intersections_to_postgis,"Exporting intersections: {}".format(count))
# Create sausage buffer spatial index
print("Creating intersections spatial index... "),
curs.execute("CREATE INDEX IF NOT EXISTS {0}_gix ON {0} USING GIST (geom);".format(intersections_table))
conn.commit()
print("Done.")
print("Analyze the intersections table to improve performance.")
curs.execute("ANALYZE {};".format(intersections_table))
conn.commit()
print("Done.")
# Now calculate street connectivity (three way intersections w/ in nh1600m/area in km2)
print("create table {}... ".format(street_connectivity_table)),
subTaskStart = time.time()
curs.execute(createTable_sc)
conn.commit()
print("{:4.2f} mins.".format((time.time() - start)/60))
print("fetch list of processed parcels, if any..."),
# (for string match to work, had to select first item of returned tuple)
curs.execute("SELECT {} FROM {}".format(pointsID.lower(),sausage_buffer_table))
raw_point_id_list = list(curs)
raw_point_id_list = [x[0] for x in raw_point_id_list]
curs.execute("SELECT {} FROM {}".format(pointsID.lower(),street_connectivity_table))
completed_points = list(curs)
completed_points = [x[0] for x in completed_points]
point_id_list = [x for x in raw_point_id_list if x not in completed_points]
print("Done.")
denom = len(point_id_list)
count = 0
chunkedPoints = list()
print("Processing points...")
for point in point_id_list:
count += 1
chunkedPoints.append(point)
if (count % sqlChunkify == 0) :
curs.execute('{} ({}) {}'.format(sc_query_A,','.join("'"+x+"'" for x in chunkedPoints),sc_query_C))
conn.commit()
chunkedPoints = list()
progressor(count,denom,start,"{}/{} points processed".format(count,denom))
if(count % sqlChunkify != 0):
curs.execute('{} ({}) {}'.format(sc_query_A,','.join("'"+x+"'" for x in chunkedPoints),sc_query_C))
conn.commit()
progressor(count,denom,start,"{}/{} points processed".format(count,denom))
# output to completion log
script_running_log(script, task, start)
# clean up
conn.close()
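# The chunked-INSERT pattern used above, distilled for reference (a hedged
# sketch; `curs`, `conn` and both arguments are placeholders, not variables
# defined by this script):
# def flush_chunk(curs, conn, insert_prefix, buffered_rows):
#     curs.execute(insert_prefix + ','.join(buffered_rows) + ' ON CONFLICT DO NOTHING')
#     conn.commit()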
| [
"psycopg2.connect",
"arcpy.da.TableToNumPyArray",
"numpy.unique",
"arcpy.MakeFeatureLayer_management",
"ConfigParser.SafeConfigParser",
"time.strftime",
"os.path.join",
"arcpy.da.SearchCursor",
"script_running_log.script_running_log",
"os.path.normpath",
"sys.exc_info",
"arcpy.GetCount_managem... | [((474, 492), 'ConfigParser.SafeConfigParser', 'SafeConfigParser', ([], {}), '()\n', (490, 492), False, 'from ConfigParser import SafeConfigParser\n'), ((582, 593), 'time.time', 'time.time', ([], {}), '()\n', (591, 593), False, 'import time\n'), ((603, 632), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (619, 632), False, 'import os\n'), ((1591, 1656), 'arcpy.MakeFeatureLayer_management', 'arcpy.MakeFeatureLayer_management', (['intersections', '"""intersections"""'], {}), "(intersections, 'intersections')\n", (1624, 1656), False, 'import arcpy\n'), ((4219, 4230), 'time.time', 'time.time', ([], {}), '()\n', (4228, 4230), False, 'import time\n'), ((5822, 5833), 'time.time', 'time.time', ([], {}), '()\n', (5831, 5833), False, 'import time\n'), ((7184, 7223), 'script_running_log.script_running_log', 'script_running_log', (['script', 'task', 'start'], {}), '(script, task, start)\n', (7202, 7223), False, 'from script_running_log import script_running_log\n'), ((505, 544), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""config.ini"""'], {}), "(sys.path[0], 'config.ini')\n", (517, 544), False, 'import os\n'), ((3438, 3480), 'arcpy.da.TableToNumPyArray', 'arcpy.da.TableToNumPyArray', (['table', '[field]'], {}), '(table, [field])\n', (3464, 3480), False, 'import arcpy\n'), ((3490, 3512), 'numpy.unique', 'np.unique', (['data[field]'], {}), '(data[field])\n', (3499, 3512), True, 'import numpy as np\n'), ((3581, 3652), 'psycopg2.connect', 'psycopg2.connect', ([], {'database': 'sqlDBName', 'user': 'sqlUserName', 'password': 'sqlPWD'}), '(database=sqlDBName, user=sqlUserName, password=sqlPWD)\n', (3597, 3652), False, 'import psycopg2\n'), ((3872, 3883), 'time.time', 'time.time', ([], {}), '()\n', (3881, 3883), False, 'import time\n'), ((4374, 4439), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['"""intersections"""', "['OBJECTID', 'SHAPE@WKT']"], {}), "('intersections', ['OBJECTID', 'SHAPE@WKT'])\n", (4395, 4439), False, 'import arcpy\n'), ((1364, 1397), 'arcpy.GetCount_management', 'arcpy.GetCount_management', (['points'], {}), '(points)\n', (1389, 1397), False, 'import arcpy\n'), ((1682, 1724), 'arcpy.GetCount_management', 'arcpy.GetCount_management', (['"""intersections"""'], {}), "('intersections')\n", (1707, 1724), False, 'import arcpy\n'), ((3722, 3752), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (3735, 3752), False, 'import time\n'), ((4053, 4083), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (4066, 4083), False, 'import time\n'), ((4095, 4109), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4107, 4109), False, 'import sys\n'), ((4122, 4136), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4134, 4136), False, 'import sys\n'), ((5907, 5918), 'time.time', 'time.time', ([], {}), '()\n', (5916, 5918), False, 'import time\n'), ((3974, 3985), 'time.time', 'time.time', ([], {}), '()\n', (3983, 3985), False, 'import time\n'), ((851, 877), 'os.path.normpath', 'os.path.normpath', (['filePath'], {}), '(filePath)\n', (867, 877), False, 'import os\n')] |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import numpy as np
import pytest
import scipy.stats as st
from aesara import tensor as at
from numpy.testing import assert_allclose
from scipy.special import logsumexp
import pymc as pm
from pymc import (
Dirichlet,
Exponential,
Gamma,
LogNormal,
Metropolis,
Mixture,
Model,
MvNormal,
Normal,
NormalMixture,
Poisson,
sample,
)
from pymc.aesaraf import floatX
from pymc.distributions.shape_utils import to_tuple
from pymc.tests.helpers import SeededTest
pytestmark = pytest.mark.xfail(reason="Mixture not refactored.")
# Generate data
def generate_normal_mixture_data(w, mu, sd, size=1000):
component = np.random.choice(w.size, size=size, p=w)
mu, sd = np.broadcast_arrays(mu, sd)
out_size = to_tuple(size) + mu.shape[:-1]
mu_ = np.array([mu[..., comp] for comp in component.ravel()])
sd_ = np.array([sd[..., comp] for comp in component.ravel()])
mu_ = np.reshape(mu_, out_size)
sd_ = np.reshape(sd_, out_size)
x = np.random.normal(mu_, sd_, size=out_size)
return x
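# Quick sanity sketch (the weights, locations, and size are illustrative;
# with 1-D component parameters and a scalar size, the draw is a flat array):
#   x = generate_normal_mixture_data(np.array([0.5, 0.5]),
#                                    np.array([-3.0, 3.0]),
#                                    np.array([1.0, 1.0]), size=500)
#   assert x.shape == (500,)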
def generate_poisson_mixture_data(w, mu, size=1000):
component = np.random.choice(w.size, size=size, p=w)
mu = np.atleast_1d(mu)
out_size = to_tuple(size) + mu.shape[:-1]
mu_ = np.array([mu[..., comp] for comp in component.ravel()])
mu_ = np.reshape(mu_, out_size)
x = np.random.poisson(mu_, size=out_size)
return x
class TestMixture(SeededTest):
@classmethod
def setup_class(cls):
super().setup_class()
cls.norm_w = np.array([0.75, 0.25])
cls.norm_mu = np.array([0.0, 5.0])
cls.norm_sd = np.ones_like(cls.norm_mu)
cls.norm_x = generate_normal_mixture_data(cls.norm_w, cls.norm_mu, cls.norm_sd, size=1000)
cls.pois_w = np.array([0.4, 0.6])
cls.pois_mu = np.array([5.0, 20.0])
cls.pois_x = generate_poisson_mixture_data(cls.pois_w, cls.pois_mu, size=1000)
def test_dimensions(self):
a1 = Normal.dist(mu=0, sigma=1)
a2 = Normal.dist(mu=10, sigma=1)
mix = Mixture.dist(w=np.r_[0.5, 0.5], comp_dists=[a1, a2])
assert mix.mode.ndim == 0
assert mix.logp(0.0).ndim == 0
value = np.r_[0.0, 1.0, 2.0]
assert mix.logp(value).ndim == 1
def test_mixture_list_of_normals(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
Mixture(
"x_obs",
w,
[Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
observed=self.norm_x,
)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
)
def test_normal_mixture(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
NormalMixture("x_obs", w, mu, tau=tau, observed=self.norm_x)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
)
@pytest.mark.parametrize(
"nd,ncomp", [(tuple(), 5), (1, 5), (3, 5), ((3, 3), 5), (3, 3), ((3, 3), 3)], ids=str
)
def test_normal_mixture_nd(self, nd, ncomp):
nd = to_tuple(nd)
ncomp = int(ncomp)
comp_shape = nd + (ncomp,)
test_mus = np.random.randn(*comp_shape)
test_taus = np.random.gamma(1, 1, size=comp_shape)
observed = generate_normal_mixture_data(
w=np.ones(ncomp) / ncomp, mu=test_mus, sd=1 / np.sqrt(test_taus), size=10
)
with Model() as model0:
mus = Normal("mus", shape=comp_shape)
taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
obs0 = NormalMixture(
"obs", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape, observed=observed
)
with Model() as model1:
mus = Normal("mus", shape=comp_shape)
taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
comp_dist = [
Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
]
mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, shape=nd, observed=observed)
with Model() as model2:
# Expected to fail if comp_shape is not provided,
# nd is multidim and it does not broadcast with ncomp. If by chance
# it does broadcast, an error is raised if the mixture is given
# observed data.
# Furthermore, the Mixture will also raise errors when the observed
# data is multidimensional but it does not broadcast well with
# comp_dists.
mus = Normal("mus", shape=comp_shape)
taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
if len(nd) > 1:
if nd[-1] != ncomp:
with pytest.raises(ValueError):
NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
mixture2 = None
else:
mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
else:
mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
observed_fails = False
if len(nd) >= 1 and nd != (1,):
try:
np.broadcast(np.empty(comp_shape), observed)
except Exception:
observed_fails = True
if observed_fails:
with pytest.raises(ValueError):
NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
obs2 = None
else:
obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
testpoint = model0.initial_point
testpoint["mus"] = test_mus
testpoint["taus"] = test_taus
assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
if mixture2 is not None and obs2 is not None:
assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
if mixture2 is not None:
assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
if obs2 is not None:
assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
def test_poisson_mixture(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
Mixture("x_obs", w, Poisson.dist(mu), observed=self.pois_x)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
)
def test_mixture_list_of_poissons(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=self.pois_x)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
)
def test_mixture_of_mvn(self):
mu1 = np.asarray([0.0, 1.0])
cov1 = np.diag([1.5, 2.5])
mu2 = np.asarray([1.0, 0.0])
cov2 = np.diag([2.5, 3.5])
obs = np.asarray([[0.5, 0.5], mu1, mu2])
with Model() as model:
w = Dirichlet("w", floatX(np.ones(2)), transform=None, shape=(2,))
mvncomp1 = MvNormal.dist(mu=mu1, cov=cov1)
mvncomp2 = MvNormal.dist(mu=mu2, cov=cov2)
y = Mixture("x_obs", w, [mvncomp1, mvncomp2], observed=obs)
# check logp of each component
complogp_st = np.vstack(
(
st.multivariate_normal.logpdf(obs, mu1, cov1),
st.multivariate_normal.logpdf(obs, mu2, cov2),
)
).T
complogp = y.distribution._comp_logp(aesara.shared(obs)).eval()
assert_allclose(complogp, complogp_st)
# check logp of mixture
testpoint = model.initial_point
mixlogp_st = logsumexp(np.log(testpoint["w"]) + complogp_st, axis=-1, keepdims=False)
assert_allclose(y.logp_elemwise(testpoint), mixlogp_st)
# check logp of model
priorlogp = st.dirichlet.logpdf(
x=testpoint["w"],
alpha=np.ones(2),
)
assert_allclose(model.logp(testpoint), mixlogp_st.sum() + priorlogp)
def test_mixture_of_mixture(self):
if aesara.config.floatX == "float32":
rtol = 1e-4
else:
rtol = 1e-7
nbr = 4
with Model() as model:
# mixtures components
g_comp = Normal.dist(
mu=Exponential("mu_g", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
)
l_comp = LogNormal.dist(
mu=Exponential("mu_l", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
)
# weight vector for the mixtures
g_w = Dirichlet("g_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
l_w = Dirichlet("l_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
# mixture components
g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
# mixture of mixtures
mix_w = Dirichlet("mix_w", a=floatX(np.ones(2)), transform=None, shape=(2,))
mix = Mixture("mix", w=mix_w, comp_dists=[g_mix, l_mix], observed=np.exp(self.norm_x))
test_point = model.initial_point
def mixmixlogp(value, point):
floatX = aesara.config.floatX
priorlogp = (
st.dirichlet.logpdf(
x=point["g_w"],
alpha=np.ones(nbr) * 0.0000001,
).astype(floatX)
+ st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
+ st.dirichlet.logpdf(
x=point["l_w"],
alpha=np.ones(nbr) * 0.0000001,
).astype(floatX)
+ st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
+ st.dirichlet.logpdf(
x=point["mix_w"],
alpha=np.ones(2),
).astype(floatX)
)
complogp1 = st.norm.logpdf(x=value, loc=point["mu_g"]).astype(floatX)
mixlogp1 = logsumexp(
np.log(point["g_w"]).astype(floatX) + complogp1, axis=-1, keepdims=True
)
complogp2 = st.lognorm.logpdf(value, 1.0, 0.0, np.exp(point["mu_l"])).astype(floatX)
mixlogp2 = logsumexp(
np.log(point["l_w"]).astype(floatX) + complogp2, axis=-1, keepdims=True
)
complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
mixmixlogpg = logsumexp(
np.log(point["mix_w"]).astype(floatX) + complogp_mix, axis=-1, keepdims=False
)
return priorlogp, mixmixlogpg
value = np.exp(self.norm_x)[:, None]
priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
# check logp of mixture
assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
# check model logp
assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
# check input and check logp again
test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
test_point["mu_g"] = np.exp(np.random.randn(nbr))
priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
def test_sample_prior_and_posterior(self):
def build_toy_dataset(N, K):
pi = np.array([0.2, 0.5, 0.3])
mus = [[1, 1, 1], [-1, -1, -1], [2, -2, 0]]
stds = [[0.1, 0.1, 0.1], [0.1, 0.2, 0.2], [0.2, 0.3, 0.3]]
x = np.zeros((N, 3), dtype=np.float32)
            y = np.zeros((N,), dtype=int)  # `np.int` is a deprecated alias; the builtin is equivalent
for n in range(N):
k = np.argmax(np.random.multinomial(1, pi))
x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))
y[n] = k
return x, y
N = 100 # number of data points
K = 3 # number of mixture components
D = 3 # dimensionality of the data
X, y = build_toy_dataset(N, K)
with pm.Model() as model:
pi = pm.Dirichlet("pi", np.ones(K), shape=(K,))
comp_dist = []
mu = []
packed_chol = []
chol = []
for i in range(K):
mu.append(pm.Normal("mu%i" % i, 0, 10, shape=D))
packed_chol.append(
pm.LKJCholeskyCov(
"chol_cov_%i" % i, eta=2, n=D, sd_dist=pm.HalfNormal.dist(2.5)
)
)
chol.append(pm.expand_packed_triangular(D, packed_chol[i], lower=True))
comp_dist.append(pm.MvNormal.dist(mu=mu[i], chol=chol[i], shape=D))
pm.Mixture("x_obs", pi, comp_dist, observed=X)
with model:
idata = pm.sample(30, tune=10, chains=1)
n_samples = 20
with model:
ppc = pm.sample_posterior_predictive(idata, n_samples)
prior = pm.sample_prior_predictive(samples=n_samples)
assert ppc["x_obs"].shape == (n_samples,) + X.shape
assert prior["x_obs"].shape == (n_samples,) + X.shape
assert prior["mu0"].shape == (n_samples, D)
assert prior["chol_cov_0"].shape == (n_samples, D * (D + 1) // 2)
class TestMixtureVsLatent(SeededTest):
def setup_method(self, *args, **kwargs):
super().setup_method(*args, **kwargs)
self.nd = 3
self.npop = 3
        self.mus = at.as_tensor_variable(
            np.tile(np.reshape(np.arange(self.npop), (1, -1)), (self.nd, 1))
        )
def test_1d_w(self):
nd = self.nd
npop = self.npop
mus = self.mus
size = 100
with pm.Model() as model:
m = pm.NormalMixture(
"m", w=np.ones(npop) / npop, mu=mus, sigma=1e-5, comp_shape=(nd, npop), shape=nd
)
z = pm.Categorical("z", p=np.ones(npop) / npop)
latent_m = pm.Normal("latent_m", mu=mus[..., z], sigma=1e-5, shape=nd)
m_val = m.random(size=size)
latent_m_val = latent_m.random(size=size)
assert m_val.shape == latent_m_val.shape
# Test that each element in axis = -1 comes from the same mixture
# component
assert all(np.all(np.diff(m_val) < 1e-3, axis=-1))
assert all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
self.samples_from_same_distribution(m_val, latent_m_val)
self.logp_matches(m, latent_m, z, npop, model=model)
def test_2d_w(self):
nd = self.nd
npop = self.npop
mus = self.mus
size = 100
with pm.Model() as model:
m = pm.NormalMixture(
"m",
w=np.ones((nd, npop)) / npop,
mu=mus,
sigma=1e-5,
comp_shape=(nd, npop),
shape=nd,
)
z = pm.Categorical("z", p=np.ones(npop) / npop, shape=nd)
mu = at.as_tensor_variable([mus[i, z[i]] for i in range(nd)])
latent_m = pm.Normal("latent_m", mu=mu, sigma=1e-5, shape=nd)
m_val = m.random(size=size)
latent_m_val = latent_m.random(size=size)
assert m_val.shape == latent_m_val.shape
# Test that each element in axis = -1 can come from independent
# components
assert not all(np.all(np.diff(m_val) < 1e-3, axis=-1))
assert not all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
self.samples_from_same_distribution(m_val, latent_m_val)
self.logp_matches(m, latent_m, z, npop, model=model)
def samples_from_same_distribution(self, *args):
# Test if flattened samples distributions match (marginals match)
_, p_marginal = st.ks_2samp(*(s.flatten() for s in args))
# Test if correlations within non independent draws match
_, p_correlation = st.ks_2samp(
*(np.array([np.corrcoef(ss) for ss in s]).flatten() for s in args)
)
assert p_marginal >= 0.05 and p_correlation >= 0.05
def logp_matches(self, mixture, latent_mix, z, npop, model):
if aesara.config.floatX == "float32":
rtol = 1e-4
else:
rtol = 1e-7
test_point = model.initial_point
test_point["latent_m"] = test_point["m"]
mix_logp = mixture.logp(test_point)
logps = []
for component in range(npop):
test_point["z"] = component * np.ones(z.distribution.shape)
# Count the number of axes that should be broadcasted from z to
# modify the logp
sh1 = test_point["z"].shape
sh2 = test_point["latent_m"].shape
if len(sh1) > len(sh2):
sh2 = (1,) * (len(sh1) - len(sh2)) + sh2
elif len(sh2) > len(sh1):
sh1 = (1,) * (len(sh2) - len(sh1)) + sh1
reps = np.prod([s2 if s1 != s2 else 1 for s1, s2 in zip(sh1, sh2)])
z_logp = z.logp(test_point) * reps
logps.append(z_logp + latent_mix.logp(test_point))
latent_mix_logp = logsumexp(np.array(logps), axis=0)
assert_allclose(mix_logp, latent_mix_logp, rtol=rtol)
class TestMixtureSameFamily(SeededTest):
@classmethod
def setup_class(cls):
super().setup_class()
cls.size = 50
cls.n_samples = 1000
cls.mixture_comps = 10
@pytest.mark.parametrize("batch_shape", [(3, 4), (20,)], ids=str)
def test_with_multinomial(self, batch_shape):
p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
n = 100 * np.ones((*batch_shape, 1))
w = np.ones(self.mixture_comps) / self.mixture_comps
mixture_axis = len(batch_shape)
with pm.Model() as model:
comp_dists = pm.Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
mixture = pm.MixtureSameFamily(
"mixture",
w=w,
comp_dists=comp_dists,
mixture_axis=mixture_axis,
shape=(*batch_shape, 3),
)
prior = pm.sample_prior_predictive(samples=self.n_samples)
assert prior["mixture"].shape == (self.n_samples, *batch_shape, 3)
assert mixture.random(size=self.size).shape == (self.size, *batch_shape, 3)
if aesara.config.floatX == "float32":
rtol = 1e-4
else:
rtol = 1e-7
comp_logp = comp_dists.logp(model.initial_point["mixture"].reshape(*batch_shape, 1, 3))
log_sum_exp = logsumexp(
comp_logp.eval() + np.log(w)[..., None], axis=mixture_axis, keepdims=True
).sum()
assert_allclose(
model.logp(model.initial_point),
log_sum_exp,
rtol,
)
# TODO: Handle case when `batch_shape` == `sample_shape`.
# See https://github.com/pymc-devs/pymc/issues/4185 for details.
def test_with_mvnormal(self):
# 10 batch, 3-variate Gaussian
mu = np.random.randn(self.mixture_comps, 3)
mat = np.random.randn(3, 3)
cov = mat @ mat.T
chol = np.linalg.cholesky(cov)
w = np.ones(self.mixture_comps) / self.mixture_comps
with pm.Model() as model:
comp_dists = pm.MvNormal.dist(mu=mu, chol=chol, shape=(self.mixture_comps, 3))
mixture = pm.MixtureSameFamily(
"mixture", w=w, comp_dists=comp_dists, mixture_axis=0, shape=(3,)
)
prior = pm.sample_prior_predictive(samples=self.n_samples)
assert prior["mixture"].shape == (self.n_samples, 3)
assert mixture.random(size=self.size).shape == (self.size, 3)
if aesara.config.floatX == "float32":
rtol = 1e-4
else:
rtol = 1e-7
comp_logp = comp_dists.logp(model.initial_point["mixture"].reshape(1, 3))
log_sum_exp = logsumexp(
comp_logp.eval() + np.log(w)[..., None], axis=0, keepdims=True
).sum()
assert_allclose(
model.logp(model.initial_point),
log_sum_exp,
rtol,
)
def test_broadcasting_in_shape(self):
with pm.Model() as model:
mu = pm.Gamma("mu", 1.0, 1.0, shape=2)
comp_dists = pm.Poisson.dist(mu, shape=2)
mix = pm.MixtureSameFamily(
"mix", w=np.ones(2) / 2, comp_dists=comp_dists, shape=(1000,)
)
prior = pm.sample_prior_predictive(samples=self.n_samples)
assert prior["mix"].shape == (self.n_samples, 1000)
| [
"pymc.Normal.dist",
"pymc.MixtureSameFamily",
"numpy.sqrt",
"aesara.shared",
"pymc.MvNormal.dist",
"numpy.log",
"numpy.array",
"scipy.stats.norm.logpdf",
"numpy.arange",
"numpy.reshape",
"numpy.random.poisson",
"pytest.mark.xfail",
"numpy.testing.assert_allclose",
"numpy.sort",
"numpy.as... | [((1136, 1187), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Mixture not refactored."""'}), "(reason='Mixture not refactored.')\n", (1153, 1187), False, 'import pytest\n'), ((1277, 1317), 'numpy.random.choice', 'np.random.choice', (['w.size'], {'size': 'size', 'p': 'w'}), '(w.size, size=size, p=w)\n', (1293, 1317), True, 'import numpy as np\n'), ((1331, 1358), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['mu', 'sd'], {}), '(mu, sd)\n', (1350, 1358), True, 'import numpy as np\n'), ((1547, 1572), 'numpy.reshape', 'np.reshape', (['mu_', 'out_size'], {}), '(mu_, out_size)\n', (1557, 1572), True, 'import numpy as np\n'), ((1583, 1608), 'numpy.reshape', 'np.reshape', (['sd_', 'out_size'], {}), '(sd_, out_size)\n', (1593, 1608), True, 'import numpy as np\n'), ((1617, 1658), 'numpy.random.normal', 'np.random.normal', (['mu_', 'sd_'], {'size': 'out_size'}), '(mu_, sd_, size=out_size)\n', (1633, 1658), True, 'import numpy as np\n'), ((1744, 1784), 'numpy.random.choice', 'np.random.choice', (['w.size'], {'size': 'size', 'p': 'w'}), '(w.size, size=size, p=w)\n', (1760, 1784), True, 'import numpy as np\n'), ((1794, 1811), 'numpy.atleast_1d', 'np.atleast_1d', (['mu'], {}), '(mu)\n', (1807, 1811), True, 'import numpy as np\n'), ((1934, 1959), 'numpy.reshape', 'np.reshape', (['mu_', 'out_size'], {}), '(mu_, out_size)\n', (1944, 1959), True, 'import numpy as np\n'), ((1968, 2005), 'numpy.random.poisson', 'np.random.poisson', (['mu_'], {'size': 'out_size'}), '(mu_, size=out_size)\n', (1985, 2005), True, 'import numpy as np\n'), ((20545, 20609), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_shape"""', '[(3, 4), (20,)]'], {'ids': 'str'}), "('batch_shape', [(3, 4), (20,)], ids=str)\n", (20568, 20609), False, 'import pytest\n'), ((1374, 1388), 'pymc.distributions.shape_utils.to_tuple', 'to_tuple', (['size'], {}), '(size)\n', (1382, 1388), False, 'from pymc.distributions.shape_utils import to_tuple\n'), ((1827, 1841), 'pymc.distributions.shape_utils.to_tuple', 'to_tuple', (['size'], {}), '(size)\n', (1835, 1841), False, 'from pymc.distributions.shape_utils import to_tuple\n'), ((2148, 2170), 'numpy.array', 'np.array', (['[0.75, 0.25]'], {}), '([0.75, 0.25])\n', (2156, 2170), True, 'import numpy as np\n'), ((2193, 2213), 'numpy.array', 'np.array', (['[0.0, 5.0]'], {}), '([0.0, 5.0])\n', (2201, 2213), True, 'import numpy as np\n'), ((2236, 2261), 'numpy.ones_like', 'np.ones_like', (['cls.norm_mu'], {}), '(cls.norm_mu)\n', (2248, 2261), True, 'import numpy as np\n'), ((2383, 2403), 'numpy.array', 'np.array', (['[0.4, 0.6]'], {}), '([0.4, 0.6])\n', (2391, 2403), True, 'import numpy as np\n'), ((2426, 2447), 'numpy.array', 'np.array', (['[5.0, 20.0]'], {}), '([5.0, 20.0])\n', (2434, 2447), True, 'import numpy as np\n'), ((2580, 2606), 'pymc.Normal.dist', 'Normal.dist', ([], {'mu': '(0)', 'sigma': '(1)'}), '(mu=0, sigma=1)\n', (2591, 2606), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((2620, 2647), 'pymc.Normal.dist', 'Normal.dist', ([], {'mu': '(10)', 'sigma': '(1)'}), '(mu=10, sigma=1)\n', (2631, 2647), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((2662, 2714), 'pymc.Mixture.dist', 'Mixture.dist', ([], {'w': 'np.r_[0.5, 0.5]', 'comp_dists': '[a1, a2]'}), '(w=np.r_[0.5, 0.5], comp_dists=[a1, a2])\n', (2674, 2714), False, 'from pymc import Dirichlet, 
Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((4626, 4638), 'pymc.distributions.shape_utils.to_tuple', 'to_tuple', (['nd'], {}), '(nd)\n', (4634, 4638), False, 'from pymc.distributions.shape_utils import to_tuple\n'), ((4720, 4748), 'numpy.random.randn', 'np.random.randn', (['*comp_shape'], {}), '(*comp_shape)\n', (4735, 4748), True, 'import numpy as np\n'), ((4769, 4807), 'numpy.random.gamma', 'np.random.gamma', (['(1)', '(1)'], {'size': 'comp_shape'}), '(1, 1, size=comp_shape)\n', (4784, 4807), True, 'import numpy as np\n'), ((9622, 9644), 'numpy.asarray', 'np.asarray', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (9632, 9644), True, 'import numpy as np\n'), ((9660, 9679), 'numpy.diag', 'np.diag', (['[1.5, 2.5]'], {}), '([1.5, 2.5])\n', (9667, 9679), True, 'import numpy as np\n'), ((9694, 9716), 'numpy.asarray', 'np.asarray', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (9704, 9716), True, 'import numpy as np\n'), ((9732, 9751), 'numpy.diag', 'np.diag', (['[2.5, 3.5]'], {}), '([2.5, 3.5])\n', (9739, 9751), True, 'import numpy as np\n'), ((9766, 9800), 'numpy.asarray', 'np.asarray', (['[[0.5, 0.5], mu1, mu2]'], {}), '([[0.5, 0.5], mu1, mu2])\n', (9776, 9800), True, 'import numpy as np\n'), ((10412, 10450), 'numpy.testing.assert_allclose', 'assert_allclose', (['complogp', 'complogp_st'], {}), '(complogp, complogp_st)\n', (10427, 10450), False, 'from numpy.testing import assert_allclose\n'), ((13927, 13959), 'numpy.asarray', 'np.asarray', (['[0.1, 0.1, 0.2, 0.6]'], {}), '([0.1, 0.1, 0.2, 0.6])\n', (13937, 13959), True, 'import numpy as np\n'), ((20287, 20340), 'numpy.testing.assert_allclose', 'assert_allclose', (['mix_logp', 'latent_mix_logp'], {'rtol': 'rtol'}), '(mix_logp, latent_mix_logp, rtol=rtol)\n', (20302, 20340), False, 'from numpy.testing import assert_allclose\n'), ((20672, 20733), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(*batch_shape, self.mixture_comps, 3)'}), '(size=(*batch_shape, self.mixture_comps, 3))\n', (20689, 20733), True, 'import numpy as np\n'), ((22156, 22194), 'numpy.random.randn', 'np.random.randn', (['self.mixture_comps', '(3)'], {}), '(self.mixture_comps, 3)\n', (22171, 22194), True, 'import numpy as np\n'), ((22209, 22230), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (22224, 22230), True, 'import numpy as np\n'), ((22272, 22295), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['cov'], {}), '(cov)\n', (22290, 22295), True, 'import numpy as np\n'), ((2926, 2933), 'pymc.Model', 'Model', ([], {}), '()\n', (2931, 2933), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3051, 3098), 'pymc.Normal', 'Normal', (['"""mu"""', '(0.0)', '(10.0)'], {'shape': 'self.norm_w.size'}), "('mu', 0.0, 10.0, shape=self.norm_w.size)\n", (3057, 3098), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3117, 3163), 'pymc.Gamma', 'Gamma', (['"""tau"""', '(1.0)', '(1.0)'], {'shape': 'self.norm_w.size'}), "('tau', 1.0, 1.0, shape=self.norm_w.size)\n", (3122, 3163), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3382, 3394), 'pymc.Metropolis', 'Metropolis', ([], {}), '()\n', (3392, 3394), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, 
MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3415, 3492), 'pymc.sample', 'sample', (['(5000)', 'step'], {'random_seed': 'self.random_seed', 'progressbar': '(False)', 'chains': '(1)'}), '(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)\n', (3421, 3492), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3552, 3572), 'numpy.sort', 'np.sort', (['self.norm_w'], {}), '(self.norm_w)\n', (3559, 3572), True, 'import numpy as np\n'), ((3666, 3687), 'numpy.sort', 'np.sort', (['self.norm_mu'], {}), '(self.norm_mu)\n', (3673, 3687), True, 'import numpy as np\n'), ((3767, 3774), 'pymc.Model', 'Model', ([], {}), '()\n', (3772, 3774), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3892, 3939), 'pymc.Normal', 'Normal', (['"""mu"""', '(0.0)', '(10.0)'], {'shape': 'self.norm_w.size'}), "('mu', 0.0, 10.0, shape=self.norm_w.size)\n", (3898, 3939), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3958, 4004), 'pymc.Gamma', 'Gamma', (['"""tau"""', '(1.0)', '(1.0)'], {'shape': 'self.norm_w.size'}), "('tau', 1.0, 1.0, shape=self.norm_w.size)\n", (3963, 4004), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((4017, 4077), 'pymc.NormalMixture', 'NormalMixture', (['"""x_obs"""', 'w', 'mu'], {'tau': 'tau', 'observed': 'self.norm_x'}), "('x_obs', w, mu, tau=tau, observed=self.norm_x)\n", (4030, 4077), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((4097, 4109), 'pymc.Metropolis', 'Metropolis', ([], {}), '()\n', (4107, 4109), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((4130, 4207), 'pymc.sample', 'sample', (['(5000)', 'step'], {'random_seed': 'self.random_seed', 'progressbar': '(False)', 'chains': '(1)'}), '(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)\n', (4136, 4207), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((4267, 4287), 'numpy.sort', 'np.sort', (['self.norm_w'], {}), '(self.norm_w)\n', (4274, 4287), True, 'import numpy as np\n'), ((4381, 4402), 'numpy.sort', 'np.sort', (['self.norm_mu'], {}), '(self.norm_mu)\n', (4388, 4402), True, 'import numpy as np\n'), ((4967, 4974), 'pymc.Model', 'Model', ([], {}), '()\n', (4972, 4974), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5004, 5035), 'pymc.Normal', 'Normal', (['"""mus"""'], {'shape': 'comp_shape'}), "('mus', shape=comp_shape)\n", (5010, 5035), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5055, 5103), 'pymc.Gamma', 'Gamma', (['"""taus"""'], {'alpha': '(1)', 'beta': '(1)', 'shape': 'comp_shape'}), "('taus', alpha=1, beta=1, shape=comp_shape)\n", (5060, 5103), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, 
Poisson, sample\n'), ((5192, 5267), 'pymc.NormalMixture', 'NormalMixture', (['"""m"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd', 'comp_shape': 'comp_shape'}), "('m', w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)\n", (5205, 5267), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5287, 5388), 'pymc.NormalMixture', 'NormalMixture', (['"""obs"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd', 'comp_shape': 'comp_shape', 'observed': 'observed'}), "('obs', w=ws, mu=mus, tau=taus, shape=nd, comp_shape=\n comp_shape, observed=observed)\n", (5300, 5388), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5428, 5435), 'pymc.Model', 'Model', ([], {}), '()\n', (5433, 5435), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5465, 5496), 'pymc.Normal', 'Normal', (['"""mus"""'], {'shape': 'comp_shape'}), "('mus', shape=comp_shape)\n", (5471, 5496), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5516, 5564), 'pymc.Gamma', 'Gamma', (['"""taus"""'], {'alpha': '(1)', 'beta': '(1)', 'shape': 'comp_shape'}), "('taus', alpha=1, beta=1, shape=comp_shape)\n", (5521, 5564), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5787, 5837), 'pymc.Mixture', 'Mixture', (['"""m"""'], {'w': 'ws', 'comp_dists': 'comp_dist', 'shape': 'nd'}), "('m', w=ws, comp_dists=comp_dist, shape=nd)\n", (5794, 5837), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5857, 5928), 'pymc.Mixture', 'Mixture', (['"""obs"""'], {'w': 'ws', 'comp_dists': 'comp_dist', 'shape': 'nd', 'observed': 'observed'}), "('obs', w=ws, comp_dists=comp_dist, shape=nd, observed=observed)\n", (5864, 5928), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((5943, 5950), 'pymc.Model', 'Model', ([], {}), '()\n', (5948, 5950), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((6408, 6439), 'pymc.Normal', 'Normal', (['"""mus"""'], {'shape': 'comp_shape'}), "('mus', shape=comp_shape)\n", (6414, 6439), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((6459, 6507), 'pymc.Gamma', 'Gamma', (['"""taus"""'], {'alpha': '(1)', 'beta': '(1)', 'shape': 'comp_shape'}), "('taus', alpha=1, beta=1, shape=comp_shape)\n", (6464, 6507), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8289, 8296), 'pymc.Model', 'Model', ([], {}), '()\n', (8294, 8296), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8415, 8460), 'pymc.Gamma', 'Gamma', (['"""mu"""', '(1.0)', '(1.0)'], {'shape': 'self.pois_w.size'}), "('mu', 1.0, 1.0, shape=self.pois_w.size)\n", (8420, 8460), 
False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8552, 8564), 'pymc.Metropolis', 'Metropolis', ([], {}), '()\n', (8562, 8564), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8585, 8662), 'pymc.sample', 'sample', (['(5000)', 'step'], {'random_seed': 'self.random_seed', 'progressbar': '(False)', 'chains': '(1)'}), '(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)\n', (8591, 8662), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8722, 8742), 'numpy.sort', 'np.sort', (['self.pois_w'], {}), '(self.pois_w)\n', (8729, 8742), True, 'import numpy as np\n'), ((8836, 8857), 'numpy.sort', 'np.sort', (['self.pois_mu'], {}), '(self.pois_mu)\n', (8843, 8857), True, 'import numpy as np\n'), ((8947, 8954), 'pymc.Model', 'Model', ([], {}), '()\n', (8952, 8954), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9073, 9118), 'pymc.Gamma', 'Gamma', (['"""mu"""', '(1.0)', '(1.0)'], {'shape': 'self.pois_w.size'}), "('mu', 1.0, 1.0, shape=self.pois_w.size)\n", (9078, 9118), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9236, 9248), 'pymc.Metropolis', 'Metropolis', ([], {}), '()\n', (9246, 9248), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9269, 9346), 'pymc.sample', 'sample', (['(5000)', 'step'], {'random_seed': 'self.random_seed', 'progressbar': '(False)', 'chains': '(1)'}), '(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)\n', (9275, 9346), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9406, 9426), 'numpy.sort', 'np.sort', (['self.pois_w'], {}), '(self.pois_w)\n', (9413, 9426), True, 'import numpy as np\n'), ((9520, 9541), 'numpy.sort', 'np.sort', (['self.pois_mu'], {}), '(self.pois_mu)\n', (9527, 9541), True, 'import numpy as np\n'), ((9814, 9821), 'pymc.Model', 'Model', ([], {}), '()\n', (9819, 9821), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9934, 9965), 'pymc.MvNormal.dist', 'MvNormal.dist', ([], {'mu': 'mu1', 'cov': 'cov1'}), '(mu=mu1, cov=cov1)\n', (9947, 9965), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9989, 10020), 'pymc.MvNormal.dist', 'MvNormal.dist', ([], {'mu': 'mu2', 'cov': 'cov2'}), '(mu=mu2, cov=cov2)\n', (10002, 10020), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((10037, 10092), 'pymc.Mixture', 'Mixture', (['"""x_obs"""', 'w', '[mvncomp1, mvncomp2]'], {'observed': 'obs'}), "('x_obs', w, [mvncomp1, mvncomp2], observed=obs)\n", (10044, 10092), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((11078, 11085), 'pymc.Model', 
'Model', ([], {}), '()\n', (11083, 11085), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((11719, 11757), 'pymc.Mixture.dist', 'Mixture.dist', ([], {'w': 'g_w', 'comp_dists': 'g_comp'}), '(w=g_w, comp_dists=g_comp)\n', (11731, 11757), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((11778, 11816), 'pymc.Mixture.dist', 'Mixture.dist', ([], {'w': 'l_w', 'comp_dists': 'l_comp'}), '(w=l_w, comp_dists=l_comp)\n', (11790, 11816), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((13284, 13328), 'numpy.concatenate', 'np.concatenate', (['(mixlogp1, mixlogp2)'], {'axis': '(1)'}), '((mixlogp1, mixlogp2), axis=1)\n', (13298, 13328), True, 'import numpy as np\n'), ((13533, 13552), 'numpy.exp', 'np.exp', (['self.norm_x'], {}), '(self.norm_x)\n', (13539, 13552), True, 'import numpy as np\n'), ((13996, 14016), 'numpy.random.randn', 'np.random.randn', (['nbr'], {}), '(nbr)\n', (14011, 14016), True, 'import numpy as np\n'), ((14352, 14377), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.3]'], {}), '([0.2, 0.5, 0.3])\n', (14360, 14377), True, 'import numpy as np\n'), ((14521, 14555), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {'dtype': 'np.float32'}), '((N, 3), dtype=np.float32)\n', (14529, 14555), True, 'import numpy as np\n'), ((14572, 14600), 'numpy.zeros', 'np.zeros', (['(N,)'], {'dtype': 'np.int'}), '((N,), dtype=np.int)\n', (14580, 14600), True, 'import numpy as np\n'), ((15009, 15019), 'pymc.Model', 'pm.Model', ([], {}), '()\n', (15017, 15019), True, 'import pymc as pm\n'), ((15672, 15718), 'pymc.Mixture', 'pm.Mixture', (['"""x_obs"""', 'pi', 'comp_dist'], {'observed': 'X'}), "('x_obs', pi, comp_dist, observed=X)\n", (15682, 15718), True, 'import pymc as pm\n'), ((15759, 15791), 'pymc.sample', 'pm.sample', (['(30)'], {'tune': '(10)', 'chains': '(1)'}), '(30, tune=10, chains=1)\n', (15768, 15791), True, 'import pymc as pm\n'), ((15854, 15902), 'pymc.sample_posterior_predictive', 'pm.sample_posterior_predictive', (['idata', 'n_samples'], {}), '(idata, n_samples)\n', (15884, 15902), True, 'import pymc as pm\n'), ((15923, 15968), 'pymc.sample_prior_predictive', 'pm.sample_prior_predictive', ([], {'samples': 'n_samples'}), '(samples=n_samples)\n', (15949, 15968), True, 'import pymc as pm\n'), ((16883, 16893), 'pymc.Model', 'pm.Model', ([], {}), '()\n', (16891, 16893), True, 'import pymc as pm\n'), ((17132, 17192), 'pymc.Normal', 'pm.Normal', (['"""latent_m"""'], {'mu': 'mus[..., z]', 'sigma': '(1e-05)', 'shape': 'nd'}), "('latent_m', mu=mus[..., z], sigma=1e-05, shape=nd)\n", (17141, 17192), True, 'import pymc as pm\n'), ((17801, 17811), 'pymc.Model', 'pm.Model', ([], {}), '()\n', (17809, 17811), True, 'import pymc as pm\n'), ((18221, 18272), 'pymc.Normal', 'pm.Normal', (['"""latent_m"""'], {'mu': 'mu', 'sigma': '(1e-05)', 'shape': 'nd'}), "('latent_m', mu=mu, sigma=1e-05, shape=nd)\n", (18230, 18272), True, 'import pymc as pm\n'), ((20254, 20269), 'numpy.array', 'np.array', (['logps'], {}), '(logps)\n', (20262, 20269), True, 'import numpy as np\n'), ((20752, 20778), 'numpy.ones', 'np.ones', (['(*batch_shape, 1)'], {}), '((*batch_shape, 1))\n', (20759, 20778), True, 'import numpy as np\n'), ((20791, 20818), 'numpy.ones', 'np.ones', (['self.mixture_comps'], {}), '(self.mixture_comps)\n', (20798, 20818), True, 
'import numpy as np\n'), ((20893, 20903), 'pymc.Model', 'pm.Model', ([], {}), '()\n', (20901, 20903), True, 'import pymc as pm\n'), ((20939, 21013), 'pymc.Multinomial.dist', 'pm.Multinomial.dist', ([], {'p': 'p', 'n': 'n', 'shape': '(*batch_shape, self.mixture_comps, 3)'}), '(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))\n', (20958, 21013), True, 'import pymc as pm\n'), ((21036, 21152), 'pymc.MixtureSameFamily', 'pm.MixtureSameFamily', (['"""mixture"""'], {'w': 'w', 'comp_dists': 'comp_dists', 'mixture_axis': 'mixture_axis', 'shape': '(*batch_shape, 3)'}), "('mixture', w=w, comp_dists=comp_dists, mixture_axis=\n mixture_axis, shape=(*batch_shape, 3))\n", (21056, 21152), True, 'import pymc as pm\n'), ((21263, 21313), 'pymc.sample_prior_predictive', 'pm.sample_prior_predictive', ([], {'samples': 'self.n_samples'}), '(samples=self.n_samples)\n', (21289, 21313), True, 'import pymc as pm\n'), ((22308, 22335), 'numpy.ones', 'np.ones', (['self.mixture_comps'], {}), '(self.mixture_comps)\n', (22315, 22335), True, 'import numpy as np\n'), ((22371, 22381), 'pymc.Model', 'pm.Model', ([], {}), '()\n', (22379, 22381), True, 'import pymc as pm\n'), ((22417, 22482), 'pymc.MvNormal.dist', 'pm.MvNormal.dist', ([], {'mu': 'mu', 'chol': 'chol', 'shape': '(self.mixture_comps, 3)'}), '(mu=mu, chol=chol, shape=(self.mixture_comps, 3))\n', (22433, 22482), True, 'import pymc as pm\n'), ((22505, 22596), 'pymc.MixtureSameFamily', 'pm.MixtureSameFamily', (['"""mixture"""'], {'w': 'w', 'comp_dists': 'comp_dists', 'mixture_axis': '(0)', 'shape': '(3,)'}), "('mixture', w=w, comp_dists=comp_dists, mixture_axis=0,\n shape=(3,))\n", (22525, 22596), True, 'import pymc as pm\n'), ((22643, 22693), 'pymc.sample_prior_predictive', 'pm.sample_prior_predictive', ([], {'samples': 'self.n_samples'}), '(samples=self.n_samples)\n', (22669, 22693), True, 'import pymc as pm\n'), ((23321, 23331), 'pymc.Model', 'pm.Model', ([], {}), '()\n', (23329, 23331), True, 'import pymc as pm\n'), ((23359, 23392), 'pymc.Gamma', 'pm.Gamma', (['"""mu"""', '(1.0)', '(1.0)'], {'shape': '(2)'}), "('mu', 1.0, 1.0, shape=2)\n", (23367, 23392), True, 'import pymc as pm\n'), ((23418, 23446), 'pymc.Poisson.dist', 'pm.Poisson.dist', (['mu'], {'shape': '(2)'}), '(mu, shape=2)\n', (23433, 23446), True, 'import pymc as pm\n'), ((23599, 23649), 'pymc.sample_prior_predictive', 'pm.sample_prior_predictive', ([], {'samples': 'self.n_samples'}), '(samples=self.n_samples)\n', (23625, 23649), True, 'import pymc as pm\n'), ((5137, 5151), 'numpy.ones', 'np.ones', (['ncomp'], {}), '(ncomp)\n', (5144, 5151), True, 'import numpy as np\n'), ((5598, 5612), 'numpy.ones', 'np.ones', (['ncomp'], {}), '(ncomp)\n', (5605, 5612), True, 'import numpy as np\n'), ((5672, 5727), 'pymc.Normal.dist', 'Normal.dist', ([], {'mu': 'mus[..., i]', 'tau': 'taus[..., i]', 'shape': 'nd'}), '(mu=mus[..., i], tau=taus[..., i], shape=nd)\n', (5683, 5727), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((6541, 6555), 'numpy.ones', 'np.ones', (['ncomp'], {}), '(ncomp)\n', (6548, 6555), True, 'import numpy as np\n'), ((6953, 7005), 'pymc.NormalMixture', 'NormalMixture', (['"""m"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd'}), "('m', w=ws, mu=mus, tau=taus, shape=nd)\n", (6966, 7005), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((7489, 7562), 'pymc.NormalMixture', 
'NormalMixture', (['"""obs"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd', 'observed': 'observed'}), "('obs', w=ws, mu=mus, tau=taus, shape=nd, observed=observed)\n", (7502, 7562), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8493, 8509), 'pymc.Poisson.dist', 'Poisson.dist', (['mu'], {}), '(mu)\n', (8505, 8509), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((10555, 10577), 'numpy.log', 'np.log', (["testpoint['w']"], {}), "(testpoint['w'])\n", (10561, 10577), True, 'import numpy as np\n'), ((10802, 10812), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (10809, 10812), True, 'import numpy as np\n'), ((15066, 15076), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (15073, 15076), True, 'import numpy as np\n'), ((19617, 19646), 'numpy.ones', 'np.ones', (['z.distribution.shape'], {}), '(z.distribution.shape)\n', (19624, 19646), True, 'import numpy as np\n'), ((2982, 3007), 'numpy.ones_like', 'np.ones_like', (['self.norm_w'], {}), '(self.norm_w)\n', (2994, 3007), True, 'import numpy as np\n'), ((3246, 3276), 'pymc.Normal.dist', 'Normal.dist', (['mu[0]'], {'tau': 'tau[0]'}), '(mu[0], tau=tau[0])\n', (3257, 3276), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3278, 3308), 'pymc.Normal.dist', 'Normal.dist', (['mu[1]'], {'tau': 'tau[1]'}), '(mu[1], tau=tau[1])\n', (3289, 3308), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((3823, 3848), 'numpy.ones_like', 'np.ones_like', (['self.norm_w'], {}), '(self.norm_w)\n', (3835, 3848), True, 'import numpy as np\n'), ((4871, 4885), 'numpy.ones', 'np.ones', (['ncomp'], {}), '(ncomp)\n', (4878, 4885), True, 'import numpy as np\n'), ((4915, 4933), 'numpy.sqrt', 'np.sqrt', (['test_taus'], {}), '(test_taus)\n', (4922, 4933), True, 'import numpy as np\n'), ((6855, 6907), 'pymc.NormalMixture', 'NormalMixture', (['"""m"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd'}), "('m', w=ws, mu=mus, tau=taus, shape=nd)\n", (6868, 6907), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((7299, 7324), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7312, 7324), False, 'import pytest\n'), ((7346, 7419), 'pymc.NormalMixture', 'NormalMixture', (['"""obs"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd', 'observed': 'observed'}), "('obs', w=ws, mu=mus, tau=taus, shape=nd, observed=observed)\n", (7359, 7419), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((8345, 8370), 'numpy.ones_like', 'np.ones_like', (['self.pois_w'], {}), '(self.pois_w)\n', (8357, 8370), True, 'import numpy as np\n'), ((9003, 9028), 'numpy.ones_like', 'np.ones_like', (['self.pois_w'], {}), '(self.pois_w)\n', (9015, 9028), True, 'import numpy as np\n'), ((9152, 9171), 'pymc.Poisson.dist', 'Poisson.dist', (['mu[0]'], {}), '(mu[0])\n', (9164, 9171), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9173, 9192), 'pymc.Poisson.dist', 
'Poisson.dist', (['mu[1]'], {}), '(mu[1])\n', (9185, 9192), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((9870, 9880), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (9877, 9880), True, 'import numpy as np\n'), ((10196, 10241), 'scipy.stats.multivariate_normal.logpdf', 'st.multivariate_normal.logpdf', (['obs', 'mu1', 'cov1'], {}), '(obs, mu1, cov1)\n', (10225, 10241), True, 'import scipy.stats as st\n'), ((10259, 10304), 'scipy.stats.multivariate_normal.logpdf', 'st.multivariate_normal.logpdf', (['obs', 'mu2', 'cov2'], {}), '(obs, mu2, cov2)\n', (10288, 10304), True, 'import scipy.stats as st\n'), ((10377, 10395), 'aesara.shared', 'aesara.shared', (['obs'], {}), '(obs)\n', (10390, 10395), False, 'import aesara\n'), ((11183, 11238), 'pymc.Exponential', 'Exponential', (['"""mu_g"""'], {'lam': '(1.0)', 'shape': 'nbr', 'transform': 'None'}), "('mu_g', lam=1.0, shape=nbr, transform=None)\n", (11194, 11238), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((11329, 11384), 'pymc.Exponential', 'Exponential', (['"""mu_l"""'], {'lam': '(1.0)', 'shape': 'nbr', 'transform': 'None'}), "('mu_l', lam=1.0, shape=nbr, transform=None)\n", (11340, 11384), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((12018, 12037), 'numpy.exp', 'np.exp', (['self.norm_x'], {}), '(self.norm_x)\n', (12024, 12037), True, 'import numpy as np\n'), ((12830, 12872), 'scipy.stats.norm.logpdf', 'st.norm.logpdf', ([], {'x': 'value', 'loc': "point['mu_g']"}), "(x=value, loc=point['mu_g'])\n", (12844, 12872), True, 'import scipy.stats as st\n'), ((14662, 14690), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'pi'], {}), '(1, pi)\n', (14683, 14690), True, 'import numpy as np\n'), ((14756, 14772), 'numpy.diag', 'np.diag', (['stds[k]'], {}), '(stds[k])\n', (14763, 14772), True, 'import numpy as np\n'), ((15246, 15283), 'pymc.Normal', 'pm.Normal', (["('mu%i' % i)", '(0)', '(10)'], {'shape': 'D'}), "('mu%i' % i, 0, 10, shape=D)\n", (15255, 15283), True, 'import pymc as pm\n'), ((15515, 15573), 'pymc.expand_packed_triangular', 'pm.expand_packed_triangular', (['D', 'packed_chol[i]'], {'lower': '(True)'}), '(D, packed_chol[i], lower=True)\n', (15542, 15573), True, 'import pymc as pm\n'), ((15608, 15657), 'pymc.MvNormal.dist', 'pm.MvNormal.dist', ([], {'mu': 'mu[i]', 'chol': 'chol[i]', 'shape': 'D'}), '(mu=mu[i], chol=chol[i], shape=D)\n', (15624, 15657), True, 'import pymc as pm\n'), ((16502, 16522), 'numpy.arange', 'np.arange', (['self.npop'], {}), '(self.npop)\n', (16511, 16522), True, 'import numpy as np\n'), ((17448, 17462), 'numpy.diff', 'np.diff', (['m_val'], {}), '(m_val)\n', (17455, 17462), True, 'import numpy as np\n'), ((17507, 17528), 'numpy.diff', 'np.diff', (['latent_m_val'], {}), '(latent_m_val)\n', (17514, 17528), True, 'import numpy as np\n'), ((6662, 6687), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6675, 6687), False, 'import pytest\n'), ((6713, 6765), 'pymc.NormalMixture', 'NormalMixture', (['"""m"""'], {'w': 'ws', 'mu': 'mus', 'tau': 'taus', 'shape': 'nd'}), "('m', w=ws, mu=mus, tau=taus, shape=nd)\n", (6726, 6765), False, 'from pymc import Dirichlet, Exponential, Gamma, LogNormal, Metropolis, Mixture, Model, MvNormal, Normal, NormalMixture, Poisson, sample\n'), ((7139, 
7159), 'numpy.empty', 'np.empty', (['comp_shape'], {}), '(comp_shape)\n', (7147, 7159), True, 'import numpy as np\n'), ((11899, 11909), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (11906, 11909), True, 'import numpy as np\n'), ((13083, 13104), 'numpy.exp', 'np.exp', (["point['mu_l']"], {}), "(point['mu_l'])\n", (13089, 13104), True, 'import numpy as np\n'), ((16961, 16974), 'numpy.ones', 'np.ones', (['npop'], {}), '(npop)\n', (16968, 16974), True, 'import numpy as np\n'), ((17087, 17100), 'numpy.ones', 'np.ones', (['npop'], {}), '(npop)\n', (17094, 17100), True, 'import numpy as np\n'), ((17895, 17914), 'numpy.ones', 'np.ones', (['(nd, npop)'], {}), '((nd, npop))\n', (17902, 17914), True, 'import numpy as np\n'), ((18092, 18105), 'numpy.ones', 'np.ones', (['npop'], {}), '(npop)\n', (18099, 18105), True, 'import numpy as np\n'), ((18531, 18545), 'numpy.diff', 'np.diff', (['m_val'], {}), '(m_val)\n', (18538, 18545), True, 'import numpy as np\n'), ((18594, 18615), 'numpy.diff', 'np.diff', (['latent_m_val'], {}), '(latent_m_val)\n', (18601, 18615), True, 'import numpy as np\n'), ((23512, 23522), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (23519, 23522), True, 'import numpy as np\n'), ((11508, 11520), 'numpy.ones', 'np.ones', (['nbr'], {}), '(nbr)\n', (11515, 11520), True, 'import numpy as np\n'), ((11609, 11621), 'numpy.ones', 'np.ones', (['nbr'], {}), '(nbr)\n', (11616, 11621), True, 'import numpy as np\n'), ((12593, 12625), 'scipy.stats.expon.logpdf', 'st.expon.logpdf', ([], {'x': "point['mu_l']"}), "(x=point['mu_l'])\n", (12608, 12625), True, 'import scipy.stats as st\n'), ((12938, 12958), 'numpy.log', 'np.log', (["point['g_w']"], {}), "(point['g_w'])\n", (12944, 12958), True, 'import numpy as np\n'), ((13171, 13191), 'numpy.log', 'np.log', (["point['l_w']"], {}), "(point['l_w'])\n", (13177, 13191), True, 'import numpy as np\n'), ((13382, 13404), 'numpy.log', 'np.log', (["point['mix_w']"], {}), "(point['mix_w'])\n", (13388, 13404), True, 'import numpy as np\n'), ((15423, 15446), 'pymc.HalfNormal.dist', 'pm.HalfNormal.dist', (['(2.5)'], {}), '(2.5)\n', (15441, 15446), True, 'import pymc as pm\n'), ((21744, 21753), 'numpy.log', 'np.log', (['w'], {}), '(w)\n', (21750, 21753), True, 'import numpy as np\n'), ((23082, 23091), 'numpy.log', 'np.log', (['w'], {}), '(w)\n', (23088, 23091), True, 'import numpy as np\n'), ((12747, 12757), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (12754, 12757), True, 'import numpy as np\n'), ((12364, 12396), 'scipy.stats.expon.logpdf', 'st.expon.logpdf', ([], {'x': "point['mu_g']"}), "(x=point['mu_g'])\n", (12379, 12396), True, 'import scipy.stats as st\n'), ((19085, 19100), 'numpy.corrcoef', 'np.corrcoef', (['ss'], {}), '(ss)\n', (19096, 19100), True, 'import numpy as np\n'), ((12516, 12528), 'numpy.ones', 'np.ones', (['nbr'], {}), '(nbr)\n', (12523, 12528), True, 'import numpy as np\n'), ((12287, 12299), 'numpy.ones', 'np.ones', (['nbr'], {}), '(nbr)\n', (12294, 12299), True, 'import numpy as np\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
def _OptimizerOptions():
for cse in [False, True]:
for inline in [False, True]:
for cfold in [False, True]:
yield tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=cse,
do_function_inlining=inline,
do_constant_folding=cfold)))
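# Illustrative alternative (not in the original file): _OptimizerOptions
# enumerates all 2**3 = 8 combinations of CSE, inlining, and constant
# folding. The same cross-product can be written with itertools.product:
def _OptimizerOptionsCompact():
  import itertools
  for cse, inline, cfold in itertools.product([False, True], repeat=3):
    yield tf.ConfigProto(graph_options=tf.GraphOptions(
        optimizer_options=tf.OptimizerOptions(
            opt_level=tf.OptimizerOptions.L0,
            do_common_subexpression_elimination=cse,
            do_function_inlining=inline,
            do_constant_folding=cfold)))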
class FunctionTest(tf.test.TestCase):
def _mat(self, x):
return np.array([x]).astype("float32").reshape([1, 1])
def testBasic(self):
g = tf.Graph()
# Define a function
# foo(a:float, b:float, c:float)->u:float,v:float,w:float
# u = matmul(a, b) + c
# v = u^2
# w = u + v
foo = tf.Graph()
with foo.as_default():
a = tf.placeholder(tf.float32, name="a")
b = tf.placeholder(tf.float32, name="b")
c = tf.placeholder(tf.float32, name="c")
u = tf.add(tf.matmul(a, b), c, name="u")
v = tf.square(u, name="v")
w = tf.add_n([u, v], name="w")
fdef = function._graph_to_function_def(foo, "foo", [a, b, c], [u, v, w])
class Mock(function._DefinedFunction):
def __init__(self, fdef):
self._func_name = "foo"
self._definition = fdef
self._sub_functions = collections.OrderedDict()
self._grad_func = None
self._python_grad_func = None
self._hash = hash(fdef.SerializeToString())
g._add_function(Mock(fdef))
# Compute 2 * 3 + 4 and its square.
with g.as_default(), tf.Session() as sess:
two = tf.constant(self._mat(2.0), name="two")
three = tf.constant(self._mat(3.0), name="three")
four = tf.constant(self._mat(4.0), name="four")
# TODO(zhifengc): w/ @decorator sugar, we will just do:
# y, s, t = foo_func(two, three, four)
# The graph contains two ops each of which calls foo.
u0, v0, w0 = g.create_op(
"foo", [two, three, four], [tf.float32, tf.float32, tf.float32],
compute_shapes=False).outputs
u1, v1, w1 = g.create_op(
"foo", [four, two, three], [tf.float32, tf.float32, tf.float32],
compute_shapes=False).outputs
# Checks some property of the graph def.
gdef = g.as_graph_def()
self.assertEqual(len(gdef.node), 5) # 5 nodes added.
self.assertEqual(len(gdef.library.function), 1) # 1 function is defined.
for _ in xrange(10):
# Run the graph, which is basically two function calls.
ans_u0, ans_v0, ans_w0, ans_u1, ans_v1, ans_w1 = sess.run([u0, v0, w0,
u1, v1, w1])
self.assertAllEqual(ans_u0, self._mat(10.0)) # 2 * 3 + 4 = 10
self.assertAllEqual(ans_v0, self._mat(100.0)) # 10^2 = 100
self.assertAllEqual(ans_w0, self._mat(110.0)) # 100 + 10 = 110
self.assertAllEqual(ans_u1, self._mat(11.0)) # 4 * 2 + 3 = 11
self.assertAllEqual(ans_v1, self._mat(121.0)) # 11^2 = 121
self.assertAllEqual(ans_w1, self._mat(132.0)) # 11 + 121 = 132
def testDefineFunction2Args(self):
@function.Defun(tf.float32, tf.float32)
def APlus2B(a, b):
return a + b * 2
with tf.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEquals("APlus2B", call.op.name)
with tf.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testGradientFunc(self):
@function.Defun(tf.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(tf.float32, tf.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops._symbolic_gradient(
input=[x, dy], Tout=[tf.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = tf.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with tf.Session() as sess:
self.assertAllClose([5.0], sess.run(call_f))
self.assertAllClose([0.4], sess.run(call_g))
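# Note (illustrative, not in the original file): SymbolicGradient computes
# dx = dy * f'(x) for the named function. For f(x) = x*x + 1, f'(2.0) = 4.0,
# so the call above returns 4.0 * 0.1 == 0.4.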
def testTanhSymGrad(self):
@function.Defun(tf.float32)
def Forward(x):
return tf.reduce_sum(tf.tanh(x))
g = tf.Graph()
with g.as_default():
x = tf.placeholder(tf.float32)
y = Forward(x)
dx = tf.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)))
with tf.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testCustomGradient(self):
dtype = tf.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = tf.reshape(dloss, [-1, 1]) * (tf.nn.softmax(logits) - labels)
dlabels = tf.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return tf.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), 1)
g = tf.Graph()
with g.as_default():
logits = tf.placeholder(dtype)
labels = tf.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = tf.gradients([loss], [logits])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
with tf.Session(graph=g, config=cfg) as sess:
out, = sess.run(dlogits, {logits: x, labels: y})
self.assertAllClose(out, np.exp(prob - y))
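# Illustrative sketch (the helper below is added and not part of the original
# tests): the "correct" gradient that XentLossGrad deliberately perturbs with
# exp() is the classic identity: for labels y summing to 1,
#   d/dx [-sum(y * log softmax(x))] = softmax(x) - y.
# A quick finite-difference check of that identity in NumPy:
def _softmax_xent_grad_check(n=5, eps=1e-6):
  rng = np.random.RandomState(0)
  x = rng.randn(n)
  y = np.zeros(n)
  y[2] = 1.0  # one-hot label (assumes n > 2)
  softmax = lambda z: np.exp(z - z.max()) / np.exp(z - z.max()).sum()
  loss = lambda z: -np.sum(y * np.log(softmax(z)))
  analytic = softmax(x) - y
  numeric = np.array([(loss(x + eps * np.eye(n)[i]) -
              loss(x - eps * np.eye(n)[i])) / (2 * eps) for i in range(n)])
  np.testing.assert_allclose(analytic, numeric, rtol=1e-4, atol=1e-6)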
def testCustomGradientError(self):
dtype = tf.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = tf.Graph()
with g.as_default():
inp = tf.placeholder(dtype)
out = tf.add_n(Forward(inp))
dinp = tf.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with tf.Session(graph=g) as sess:
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = tf.Graph()
with g.as_default():
x = tf.placeholder(tf.float32, [25, 4])
y = tf.placeholder(tf.float32, [200, 100])
dz = tf.placeholder(tf.float32, [1])
# We assume Foo is a function of (x, y) -> (z). Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops._symbolic_gradient(
input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
self.assertEquals(x.get_shape(), dx.get_shape())
self.assertEquals(y.get_shape(), dy.get_shape())
def testZNoDepOnY(self):
@function.Defun(tf.float32, tf.float32)
def Foo(x, y): # pylint: disable=unused-argument
return x * 2
with tf.Graph().as_default():
# z = Foo(x, y). z does not depend on y.
x = tf.constant(1.0)
y = tf.constant(2.0)
z = Foo(x, y)
dx, dy = tf.gradients([z], [x, y])
with tf.Session() as sess:
dx_val, dy_val = sess.run([dx, dy])
self.assertEquals([2.0], dx_val)
self.assertEquals([0.0], dy_val)
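# Note (illustrative, not in the original file): because Foo ignores y,
# tf.gradients still returns a tensor for dy, but it is exactly zero, while
# dx carries d(2x)/dx = 2.0.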
def testDefineFunctionNoArgs(self):
@function.Defun()
def AConstant():
return tf.constant([42])
with tf.Graph().as_default():
call = AConstant()
self.assertEquals("AConstant", call.op.name)
with tf.Session() as sess:
self.assertAllEqual([42], sess.run(call))
def testDefineFunctionNames(self):
@function.Defun(tf.float32)
def Foo(a):
return a + 1
with tf.Graph().as_default():
call1 = Foo([1.0])
self.assertEquals("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEquals("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEquals("mine", call3.op.name)
with tf.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEquals("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(tf.float32)
def Foo(x):
y = tf.Print(x, [x], "Hello")
with tf.control_dependencies([y]):
z = tf.no_op()
with tf.control_dependencies([z]):
return x * 2
with tf.Graph().as_default(), self.test_session():
z = Foo(tf.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssert(self):
@function.Defun(tf.float32)
def Foo(x):
check = gen_logging_ops._assert(tf.greater(x, 0), [x])
with tf.control_dependencies([check]):
return x * 2
g = tf.Graph()
with g.as_default(), self.test_session():
self.assertAllEqual(Foo(tf.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(tf.constant(-3.0)).eval(), 6.0)
def testVar(self):
@function.Defun(tf.float32)
def Foo(x):
return x * x + 1
g = tf.Graph()
with g.as_default():
v = tf.Variable(tf.constant(10.0))
z = Foo(v)
with self.test_session(graph=g):
tf.initialize_all_variables().run()
self.assertAllEqual(z.eval(), 101.)
def testDefineErrors(self):
with tf.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "return at least one tensor"):
@function.Defun()
def NoResult():
pass
_ = NoResult.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return tf.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return tf.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun()
def PlusMinusV1(a, b):
return a + b, b - a
_ = PlusMinusV1.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(tf.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(tf.float32, tf.float32, tf.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
@function.Defun()
def Const():
return tf.constant(1)
@function.Defun(tf.int32)
def PlusOne(a):
return a + 1
@function.Defun(tf.int32, tf.int32)
def PlusMinus(a, b):
return a + b, b - a
with tf.Graph().as_default():
_ = Const()
# pylint: disable=too-many-function-args
# pylint: disable=unexpected-keyword-arg
# pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne()
_ = PlusOne(1)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus()
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus(1)
_ = PlusMinus(1, 2)
_ = PlusOne(1, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
_ = PlusOne(1, device="/gpu:0")
def testDupDefinition(self):
@function.Defun(tf.float32)
def Foo(x):
return x + 1
@function.Defun(tf.float32, func_name="Foo")
def Bar(x):
return x + 1
@function.Defun(tf.float32, func_name="Foo")
def Baz(x):
return x + 2
with tf.Graph().as_default():
y = Foo(100.0)
z = Bar(100.0) # OK.
with self.test_session():
self.assertAllEqual(y.eval(), z.eval())
with self.assertRaisesRegexp(ValueError, "already defined"):
z = Baz(100.0)
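# Note (illustrative, not in the original file): Foo and Bar produce the same
# definition under the shared func_name "Foo", so registering both is
# harmless; Baz reuses the name with a different body (x + 2), which is what
# triggers the "already defined" error.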
def testFunctionDecorator(self):
@function.Defun(tf.float32)
def Minus1(b):
return b - 1.0
with tf.Graph().as_default():
call1 = Minus1([2.])
self.assertTrue(isinstance(Minus1, function._DefinedFunction))
self.assertEqual(Minus1.name, "Minus1")
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint: enable=unexpected-keyword-arg
self.assertEquals("next", call2.op.name)
with tf.Session() as sess:
self.assertAllEqual([1], sess.run(call1))
self.assertAllEqual([0], sess.run(call2))
def testNestedFunction(self):
@function.Defun(tf.float32)
def Cube(x):
return x * x * x
@function.Defun(tf.float32, tf.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
with tf.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
def testNestedDefinedFunction(self):
@function.Defun(tf.float32, tf.float32)
def CubeXPlusY(x, y):
@function.Defun(tf.float32)
def Cube(x):
return x * x * x
return Cube(x) + y
with tf.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
invoked = False
# pylint: disable=unused-variable
@function.Defun()
def Unused():
invoked = True
return tf.constant(42.)
self.assertFalse(invoked)
g = tf.Graph()
with g.as_default():
@function.Defun()
def Unused2():
invoked = True
return tf.constant(7.)
tf.constant(3.)
# pylint: enable=unused-variable
self.assertFalse(invoked)
gdef = g.as_graph_def()
self.assertEquals(0, len(gdef.library.function))
def testReduction(self):
g = tf.Graph()
# BN0 computes a batch-normalized matrix along rows.
def BN0(x):
mean = tf.reduce_mean(x, [0])
var = tf.reduce_mean(tf.square(x - mean)) # biased var
rstd = tf.rsqrt(var + 1e-8)
return (x - mean) * rstd
# Wraps BatchNorm in a tf function.
@function.Defun(tf.float32)
def BN1(x):
return BN0(x)
with g.as_default():
x = tf.placeholder(tf.float32)
y0 = BN0(x) # A plain graph
y1 = BN1(x) # A tf function
dx0, = tf.gradients([y0], [x])
dx1, = tf.gradients([y1], [x])
# Both should produce the same result and gradient.
with self.test_session(graph=g) as sess:
vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
self.assertAllClose(vals[0], vals[1])
self.assertAllClose(vals[2], vals[3])
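# Illustrative NumPy equivalent of BN0 above (the helper name is added and
# hypothetical). Note that the variance is reduced over *all* axes, matching
# tf.reduce_mean(tf.square(x - mean)) with no axis argument:
def _bn0_numpy(x, eps=1e-8):
  mean = x.mean(axis=0)              # per-column mean, as in tf.reduce_mean(x, [0])
  var = np.square(x - mean).mean()   # scalar, biased variance
  return (x - mean) / np.sqrt(var + eps)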
def testDeclareTypeMistake(self):
foo = function.Declare("Foo", [tf.float32], [tf.float32])
@function.Defun(tf.float32)
def Foo(x):
return x * x + 1
g = tf.Graph()
with g.as_default():
y = foo(2.0)
with self.test_session(graph=g):
with self.assertRaisesRegexp(tf.errors.NotFoundError, "not registered"):
_ = y.eval()
g = tf.Graph()
with g.as_default():
Foo.add_to_graph(g)
y = foo(2)
with self.test_session(graph=g):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"int32.*float"):
_ = y.eval()
g = tf.Graph()
with g.as_default():
Foo.add_to_graph(g)
with self.assertRaisesRegexp(
ValueError, "Expected number of arguments: 1, received: 2"):
_ = foo(2.0, 2.0)
g = tf.Graph()
with g.as_default():
Foo.add_to_graph(g)
y = foo(2.0)
with self.test_session(graph=g):
self.assertAllEqual(y.eval(), 5.0)
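# Note (illustrative, not in the original file): function.Declare only records
# a signature. Calling the declared foo fails with "not registered" until a
# Defun with the same name is added to the graph, and once added, the
# definition enforces both argument dtypes and arity, which is what the three
# failure cases above exercise.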
class UnrollLSTMTest(tf.test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return tf.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return tf.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = tf.concat(1, [x, mprev])
i_i, i_g, f_g, o_g = tf.split(1, 4, tf.matmul(xm, weights))
new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
new_c = tf.clip_by_value(new_c, -50.0, 50.0)
new_m = tf.sigmoid(o_g) * tf.tanh(new_c)
return new_m, new_c
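# Illustrative NumPy reference for LSTMCell above (the function name is added
# and hypothetical). The packed weight matrix holds the input, input-gate,
# forget-gate, and output-gate blocks side by side, split four ways after a
# single matmul:
def _lstm_cell_numpy(x, mprev, cprev, weights):
  sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
  xm = np.concatenate([x, mprev], axis=1)
  i_i, i_g, f_g, o_g = np.split(np.dot(xm, weights), 4, axis=1)
  new_c = sigmoid(f_g) * cprev + sigmoid(i_g) * np.tanh(i_i)
  new_c = np.clip(new_c, -50.0, 50.0)
  new_m = sigmoid(o_g) * np.tanh(new_c)
  return new_m, new_c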
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = tf.unpack(i, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(tf.float32, tf.float32, tf.float32, tf.float32)(cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(tf.float32, tf.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
# Wraps 10 LSTM steps into one function, and the whole loop
# into another function that calls the former, grouping
# 10 steps at a time.
@function.Defun(tf.float32, tf.float32, tf.float32, *([tf.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(tf.float32, tf.float32)
def LSTMLoop10(weights, inp):
x = tf.unpack(inp, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
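# Note (illustrative summary, not in the original file): the four modes above
# trade graph size for function nesting. "complete" inlines every LSTM step
# into the main graph, "cell" wraps a single step in a Defun, "loop" wraps
# the whole unroll in one Defun, and "loop10" nests a 10-step Defun inside
# the loop Defun.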
def testUnrollLSTM(self):
# Runs one forward pass of the unrolled LSTM graph.
def RunForward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Runs one forward and backward pass of the unrolled LSTM graph.
def RunForwardBackward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = tf.reduce_sum(tf.square(m))
dw = tf.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(dw)
d0 = RunForwardBackward("complete")
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4)
class FunctionInlineControlTest(tf.test.TestCase):
def testFoo(self):
dtype = tf.float32
cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
for noinline in [False, True]:
# pylint: disable=unexpected-keyword-arg
@function.Defun(dtype, noinline=noinline)
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
x = tf.tanh(v + tf.transpose(v, [1, 0]))
return tf.reduce_sum(x, 1, keep_dims=True)
@function.Defun(dtype)
def Forward(x):
for _ in range(10):
# pylint: disable=cell-var-from-loop
x = Cell(x)
return tf.reduce_sum(x, [0, 1])
g = tf.Graph()
with g.as_default():
x = tf.placeholder(dtype)
y = Forward(x)
dx, = tf.gradients([y], [x])
np.random.seed(321)
inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
with tf.Session(graph=g, config=cfg) as sess:
ans = sess.run([y, dx], {x: inp})
print(ans[0], np.sum(ans[1]))
self.assertAllClose(ans[0], 255.971, rtol=1e-3)
self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)
@function.Defun(*[tf.float32] * 3)
def Linear(w, b, x):
return tf.nn.relu(tf.matmul(x, w) + b)
@function.Defun(*[tf.float32] * 5)
def Linear2(w1, b1, w2, b2, x):
return Linear(w2, b2, Linear(w1, b1, x))
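# Worked example (illustrative note, not in the original file): in the test
# below, a..e are the constants [[0]]..[[4]], so Linear(a, b, c) =
# relu(c*a + b) = relu(0 + 1) = [[1]], and Linear2(a, b, c, d, e) first maps
# e through (a, b) to [[1]], then through (c, d): relu(1*2 + 3) = [[5]].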
class ModuleFunctionTest(tf.test.TestCase):
def testBasic(self):
with tf.Graph().as_default():
a, b, c, d, e = [tf.constant([[_]], dtype=tf.float32) for _ in range(5)]
y = Linear(a, b, c)
z = Linear2(a, b, c, d, e)
with tf.Session() as sess:
self.assertAllEqual([[1]], sess.run(y))
self.assertAllEqual([[5]], sess.run(z))
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.transpose",
"tensorflow.tanh",
"tensorflow.reduce_sum",
"tensorflow.gradients",
"numpy.array",
"six.moves.xrange",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.Graph",
"tensorflow.Print",
"tensorflow.placeholder",
"tensorflow.S... | [((23280, 23315), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['*([tf.float32] * 3)'], {}), '(*([tf.float32] * 3))\n', (23294, 23315), False, 'from tensorflow.python.framework import function\n'), ((23379, 23414), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['*([tf.float32] * 5)'], {}), '(*([tf.float32] * 5))\n', (23393, 23414), False, 'from tensorflow.python.framework import function\n'), ((23890, 23904), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (23902, 23904), True, 'import tensorflow as tf\n'), ((1699, 1709), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1707, 1709), True, 'import tensorflow as tf\n'), ((1878, 1888), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1886, 1888), True, 'import tensorflow as tf\n'), ((2185, 2250), 'tensorflow.python.framework.function._graph_to_function_def', 'function._graph_to_function_def', (['foo', '"""foo"""', '[a, b, c]', '[u, v, w]'], {}), "(foo, 'foo', [a, b, c], [u, v, w])\n", (2216, 2250), False, 'from tensorflow.python.framework import function\n'), ((4251, 4289), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32)\n', (4265, 4289), False, 'from tensorflow.python.framework import function\n'), ((4576, 4632), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {'func_name': '"""XSquarePlusOneFn"""'}), "(tf.float32, func_name='XSquarePlusOneFn')\n", (4590, 4632), False, 'from tensorflow.python.framework import function\n'), ((4691, 4729), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32)\n', (4705, 4729), False, 'from tensorflow.python.framework import function\n'), ((4913, 4923), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4921, 4923), True, 'import tensorflow as tf\n'), ((5210, 5236), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (5224, 5236), False, 'from tensorflow.python.framework import function\n'), ((5305, 5315), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5313, 5315), True, 'import tensorflow as tf\n'), ((5444, 5486), 'numpy.array', 'np.array', (['[-1, 1, 2, -2]'], {'dtype': 'np.float32'}), '([-1, 1, 2, -2], dtype=np.float32)\n', (5452, 5486), True, 'import numpy as np\n'), ((5887, 5922), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['dtype', 'dtype', 'dtype'], {}), '(dtype, dtype, dtype)\n', (5901, 5922), False, 'from tensorflow.python.framework import function\n'), ((6204, 6256), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['dtype', 'dtype'], {'grad_func': 'XentLossGrad'}), '(dtype, dtype, grad_func=XentLossGrad)\n', (6218, 6256), False, 'from tensorflow.python.framework import function\n'), ((6370, 6380), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6378, 6380), True, 'import tensorflow as tf\n'), ((7048, 7083), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['dtype', 'dtype', 'dtype'], {}), '(dtype, dtype, dtype)\n', (7062, 7083), False, 'from tensorflow.python.framework import function\n'), ((7178, 7215), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['dtype'], {'grad_func': 'Grad'}), '(dtype, grad_func=Grad)\n', (7192, 7215), False, 'from tensorflow.python.framework import function\n'), ((7263, 7273), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7271, 7273), True, 'import tensorflow as tf\n'), ((7730, 7740), 
'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7738, 7740), True, 'import tensorflow as tf\n'), ((8373, 8411), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32)\n', (8387, 8411), False, 'from tensorflow.python.framework import function\n'), ((8868, 8884), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (8882, 8884), False, 'from tensorflow.python.framework import function\n'), ((9176, 9202), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (9190, 9202), False, 'from tensorflow.python.framework import function\n'), ((9710, 9736), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (9724, 9736), False, 'from tensorflow.python.framework import function\n'), ((10075, 10101), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (10089, 10101), False, 'from tensorflow.python.framework import function\n'), ((10254, 10264), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10262, 10264), True, 'import tensorflow as tf\n'), ((10591, 10617), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (10605, 10617), False, 'from tensorflow.python.framework import function\n'), ((10666, 10676), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10674, 10676), True, 'import tensorflow as tf\n'), ((12194, 12210), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (12208, 12210), False, 'from tensorflow.python.framework import function\n'), ((12262, 12286), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.int32'], {}), '(tf.int32)\n', (12276, 12286), False, 'from tensorflow.python.framework import function\n'), ((12332, 12366), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.int32', 'tf.int32'], {}), '(tf.int32, tf.int32)\n', (12346, 12366), False, 'from tensorflow.python.framework import function\n'), ((13376, 13402), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (13390, 13402), False, 'from tensorflow.python.framework import function\n'), ((13444, 13487), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {'func_name': '"""Foo"""'}), "(tf.float32, func_name='Foo')\n", (13458, 13487), False, 'from tensorflow.python.framework import function\n'), ((13529, 13572), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {'func_name': '"""Foo"""'}), "(tf.float32, func_name='Foo')\n", (13543, 13572), False, 'from tensorflow.python.framework import function\n'), ((13904, 13930), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (13918, 13930), False, 'from tensorflow.python.framework import function\n'), ((14501, 14527), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (14515, 14527), False, 'from tensorflow.python.framework import function\n'), ((14574, 14612), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32)\n', (14588, 14612), False, 'from tensorflow.python.framework import function\n'), ((14853, 14891), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, 
tf.float32)\n', (14867, 14891), False, 'from tensorflow.python.framework import function\n'), ((15262, 15278), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (15276, 15278), False, 'from tensorflow.python.framework import function\n'), ((15387, 15397), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (15395, 15397), True, 'import tensorflow as tf\n'), ((15730, 15740), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (15738, 15740), True, 'import tensorflow as tf\n'), ((16022, 16048), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (16036, 16048), False, 'from tensorflow.python.framework import function\n'), ((16608, 16659), 'tensorflow.python.framework.function.Declare', 'function.Declare', (['"""Foo"""', '[tf.float32]', '[tf.float32]'], {}), "('Foo', [tf.float32], [tf.float32])\n", (16624, 16659), False, 'from tensorflow.python.framework import function\n'), ((16666, 16692), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (16680, 16692), False, 'from tensorflow.python.framework import function\n'), ((16741, 16751), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (16749, 16751), True, 'import tensorflow as tf\n'), ((16948, 16958), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (16956, 16958), True, 'import tensorflow as tf\n'), ((17221, 17231), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (17229, 17231), True, 'import tensorflow as tf\n'), ((17425, 17435), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (17433, 17435), True, 'import tensorflow as tf\n'), ((17743, 17802), 'tensorflow.random_uniform', 'tf.random_uniform', (['[2 * dims, 4 * dims]', '(-1)', '(1)'], {'seed': '(123456)'}), '([2 * dims, 4 * dims], -1, 1, seed=123456)\n', (17760, 17802), True, 'import tensorflow as tf\n'), ((17835, 17922), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS]'], {'seed': '(654321)'}), '([self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=\n 654321)\n', (17852, 17922), True, 'import tensorflow as tf\n'), ((18042, 18066), 'tensorflow.concat', 'tf.concat', (['(1)', '[x, mprev]'], {}), '(1, [x, mprev])\n', (18051, 18066), True, 'import tensorflow as tf\n'), ((18212, 18248), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_c', '(-50.0)', '(50.0)'], {}), '(new_c, -50.0, 50.0)\n', (18228, 18248), True, 'import tensorflow as tf\n'), ((1926, 1962), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""a"""'}), "(tf.float32, name='a')\n", (1940, 1962), True, 'import tensorflow as tf\n'), ((1973, 2009), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""b"""'}), "(tf.float32, name='b')\n", (1987, 2009), True, 'import tensorflow as tf\n'), ((2020, 2056), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""c"""'}), "(tf.float32, name='c')\n", (2034, 2056), True, 'import tensorflow as tf\n'), ((2114, 2136), 'tensorflow.square', 'tf.square', (['u'], {'name': '"""v"""'}), "(u, name='v')\n", (2123, 2136), True, 'import tensorflow as tf\n'), ((2147, 2173), 'tensorflow.add_n', 'tf.add_n', (['[u, v]'], {'name': '"""w"""'}), "([u, v], name='w')\n", (2155, 2173), True, 'import tensorflow as tf\n'), ((2668, 2680), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2678, 2680), True, 'import tensorflow as tf\n'), ((3550, 3560), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (3556, 3560), False, 'from six.moves import 
xrange\n'), ((4776, 4881), 'tensorflow.python.ops.functional_ops._symbolic_gradient', 'functional_ops._symbolic_gradient', ([], {'input': '[x, dy]', 'Tout': '[tf.float32]', 'f': '"""XSquarePlusOneFn"""', 'name': '"""dx"""'}), "(input=[x, dy], Tout=[tf.float32], f=\n 'XSquarePlusOneFn', name='dx')\n", (4809, 4881), False, 'from tensorflow.python.ops import functional_ops\n'), ((5351, 5377), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5365, 5377), True, 'import tensorflow as tf\n'), ((5410, 5432), 'tensorflow.gradients', 'tf.gradients', (['[y]', '[x]'], {}), '([y], [x])\n', (5422, 5432), True, 'import tensorflow as tf\n'), ((5694, 5725), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g', 'config': 'cfg'}), '(graph=g, config=cfg)\n', (5704, 5725), True, 'import tensorflow as tf\n'), ((6062, 6083), 'tensorflow.zeros_like', 'tf.zeros_like', (['labels'], {}), '(labels)\n', (6075, 6083), True, 'import tensorflow as tf\n'), ((6421, 6442), 'tensorflow.placeholder', 'tf.placeholder', (['dtype'], {}), '(dtype)\n', (6435, 6442), True, 'import tensorflow as tf\n'), ((6458, 6479), 'tensorflow.placeholder', 'tf.placeholder', (['dtype'], {}), '(dtype)\n', (6472, 6479), True, 'import tensorflow as tf\n'), ((6534, 6564), 'tensorflow.gradients', 'tf.gradients', (['[loss]', '[logits]'], {}), '([loss], [logits])\n', (6546, 6564), True, 'import tensorflow as tf\n'), ((6646, 6655), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6652, 6655), True, 'import numpy as np\n'), ((7311, 7332), 'tensorflow.placeholder', 'tf.placeholder', (['dtype'], {}), '(dtype)\n', (7325, 7332), True, 'import tensorflow as tf\n'), ((7381, 7405), 'tensorflow.gradients', 'tf.gradients', (['out', '[inp]'], {}), '(out, [inp])\n', (7393, 7405), True, 'import tensorflow as tf\n'), ((7485, 7504), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (7495, 7504), True, 'import tensorflow as tf\n'), ((7776, 7811), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[25, 4]'], {}), '(tf.float32, [25, 4])\n', (7790, 7811), True, 'import tensorflow as tf\n'), ((7822, 7860), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[200, 100]'], {}), '(tf.float32, [200, 100])\n', (7836, 7860), True, 'import tensorflow as tf\n'), ((7872, 7903), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1]'], {}), '(tf.float32, [1])\n', (7886, 7903), True, 'import tensorflow as tf\n'), ((8134, 8221), 'tensorflow.python.ops.functional_ops._symbolic_gradient', 'functional_ops._symbolic_gradient', ([], {'input': '[x, y, dz]', 'Tout': '([tf.float32] * 2)', 'f': '"""Foo"""'}), "(input=[x, y, dz], Tout=[tf.float32] * 2,\n f='Foo')\n", (8167, 8221), False, 'from tensorflow.python.ops import functional_ops\n'), ((8559, 8575), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), '(1.0)\n', (8570, 8575), True, 'import tensorflow as tf\n'), ((8586, 8602), 'tensorflow.constant', 'tf.constant', (['(2.0)'], {}), '(2.0)\n', (8597, 8602), True, 'import tensorflow as tf\n'), ((8638, 8663), 'tensorflow.gradients', 'tf.gradients', (['[z]', '[x, y]'], {}), '([z], [x, y])\n', (8650, 8663), True, 'import tensorflow as tf\n'), ((8919, 8936), 'tensorflow.constant', 'tf.constant', (['[42]'], {}), '([42])\n', (8930, 8936), True, 'import tensorflow as tf\n'), ((9763, 9788), 'tensorflow.Print', 'tf.Print', (['x', '[x]', '"""Hello"""'], {}), "(x, [x], 'Hello')\n", (9771, 9788), True, 'import tensorflow as tf\n'), ((12241, 12255), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (12252, 
12255), True, 'import tensorflow as tf\n'), ((14926, 14952), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (14940, 14952), False, 'from tensorflow.python.framework import function\n'), ((15331, 15348), 'tensorflow.constant', 'tf.constant', (['(42.0)'], {}), '(42.0)\n', (15342, 15348), True, 'import tensorflow as tf\n'), ((15431, 15447), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (15445, 15447), False, 'from tensorflow.python.framework import function\n'), ((15530, 15546), 'tensorflow.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (15541, 15546), True, 'import tensorflow as tf\n'), ((15826, 15848), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x', '[0]'], {}), '(x, [0])\n', (15840, 15848), True, 'import tensorflow as tf\n'), ((15924, 15945), 'tensorflow.rsqrt', 'tf.rsqrt', (['(var + 1e-08)'], {}), '(var + 1e-08)\n', (15932, 15945), True, 'import tensorflow as tf\n'), ((16121, 16147), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (16135, 16147), True, 'import tensorflow as tf\n'), ((16231, 16254), 'tensorflow.gradients', 'tf.gradients', (['[y0]', '[x]'], {}), '([y0], [x])\n', (16243, 16254), True, 'import tensorflow as tf\n'), ((16268, 16291), 'tensorflow.gradients', 'tf.gradients', (['[y1]', '[x]'], {}), '([y1], [x])\n', (16280, 16291), True, 'import tensorflow as tf\n'), ((18107, 18129), 'tensorflow.matmul', 'tf.matmul', (['xm', 'weights'], {}), '(xm, weights)\n', (18116, 18129), True, 'import tensorflow as tf\n'), ((18261, 18276), 'tensorflow.sigmoid', 'tf.sigmoid', (['o_g'], {}), '(o_g)\n', (18271, 18276), True, 'import tensorflow as tf\n'), ((18279, 18293), 'tensorflow.tanh', 'tf.tanh', (['new_c'], {}), '(new_c)\n', (18286, 18293), True, 'import tensorflow as tf\n'), ((18410, 18439), 'tensorflow.unpack', 'tf.unpack', (['i', 'self.NUM_UNROLL'], {}), '(i, self.NUM_UNROLL)\n', (18419, 18439), True, 'import tensorflow as tf\n'), ((18450, 18469), 'tensorflow.zeros_like', 'tf.zeros_like', (['x[0]'], {}), '(x[0])\n', (18463, 18469), True, 'import tensorflow as tf\n'), ((18480, 18499), 'tensorflow.zeros_like', 'tf.zeros_like', (['x[0]'], {}), '(x[0])\n', (18493, 18499), True, 'import tensorflow as tf\n'), ((18751, 18813), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32', 'tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32, tf.float32, tf.float32)\n', (18765, 18813), False, 'from tensorflow.python.framework import function\n'), ((19003, 19041), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32)\n', (19017, 19041), False, 'from tensorflow.python.framework import function\n'), ((19314, 19386), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32', 'tf.float32', '*([tf.float32] * 10)'], {}), '(tf.float32, tf.float32, tf.float32, *([tf.float32] * 10))\n', (19328, 19386), False, 'from tensorflow.python.framework import function\n'), ((19506, 19544), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32)\n', (19520, 19544), False, 'from tensorflow.python.framework import function\n'), ((20038, 20048), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (20046, 20048), True, 'import tensorflow as tf\n'), ((20063, 20074), 'time.time', 'time.time', ([], {}), '()\n', (20072, 20074), False, 'import time\n'), ((20260, 20271), 
'time.time', 'time.time', ([], {}), '()\n', (20269, 20271), False, 'import time\n'), ((21000, 21010), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (21008, 21010), True, 'import tensorflow as tf\n'), ((21025, 21036), 'time.time', 'time.time', ([], {}), '()\n', (21034, 21036), False, 'import time\n'), ((21310, 21321), 'time.time', 'time.time', ([], {}), '()\n', (21319, 21321), False, 'import time\n'), ((22380, 22420), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['dtype'], {'noinline': 'noinline'}), '(dtype, noinline=noinline)\n', (22394, 22420), False, 'from tensorflow.python.framework import function\n'), ((22609, 22630), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['dtype'], {}), '(dtype)\n', (22623, 22630), False, 'from tensorflow.python.framework import function\n'), ((22801, 22811), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (22809, 22811), True, 'import tensorflow as tf\n'), ((22940, 22959), 'numpy.random.seed', 'np.random.seed', (['(321)'], {}), '(321)\n', (22954, 22959), True, 'import numpy as np\n'), ((23355, 23370), 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), '(x, w)\n', (23364, 23370), True, 'import tensorflow as tf\n'), ((2074, 2089), 'tensorflow.matmul', 'tf.matmul', (['a', 'b'], {}), '(a, b)\n', (2083, 2089), True, 'import tensorflow as tf\n'), ((2422, 2447), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2445, 2447), False, 'import collections\n'), ((4466, 4478), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4476, 4478), True, 'import tensorflow as tf\n'), ((5046, 5058), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5056, 5058), True, 'import tensorflow as tf\n'), ((5284, 5294), 'tensorflow.tanh', 'tf.tanh', (['x'], {}), '(x)\n', (5291, 5294), True, 'import tensorflow as tf\n'), ((5984, 6010), 'tensorflow.reshape', 'tf.reshape', (['dloss', '[-1, 1]'], {}), '(dloss, [-1, 1])\n', (5994, 6010), True, 'import tensorflow as tf\n'), ((6173, 6188), 'tensorflow.exp', 'tf.exp', (['dlogits'], {}), '(dlogits)\n', (6179, 6188), True, 'import tensorflow as tf\n'), ((6574, 6617), 'numpy.random.uniform', 'np.random.uniform', (['(-10.0)', '(10.0)'], {'size': '(4, 9)'}), '(-10.0, 10.0, size=(4, 9))\n', (6591, 6617), True, 'import numpy as np\n'), ((6665, 6674), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6671, 6674), True, 'import numpy as np\n'), ((6699, 6742), 'numpy.random.uniform', 'np.random.uniform', (['(-10.0)', '(10.0)'], {'size': '(4, 9)'}), '(-10.0, 10.0, size=(4, 9))\n', (6716, 6742), True, 'import numpy as np\n'), ((6834, 6865), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g', 'config': 'cfg'}), '(graph=g, config=cfg)\n', (6844, 6865), True, 'import tensorflow as tf\n'), ((6963, 6979), 'numpy.exp', 'np.exp', (['(prob - y)'], {}), '(prob - y)\n', (6969, 6979), True, 'import numpy as np\n'), ((7415, 7458), 'numpy.random.uniform', 'np.random.uniform', (['(-10.0)', '(10.0)'], {'size': '(4, 9)'}), '(-10.0, 10.0, size=(4, 9))\n', (7432, 7458), True, 'import numpy as np\n'), ((8675, 8687), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8685, 8687), True, 'import tensorflow as tf\n'), ((9060, 9072), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9070, 9072), True, 'import tensorflow as tf\n'), ((9560, 9579), 'tensorflow.name_scope', 'tf.name_scope', (['"""my"""'], {}), "('my')\n", (9573, 9579), True, 'import tensorflow as tf\n'), ((9800, 9828), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[y]'], {}), '([y])\n', (9823, 9828), True, 'import 
tensorflow as tf\n'), ((9842, 9852), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (9850, 9852), True, 'import tensorflow as tf\n'), ((9864, 9892), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[z]'], {}), '([z])\n', (9887, 9892), True, 'import tensorflow as tf\n'), ((9985, 10001), 'tensorflow.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (9996, 10001), True, 'import tensorflow as tf\n'), ((10156, 10172), 'tensorflow.greater', 'tf.greater', (['x', '(0)'], {}), '(x, 0)\n', (10166, 10172), True, 'import tensorflow as tf\n'), ((10190, 10222), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[check]'], {}), '([check])\n', (10213, 10222), True, 'import tensorflow as tf\n'), ((10724, 10741), 'tensorflow.constant', 'tf.constant', (['(10.0)'], {}), '(10.0)\n', (10735, 10741), True, 'import tensorflow as tf\n'), ((11035, 11051), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (11049, 11051), False, 'from tensorflow.python.framework import function\n'), ((11203, 11219), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (11217, 11219), False, 'from tensorflow.python.framework import function\n'), ((11405, 11421), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (11419, 11421), False, 'from tensorflow.python.framework import function\n'), ((11607, 11623), 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {}), '()\n', (11621, 11623), False, 'from tensorflow.python.framework import function\n'), ((11804, 11830), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (11818, 11830), False, 'from tensorflow.python.framework import function\n'), ((12011, 12061), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32', 'tf.float32', 'tf.float32'], {}), '(tf.float32, tf.float32, tf.float32)\n', (12025, 12061), False, 'from tensorflow.python.framework import function\n'), ((14340, 14352), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (14350, 14352), True, 'import tensorflow as tf\n'), ((15507, 15523), 'tensorflow.constant', 'tf.constant', (['(7.0)'], {}), '(7.0)\n', (15518, 15523), True, 'import tensorflow as tf\n'), ((15876, 15895), 'tensorflow.square', 'tf.square', (['(x - mean)'], {}), '(x - mean)\n', (15885, 15895), True, 'import tensorflow as tf\n'), ((18143, 18158), 'tensorflow.sigmoid', 'tf.sigmoid', (['f_g'], {}), '(f_g)\n', (18153, 18158), True, 'import tensorflow as tf\n'), ((18169, 18184), 'tensorflow.sigmoid', 'tf.sigmoid', (['i_g'], {}), '(i_g)\n', (18179, 18184), True, 'import tensorflow as tf\n'), ((18187, 18199), 'tensorflow.tanh', 'tf.tanh', (['i_i'], {}), '(i_i)\n', (18194, 18199), True, 'import tensorflow as tf\n'), ((19593, 19624), 'tensorflow.unpack', 'tf.unpack', (['inp', 'self.NUM_UNROLL'], {}), '(inp, self.NUM_UNROLL)\n', (19602, 19624), True, 'import tensorflow as tf\n'), ((19637, 19656), 'tensorflow.zeros_like', 'tf.zeros_like', (['x[0]'], {}), '(x[0])\n', (19650, 19656), True, 'import tensorflow as tf\n'), ((19669, 19688), 'tensorflow.zeros_like', 'tf.zeros_like', (['x[0]'], {}), '(x[0])\n', (19682, 19688), True, 'import tensorflow as tf\n'), ((20430, 20452), 'tensorflow.Session', 'tf.Session', ([], {'config': 'cfg'}), '(config=cfg)\n', (20440, 20452), True, 'import tensorflow as tf\n'), ((21233, 21264), 'tensorflow.gradients', 'tf.gradients', (['[loss]', '[weights]'], {}), '([loss], [weights])\n', (21245, 21264), True, 'import tensorflow as 
tf\n'), ((21480, 21502), 'tensorflow.Session', 'tf.Session', ([], {'config': 'cfg'}), '(config=cfg)\n', (21490, 21502), True, 'import tensorflow as tf\n'), ((22565, 22600), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x', '(1)'], {'keep_dims': '(True)'}), '(x, 1, keep_dims=True)\n', (22578, 22600), True, 'import tensorflow as tf\n'), ((22765, 22789), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x', '[0, 1]'], {}), '(x, [0, 1])\n', (22778, 22789), True, 'import tensorflow as tf\n'), ((22851, 22872), 'tensorflow.placeholder', 'tf.placeholder', (['dtype'], {}), '(dtype)\n', (22865, 22872), True, 'import tensorflow as tf\n'), ((22910, 22932), 'tensorflow.gradients', 'tf.gradients', (['[y]', '[x]'], {}), '([y], [x])\n', (22922, 22932), True, 'import tensorflow as tf\n'), ((23036, 23067), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g', 'config': 'cfg'}), '(graph=g, config=cfg)\n', (23046, 23067), True, 'import tensorflow as tf\n'), ((23615, 23651), 'tensorflow.constant', 'tf.constant', (['[[_]]'], {'dtype': 'tf.float32'}), '([[_]], dtype=tf.float32)\n', (23626, 23651), True, 'import tensorflow as tf\n'), ((23741, 23753), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (23751, 23753), True, 'import tensorflow as tf\n'), ((4346, 4356), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4354, 4356), True, 'import tensorflow as tf\n'), ((5805, 5817), 'numpy.tanh', 'np.tanh', (['inp'], {}), '(inp)\n', (5812, 5817), True, 'import numpy as np\n'), ((6014, 6035), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (6027, 6035), True, 'import tensorflow as tf\n'), ((8495, 8505), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8503, 8505), True, 'import tensorflow as tf\n'), ((8947, 8957), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8955, 8957), True, 'import tensorflow as tf\n'), ((9248, 9258), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9256, 9258), True, 'import tensorflow as tf\n'), ((9925, 9935), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9933, 9935), True, 'import tensorflow as tf\n'), ((10804, 10833), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (10831, 10833), True, 'import tensorflow as tf\n'), ((10922, 10932), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10930, 10932), True, 'import tensorflow as tf\n'), ((11274, 11290), 'tensorflow.constant', 'tf.constant', (['[1]'], {}), '([1])\n', (11285, 11290), True, 'import tensorflow as tf\n'), ((11476, 11492), 'tensorflow.constant', 'tf.constant', (['[1]'], {}), '([1])\n', (11487, 11492), True, 'import tensorflow as tf\n'), ((12428, 12438), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12436, 12438), True, 'import tensorflow as tf\n'), ((13618, 13628), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13626, 13628), True, 'import tensorflow as tf\n'), ((13981, 13991), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13989, 13991), True, 'import tensorflow as tf\n'), ((14674, 14684), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (14682, 14684), True, 'import tensorflow as tf\n'), ((15033, 15043), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (15041, 15043), True, 'import tensorflow as tf\n'), ((16440, 16470), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3, 7)'}), '(size=(3, 7))\n', (16457, 16470), True, 'import numpy as np\n'), ((21206, 21218), 'tensorflow.square', 'tf.square', (['m'], {}), '(m)\n', (21215, 21218), True, 'import tensorflow as tf\n'), ((22972, 23005), 'numpy.random.uniform', 'np.random.uniform', 
(['(-1)', '(1)', '[16, 1]'], {}), '(-1, 1, [16, 1])\n', (22989, 23005), True, 'import numpy as np\n'), ((23141, 23155), 'numpy.sum', 'np.sum', (['ans[1]'], {}), '(ans[1])\n', (23147, 23155), True, 'import numpy as np\n'), ((23241, 23255), 'numpy.sum', 'np.sum', (['ans[1]'], {}), '(ans[1])\n', (23247, 23255), True, 'import numpy as np\n'), ((23567, 23577), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (23575, 23577), True, 'import tensorflow as tf\n'), ((1619, 1632), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (1627, 1632), True, 'import numpy as np\n'), ((5589, 5674), 'tensorflow.OptimizerOptions', 'tf.OptimizerOptions', ([], {'opt_level': 'tf.OptimizerOptions.L1', 'do_function_inlining': '(True)'}), '(opt_level=tf.OptimizerOptions.L1, do_function_inlining=True\n )\n', (5608, 5674), True, 'import tensorflow as tf\n'), ((6334, 6355), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (6347, 6355), True, 'import tensorflow as tf\n'), ((22090, 22246), 'tensorflow.OptimizerOptions', 'tf.OptimizerOptions', ([], {'opt_level': 'tf.OptimizerOptions.L0', 'do_common_subexpression_elimination': '(True)', 'do_function_inlining': '(True)', 'do_constant_folding': '(True)'}), '(opt_level=tf.OptimizerOptions.L0,\n do_common_subexpression_elimination=True, do_function_inlining=True,\n do_constant_folding=True)\n', (22109, 22246), True, 'import tensorflow as tf\n'), ((22525, 22548), 'tensorflow.transpose', 'tf.transpose', (['v', '[1, 0]'], {}), '(v, [1, 0])\n', (22537, 22548), True, 'import tensorflow as tf\n'), ((10341, 10357), 'tensorflow.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (10352, 10357), True, 'import tensorflow as tf\n'), ((10531, 10548), 'tensorflow.constant', 'tf.constant', (['(-3.0)'], {}), '(-3.0)\n', (10542, 10548), True, 'import tensorflow as tf\n'), ((1328, 1486), 'tensorflow.OptimizerOptions', 'tf.OptimizerOptions', ([], {'opt_level': 'tf.OptimizerOptions.L0', 'do_common_subexpression_elimination': 'cse', 'do_function_inlining': 'inline', 'do_constant_folding': 'cfold'}), '(opt_level=tf.OptimizerOptions.L0,\n do_common_subexpression_elimination=cse, do_function_inlining=inline,\n do_constant_folding=cfold)\n', (1347, 1486), True, 'import tensorflow as tf\n')] |
import numpy as np
import math
from scipy.spatial import KDTree
from typing import Tuple
class Grid():
def __init__(self, x_max, y_max, tile_size):
"""
        Initialise a grid
:param x_max:
:param y_max:
:param tile_size:
"""
        # generate the indices for our grid size
        # the indices are the positions of our grid
y_mesh, x_mesh = np.indices((y_max, x_max))
# we want to store the center value in pixels for
# our tiles
self.tile_size = tile_size
self.half_tile_size = (self.tile_size / 2.0)
x_mesh = ((x_mesh + 1) * self.tile_size) - self.half_tile_size
y_mesh = ((y_mesh + 1) * self.tile_size) - self.half_tile_size
# let's persist center positions
pixel_center_positions = np.squeeze(np.dstack([y_mesh.ravel(), x_mesh.ravel()]))
# property `map_pixel_center_positions` is for assisting human lookups of the grid
self.map_pixel_center_positions = np.squeeze(np.reshape(pixel_center_positions, (y_max, x_max, 2)))
# property `flat_pixel_positions` is for assisting computer lookup of the grid and
# reverse lookups for pixels to positions
flat_pixel_positions = pixel_center_positions.flatten()
self.flat_pixel_positions = np.reshape(flat_pixel_positions, (int(len(flat_pixel_positions) / 2), -1))
# for data storage in our grid - initialised to 0s
# TODO: how to handle many things at a grid position?
# perhaps a dictionary?
self.data = np.zeros(y_max * x_max)
# to find a position based on pixel position we'll use the scipy.spatial.KDTree data type
self.tree = KDTree(self.flat_pixel_positions)
# let's keep the last row, column values to minimise repeated lookups
self.last_x = None
self.last_y = None
# let's keep the last x and y distances
self.last_x_distances = None
self.last_y_distances = None
def __getitem__(self, item: Tuple[int, int]):
"""
Get the data stored nearest to x, y
:param item: Tuple[int, int]
:return:
"""
x = item[0]
y = item[1]
        query_result = self.query_tree(x, y, k = 1, distance_upper_bound = self.tile_size)
if not query_result:
raise ValueError(f"Pixel positions not found in grid! {x}, {y}")
# query_result[0] - The distances to the nearest neighbour
# query_result[1] - The locations of the neighbours
return self.data[query_result[1]]
def __setitem__(self, key: Tuple[int, int], value):
"""
Set the data stored nearest to x, y
:param key:
:param value:
:return:
"""
x = key[0]
y = key[1]
query_result = self.query_tree(x, y)
if not query_result:
raise ValueError(f"Pixel positions not found in grid! {x}, {y}")
# query_result[0] - The distances to the nearest neighbour
# query_result[1] - The locations of the neighbours
self.data[query_result[1]] = value
return
def __sub__(self, item):
"""
        Remove all occurrences of an item from the data layer
:param item:
:return:
"""
matches = np.where(self.data == item)
self.data[matches] = 0.0
def __add__(self, other: Tuple[int, int, int]):
"""
Add to data layer, expected to be a Tuple
        :param other: (data_to_be_added, at_x, at_y)
:return:
"""
self.__setitem__((other[1], other[2]), other[0])
def convert_position_to_pixels(self, row, column):
"""
        Convert a row, column grid position to its x, y pixel center values
:param row:
:param column:
:return:
"""
        # match the center positions built in __init__: (index + 1) * tile_size - half_tile_size
        x = ((column + 1) * self.tile_size) - self.half_tile_size
        y = ((row + 1) * self.tile_size) - self.half_tile_size
return x, y
def get_pixel_center(self, row, column):
"""
        Get the pixel center of a given grid position
:param row:
:param column:
:return:
"""
result = self.map_pixel_center_positions[row][column]
return (result[1], result[0])
def get_pos_for_pixels(self, x, y):
"""
Reverse lookup for grid position based on pixels
:param x:
:param y:
:return:
"""
query_result = self.query_tree(x, y)
if not query_result:
raise ValueError(f"Pixel positions not found in grid! {x}, {y}")
# query_result[0] - The distances to the nearest neighbour
# query_result[1] - The locations of the neighbours
return self.flat_pixel_positions[query_result[1]]
def query_tree(self, x, y, k = 1, distance_upper_bound = np.inf):
"""
Get the data in our grid at the specified pixel position
:param x:
:param y:
:param k: (optional) The number of nearest neighbors to return.
:return:
"""
# query_result[0] - The distances to the nearest neighbour
# query_result[1] - The locations of the neighbours
query_result = self.tree.query([y, x], k = k, distance_upper_bound = distance_upper_bound)
return query_result
def query(self, x, y, k = 1, distance_upper_bound = np.inf):
query_result = self.query_tree(x, y, k = k, distance_upper_bound = distance_upper_bound)
if query_result:
try:
                # return flattened arrays so that even if there's only
                # one result we still have an array
return list(zip(
np.array(self.data[query_result[1]]).flatten(),
np.array(query_result[0]).flatten()
))
except IndexError:
pass
return None, None
def get_x_y_distances(self, x, y):
"""
Get the distances for rows and columns from the given x, y pixel point.
Used mostly by other distance calculation functions.
:param x:
:param y:
:return:
"""
if x == self.last_x:
x_distances = self.last_x_distances
else:
self.last_x = x
self.last_x_distances = self.flat_pixel_positions[:, 1] - x
x_distances = self.last_x_distances
if y == self.last_y:
y_distances = self.last_y_distances
else:
self.last_y = y
self.last_y_distances = self.flat_pixel_positions[:, 0] - y
y_distances = self.last_y_distances
return y_distances, x_distances
def get_row_column_distances(self, row, column):
"""
Get the distances for rows and columns from the given row, column.
Used mostly by other distance calculation functions.
:param row:
:param column:
:return:
"""
        # get_pixel_center returns (x, y), so unpack in that order
        x, y = self.get_pixel_center(row, column)
return self.get_x_y_distances(x, y)
def get_straight_line_distances(self, row, column):
"""
Get the straight line distance from the center of the given row, column
to all other centers in the grid.
:param row:
:param column:
:return:
"""
row_distances, column_distances = self.get_row_column_distances(row, column)
return np.sqrt((column_distances * column_distances) + (row_distances * row_distances))
def get_straight_line_distance_to_point(self, start_row, start_column, end_row, end_column):
"""
Get the straight line distance between two grid positions centers.
:param start_row:
:param start_column:
:param end_row:
:param end_column:
:return:
"""
start_pixels = self.get_pixel_center(start_row, start_column)
end_pixels = self.get_pixel_center(end_row, end_column)
        return math.sqrt((start_pixels[0] - end_pixels[0]) ** 2 + (start_pixels[1] - end_pixels[1]) ** 2)
def get_angles(self, row, column, origin_angle):
"""
Get the direction in degrees from the center of the given row, column
to all other centers in the grid.
:param row:
:param column:
:param origin_angle:
:return:
"""
row_distances, column_distances = self.get_row_column_distances(row, column)
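        # arctan2 gives the bearing to every tile center; fold it into [0, 360)
        # relative to origin_angle so callers can compare against a half-FOV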
result = origin_angle - np.degrees(np.arctan2(row_distances, column_distances) % (2 * np.pi))
result[np.where(result < 0)] += 360
result[np.where(result >= 360)] -= 360
return result
def get_positions_in_fov(self, row, column, origin_angle, fov, tile_distance):
"""
Get an indexer for grid positions that in the field of view
from the center of the given row, column
:param row:
:param column:
:param fov:
:param tile_distance:
:return:
"""
straight_line_distances = self.get_straight_line_distances(row, column)
theta = self.get_angles(row, column, origin_angle)
half_fov = fov / 2
return np.logical_and(
np.logical_or(theta >= (360 - half_fov), theta <= half_fov),
straight_line_distances < (self.half_tile_size * tile_distance) - self.half_tile_size
)
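# A minimal usage sketch (not part of the original module): it assumes a small
# 4x4 grid with 10-pixel tiles, and shows the pixel-based get/set interface
# plus a field-of-view query; the values here are illustrative only.
if __name__ == "__main__":
    grid = Grid(x_max=4, y_max=4, tile_size=10)
    # store a value at the tile whose center is nearest to pixel (25, 35)
    grid[25, 35] = 7.0
    assert grid[25, 35] == 7.0
    # boolean mask of tile centers inside a 180-degree FOV, 5 tiles deep
    in_fov = grid.get_positions_in_fov(row=1, column=1, origin_angle=90,
                                       fov=180, tile_distance=5)
    print(in_fov.sum(), "tile(s) in view")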
| [
"numpy.sqrt",
"numpy.reshape",
"numpy.where",
"scipy.spatial.KDTree",
"math.sqrt",
"numpy.logical_or",
"numpy.indices",
"numpy.array",
"numpy.zeros",
"numpy.arctan2"
] | [((415, 441), 'numpy.indices', 'np.indices', (['(y_max, x_max)'], {}), '((y_max, x_max))\n', (425, 441), True, 'import numpy as np\n'), ((1623, 1646), 'numpy.zeros', 'np.zeros', (['(y_max * x_max)'], {}), '(y_max * x_max)\n', (1631, 1646), True, 'import numpy as np\n'), ((1774, 1807), 'scipy.spatial.KDTree', 'KDTree', (['self.flat_pixel_positions'], {}), '(self.flat_pixel_positions)\n', (1780, 1807), False, 'from scipy.spatial import KDTree\n'), ((3456, 3483), 'numpy.where', 'np.where', (['(self.data == item)'], {}), '(self.data == item)\n', (3464, 3483), True, 'import numpy as np\n'), ((7584, 7660), 'numpy.sqrt', 'np.sqrt', (['(column_distances * column_distances + row_distances * row_distances)'], {}), '(column_distances * column_distances + row_distances * row_distances)\n', (7591, 7660), True, 'import numpy as np\n'), ((8138, 8214), 'math.sqrt', 'math.sqrt', (['(start_pixels[1] * end_pixels[1] + start_pixels[0] * end_pixels[0])'], {}), '(start_pixels[1] * end_pixels[1] + start_pixels[0] * end_pixels[0])\n', (8147, 8214), False, 'import math\n'), ((1061, 1114), 'numpy.reshape', 'np.reshape', (['pixel_center_positions', '(y_max, x_max, 2)'], {}), '(pixel_center_positions, (y_max, x_max, 2))\n', (1071, 1114), True, 'import numpy as np\n'), ((8712, 8732), 'numpy.where', 'np.where', (['(result < 0)'], {}), '(result < 0)\n', (8720, 8732), True, 'import numpy as np\n'), ((8756, 8779), 'numpy.where', 'np.where', (['(result >= 360)'], {}), '(result >= 360)\n', (8764, 8779), True, 'import numpy as np\n'), ((9367, 9424), 'numpy.logical_or', 'np.logical_or', (['(theta >= 360 - half_fov)', '(theta <= half_fov)'], {}), '(theta >= 360 - half_fov, theta <= half_fov)\n', (9380, 9424), True, 'import numpy as np\n'), ((8638, 8681), 'numpy.arctan2', 'np.arctan2', (['row_distances', 'column_distances'], {}), '(row_distances, column_distances)\n', (8648, 8681), True, 'import numpy as np\n'), ((5836, 5872), 'numpy.array', 'np.array', (['self.data[query_result[1]]'], {}), '(self.data[query_result[1]])\n', (5844, 5872), True, 'import numpy as np\n'), ((5905, 5930), 'numpy.array', 'np.array', (['query_result[0]'], {}), '(query_result[0])\n', (5913, 5930), True, 'import numpy as np\n')] |
#Reading Gemini GMOS filters
from astropy import units as u, constants as const
from numpy import genfromtxt, asscalar
import pandas as pd
import os
from glob import glob
def read_hst_filter(fname):
"""
    Reading a Gemini-style filter file into a dataframe
Parameters
----------
fname: ~str
path to file to be read
"""
for i, line in enumerate(open(fname)):
if line.strip().startswith('1'):
skiprows = i - 1
break
else:
raise ValueError('File {0} not formatted in Gemini style'.format(fname))
data = pd.DataFrame(genfromtxt(fname, skip_header=skiprows, usecols=(1, 2)),
columns=['wavelength', 'transmission_lambda'])
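    # trim leading/trailing zero-transmission rows, keeping one zero row on
    # each side of the filter curve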
start_filter_idx = asscalar(
(data.transmission_lambda > 0).searchsorted(1) - 1)
end_filter_idx = (data.transmission_lambda > 0)[::-1].searchsorted(1)
end_filter_idx = asscalar((len(data) - end_filter_idx) + 1)
return data.iloc[start_filter_idx:end_filter_idx]
def read_dataset(fname_list, prefix, name_parser=None):
"""
Reading a whole list of filters
Parameters
----------
fname_list: list
list of filenames
prefix: str
prefix for the dictionary keys
Returns
-------
dict
"""
filter_dict = {}
for fname in fname_list:
if name_parser is not None:
filter_name = name_parser(fname)
else:
filter_name = fname
filter_path = os.path.join(prefix, filter_name)
filter_dict[filter_path] = read_hst_filter(fname)
return filter_dict
def read_all_hst():
hst_nameparser = (lambda fname:
'_'.join(os.path.basename(fname).lower().split('.')[:2]))
hst_filters = read_dataset(glob('filter_data/*.tab'), 'hst/wfc3',
hst_nameparser)
    #rewriting hst_filters dict keys (snapshot items so the dict can be mutated safely):
    for key, value in list(hst_filters.items()):
del hst_filters[key]
long_fname = key.split('/')[-1]
filter_name, band = long_fname.split('_')
new_key = '/'.join(key.split('/')[:-1] + [band, filter_name])
hst_filters[new_key] = value
return hst_filters
def save_to_hdf(filter_dict, hdf_file, mode='a'):
fh = pd.HDFStore(hdf_file, mode=mode)
for key in filter_dict:
filter_dict[key].to_hdf(fh, key)
fh.close()
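# Hedged usage sketch (not in the original file): it assumes the
# 'filter_data/*.tab' files referenced above exist next to this script, and
# 'filters.h5' is an illustrative output filename.
if __name__ == "__main__":
    hst_filters = read_all_hst()
    print("read {0} filters".format(len(hst_filters)))
    save_to_hdf(hst_filters, 'filters.h5', mode='w')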
| [
"os.path.join",
"os.path.basename",
"pandas.HDFStore",
"numpy.genfromtxt",
"glob.glob"
] | [((2258, 2290), 'pandas.HDFStore', 'pd.HDFStore', (['hdf_file'], {'mode': 'mode'}), '(hdf_file, mode=mode)\n', (2269, 2290), True, 'import pandas as pd\n'), ((598, 653), 'numpy.genfromtxt', 'genfromtxt', (['fname'], {'skip_header': 'skiprows', 'usecols': '(1, 2)'}), '(fname, skip_header=skiprows, usecols=(1, 2))\n', (608, 653), False, 'from numpy import genfromtxt, asscalar\n'), ((1495, 1528), 'os.path.join', 'os.path.join', (['prefix', 'filter_name'], {}), '(prefix, filter_name)\n', (1507, 1528), False, 'import os\n'), ((1782, 1807), 'glob.glob', 'glob', (['"""filter_data/*.tab"""'], {}), "('filter_data/*.tab')\n", (1786, 1807), False, 'from glob import glob\n'), ((1702, 1725), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (1718, 1725), False, 'import os\n')] |
import os
from gym import utils
from gym.envs.robotics import fetch_env
import numpy as np
import copy
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join('fetch', 'push_moving_double_obstacle2.xml')
class FetchPushMovingDoubleObstacleEnv2(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type='sparse'):
self.further = False
# TODO: configure adaption parameters
self.adapt_dict = dict()
self.adapt_dict["field"] = [1.3, 0.75, 0.6, 0.25, 0.25, 0.2]
        # centers of the intervals where the goal and initial object position will be sampled
self.target_goal_center = np.array([1.3, 0.57, 0.425])
self.object_center = np.array([1.3, 0.93, 0.425])
        # parameters for the moving obstacles
self.vel_lims = [0.6, 0.9]
self.n_moving_obstacles = 2
self.current_obstacle_vels = []
self.obstacle_directions = []
self.obstacle_upper_limits = []
self.obstacle_lower_limits = []
self.pos_difs = []
for _ in range(self.n_moving_obstacles):
self.current_obstacle_vels.append(0.9)
self.obstacle_directions.append(1)
self.obstacle_upper_limits.append(1.39)
self.obstacle_lower_limits.append(1.16)
self.obstacle_upper_limits.append(1.44)
self.obstacle_lower_limits.append(1.21)
for i in range(self.n_moving_obstacles):
self.pos_difs.append((self.obstacle_upper_limits[i] - self.obstacle_lower_limits[i]) / 2.)
initial_qpos = {
'robot0:slide0': 0.405,
'robot0:slide1': 0.48,
'robot0:slide2': 0.0,
'object0:joint': [1.3, 0.93, 0.42505, 1., 0., 0., 0.], # origin 0.53
}
fetch_env.FetchEnv.__init__(
self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
gripper_extra_height=0.0, target_in_the_air=False, target_offset=0.0,
obj_range=0.02, target_range=0.02, distance_threshold=0.05,
initial_qpos=initial_qpos, reward_type=reward_type)
utils.EzPickle.__init__(self)
self.obstacle_slider_idxs = []
self.obstacle_slider_idxs.append(self.sim.model.joint_names.index('obstacle:joint'))
self.obstacle_slider_idxs.append(self.sim.model.joint_names.index('obstacle2:joint'))
self.geom_id_object = self.sim.model.geom_name2id('object0')
self.geom_ids_obstacles = []
for name in ['o', 'o2']:
self.geom_ids_obstacles.append(self.sim.model.geom_name2id(name))
self.use_reset_sim = True
# RobotEnv methods
# ----------------------------
def test_setup(self, new_vel_lims=[1.1, 1.4]):
'''
        Changes the velocity limits for further tests after training an agent.
        '''
        # the default values make the obstacles faster on average
self.vel_lims = new_vel_lims
for i in range(self.n_moving_obstacles):
self.current_obstacle_vels[i] = new_vel_lims[1]
def set_obstacle_slide_pos(self, positions):
qpos = self.sim.data.qpos.flat[:]
for i, pos in enumerate(positions):
# move obstacles
qpos[self.obstacle_slider_idxs[i]] = pos
to_mod = copy.deepcopy(self.sim.get_state())
to_mod = to_mod._replace(qpos=qpos)
self.sim.set_state(to_mod)
self.sim.forward()
def set_obstacle_slide_vel(self, velocities):
qvel = self.sim.data.qvel.flat[:]
for i, vel in enumerate(velocities):
qvel[self.obstacle_slider_idxs[i]] = vel
to_mod = copy.deepcopy(self.sim.get_state())
to_mod = to_mod._replace(qvel=qvel)
self.sim.set_state(to_mod)
self.sim.forward()
def move_obstacle(self):
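        """
        Advance every obstacle by one simulation step, bouncing each slider
        between -pos_dif and +pos_dif and flipping its direction at the limits.
        """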
dt = self.sim.nsubsteps * self.sim.model.opt.timestep
qpos = self.sim.data.qpos.flat[:]
new_positions = []
for i in range(self.n_moving_obstacles):
current_qpos = qpos[self.obstacle_slider_idxs[i]]
if self.obstacle_directions[i] == 1:
if current_qpos >= self.pos_difs[i]:
new_pos = current_qpos - self.current_obstacle_vels[i] * dt
#self.set_obstacle_slide_pos(new_pos)
self.obstacle_directions[i] = -1
else:
extra_dist = self.current_obstacle_vels[i] * dt
if current_qpos + extra_dist >= self.pos_difs[i]:
new_pos = self.pos_difs[i]
#self.set_obstacle_slide_pos(new_pos)
self.obstacle_directions[i] = -1
else:
new_pos = current_qpos + extra_dist
#self.set_obstacle_slide_pos(new_pos)
else:
if current_qpos <= -self.pos_difs[i]:
new_pos = current_qpos + self.current_obstacle_vels[i] * dt
#self.set_obstacle_slide_pos(new_pos)
self.obstacle_directions[i] = 1
else:
extra_dist = self.current_obstacle_vels[i] * dt
if current_qpos - extra_dist <= -self.pos_difs[i]:
new_pos = -self.pos_difs[i]
#self.set_obstacle_slide_pos(new_pos)
self.obstacle_directions[i] = 1
else:
new_pos = current_qpos - extra_dist
#self.set_obstacle_slide_pos(new_pos)
new_positions.append(new_pos)
self.set_obstacle_slide_pos(new_positions)
def step(self, action):
self.move_obstacle()
return super(FetchPushMovingDoubleObstacleEnv2, self).step(action)
def _set_gripper_during_setup(self):
# Move end effector into position.
orig_pos = self.sim.data.get_site_xpos('robot0:grip')
gripper_target = np.array([-0.5399, 0.305, -0.306 + self.gripper_extra_height]) + orig_pos
gripper_rotation = np.array([1., 0., 1., 0.])
self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)
self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)
for _ in range(10):
self.sim.step()
def _sample_goal(self):
goal = self.target_goal_center + self.np_random.uniform(-self.target_range, self.target_range, size=3)
goal[2] = self.height_offset
return goal.copy()
def _reset_sim(self):
self.sim.set_state(self.initial_state)
a = np.random.randint(2)
if a == 0:
self.obstacle_directions = [1, -1]
else:
self.obstacle_directions = [-1, 1]
velocities = []
for i in range(self.n_moving_obstacles):
possible_vels = np.linspace(start=self.vel_lims[0], stop=self.vel_lims[1], num=10, endpoint=True)
vel = np.random.choice(possible_vels)
self.current_obstacle_vels[i] = vel
velocities.append(vel*self.obstacle_directions[i])
self.set_obstacle_slide_vel(velocities)
object_xpos = self.object_center[:2] + self.np_random.uniform(-self.obj_range,
self.obj_range, size=2)
object_qpos = self.sim.data.get_joint_qpos('object0:joint')
assert object_qpos.shape == (7,)
object_qpos[:2] = object_xpos
object_qpos[3:] = np.array([1., 0., 0., 0.])
self.sim.data.set_joint_qpos('object0:joint', object_qpos)
self.sim.forward()
return True
def _get_obs(self):
obs = super(FetchPushMovingDoubleObstacleEnv2, self)._get_obs()
body_id = self.sim.model.body_name2id('obstacle')
pos1 = np.array(self.sim.data.body_xpos[body_id].copy())
body_id2 = self.sim.model.body_name2id('obstacle2')
pos2 = np.array(self.sim.data.body_xpos[body_id2].copy())
dims = np.array([0.11, 0.02, 0.035])
ob1 = np.concatenate((pos1, dims.copy()))
ob2 = np.concatenate((pos2, dims.copy()))
obs['real_obstacle_info'] = np.array([ob1, ob2])
obs['real_size_goal'] = np.array([0.04, 0.04, 0.02])
return obs
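# Minimal interaction sketch (not part of the original module): it assumes
# MuJoCo, mujoco-py and the custom push_moving_double_obstacle2.xml asset are
# installed, and runs a short random rollout to illustrate the obstacle
# observations added in _get_obs above.
if __name__ == "__main__":
    env = FetchPushMovingDoubleObstacleEnv2(reward_type='sparse')
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
    print(obs['real_obstacle_info'].shape)  # (2, 6): two obstacles, position + dims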
| [
"gym.envs.robotics.fetch_env.FetchEnv.__init__",
"numpy.random.choice",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"gym.utils.EzPickle.__init__",
"numpy.linspace"
] | [((175, 232), 'os.path.join', 'os.path.join', (['"""fetch"""', '"""push_moving_double_obstacle2.xml"""'], {}), "('fetch', 'push_moving_double_obstacle2.xml')\n", (187, 232), False, 'import os\n'), ((650, 678), 'numpy.array', 'np.array', (['[1.3, 0.57, 0.425]'], {}), '([1.3, 0.57, 0.425])\n', (658, 678), True, 'import numpy as np\n'), ((708, 736), 'numpy.array', 'np.array', (['[1.3, 0.93, 0.425]'], {}), '([1.3, 0.93, 0.425])\n', (716, 736), True, 'import numpy as np\n'), ((1736, 2035), 'gym.envs.robotics.fetch_env.FetchEnv.__init__', 'fetch_env.FetchEnv.__init__', (['self', 'MODEL_XML_PATH'], {'has_object': '(True)', 'block_gripper': '(True)', 'n_substeps': '(20)', 'gripper_extra_height': '(0.0)', 'target_in_the_air': '(False)', 'target_offset': '(0.0)', 'obj_range': '(0.02)', 'target_range': '(0.02)', 'distance_threshold': '(0.05)', 'initial_qpos': 'initial_qpos', 'reward_type': 'reward_type'}), '(self, MODEL_XML_PATH, has_object=True,\n block_gripper=True, n_substeps=20, gripper_extra_height=0.0,\n target_in_the_air=False, target_offset=0.0, obj_range=0.02,\n target_range=0.02, distance_threshold=0.05, initial_qpos=initial_qpos,\n reward_type=reward_type)\n', (1763, 2035), False, 'from gym.envs.robotics import fetch_env\n'), ((2077, 2106), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (2100, 2106), False, 'from gym import utils\n'), ((6016, 6046), 'numpy.array', 'np.array', (['[1.0, 0.0, 1.0, 0.0]'], {}), '([1.0, 0.0, 1.0, 0.0])\n', (6024, 6046), True, 'import numpy as np\n'), ((6528, 6548), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (6545, 6548), True, 'import numpy as np\n'), ((7424, 7454), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (7432, 7454), True, 'import numpy as np\n'), ((7927, 7956), 'numpy.array', 'np.array', (['[0.11, 0.02, 0.035]'], {}), '([0.11, 0.02, 0.035])\n', (7935, 7956), True, 'import numpy as np\n'), ((8093, 8113), 'numpy.array', 'np.array', (['[ob1, ob2]'], {}), '([ob1, ob2])\n', (8101, 8113), True, 'import numpy as np\n'), ((8146, 8174), 'numpy.array', 'np.array', (['[0.04, 0.04, 0.02]'], {}), '([0.04, 0.04, 0.02])\n', (8154, 8174), True, 'import numpy as np\n'), ((5915, 5977), 'numpy.array', 'np.array', (['[-0.5399, 0.305, -0.306 + self.gripper_extra_height]'], {}), '([-0.5399, 0.305, -0.306 + self.gripper_extra_height])\n', (5923, 5977), True, 'import numpy as np\n'), ((6778, 6864), 'numpy.linspace', 'np.linspace', ([], {'start': 'self.vel_lims[0]', 'stop': 'self.vel_lims[1]', 'num': '(10)', 'endpoint': '(True)'}), '(start=self.vel_lims[0], stop=self.vel_lims[1], num=10, endpoint\n =True)\n', (6789, 6864), True, 'import numpy as np\n'), ((6878, 6909), 'numpy.random.choice', 'np.random.choice', (['possible_vels'], {}), '(possible_vels)\n', (6894, 6909), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from functools import partial
from model.rnncell import RNNCell
from model.orthogonalcell import OrthogonalLinear
from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer
from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class MemoryCell(RNNCell):
name = None
valid_keys = ['uxh', 'ux', 'uh', 'um', 'hxm', 'hx', 'hm', 'hh', 'bias', ]
def default_initializers(self):
return {
'uxh': 'uniform',
'hxm': 'xavier',
'hx': 'xavier',
'hm': 'xavier',
'um': 'zero',
'hh': 'xavier',
}
def default_architecture(self):
return {
'ux': True,
# 'uh': True,
'um': False,
'hx': True,
'hm': True,
'hh': False,
'bias': True,
}
def __init__(self, input_size, hidden_size, memory_size, memory_order,
memory_activation='id',
                 gate='G', # 'N' | 'G' | 'UR'
memory_output=False,
**kwargs
):
self.memory_size = memory_size
self.memory_order = memory_order
self.memory_activation = memory_activation
self.gate = gate
self.memory_output = memory_output
super(MemoryCell, self).__init__(input_size, hidden_size, **kwargs)
self.input_to_hidden_size = self.input_size if self.architecture['hx'] else 0
self.input_to_memory_size = self.input_size if self.architecture['ux'] else 0
# Construct and initialize u
self.W_uxh = nn.Linear(self.input_to_memory_size + self.hidden_size, self.memory_size,
bias=self.architecture['bias'])
# nn.init.zeros_(self.W_uxh.bias)
if 'uxh' in self.initializers:
get_initializer(self.initializers['uxh'], self.memory_activation)(self.W_uxh.weight)
if 'ux' in self.initializers: # Re-init if passed in
get_initializer(self.initializers['ux'], self.memory_activation)(self.W_uxh.weight[:, :self.input_size])
if 'uh' in self.initializers: # Re-init if passed in
get_initializer(self.initializers['uh'], self.memory_activation)(self.W_uxh.weight[:, self.input_size:])
# Construct and initialize h
self.memory_to_hidden_size = self.memory_size * self.memory_order if self.architecture['hm'] else 0
preact_ctor = Linear_
preact_args = [self.input_to_hidden_size + self.memory_to_hidden_size, self.hidden_size,
self.architecture['bias']]
self.W_hxm = preact_ctor(*preact_args)
if self.initializers.get('hxm', None) is not None: # Re-init if passed in
get_initializer(self.initializers['hxm'], self.hidden_activation)(self.W_hxm.weight)
if self.initializers.get('hx', None) is not None: # Re-init if passed in
get_initializer(self.initializers['hx'], self.hidden_activation)(self.W_hxm.weight[:, :self.input_size])
if self.initializers.get('hm', None) is not None: # Re-init if passed in
get_initializer(self.initializers['hm'], self.hidden_activation)(self.W_hxm.weight[:, self.input_size:])
if self.architecture['um']:
# No bias here because the implementation is awkward otherwise, but probably doesn't matter
self.W_um = nn.Parameter(torch.Tensor(self.memory_size, self.memory_order))
get_initializer(self.initializers['um'], self.memory_activation)(self.W_um)
if self.architecture['hh']:
self.reset_hidden_to_hidden()
else:
self.W_hh = None
if self.gate is not None:
if self.architecture['hh']:
print("input to hidden size, memory to hidden size, hidden size:", self.input_to_hidden_size, self.memory_to_hidden_size, self.hidden_size)
preact_ctor = Linear_
preact_args = [self.input_to_hidden_size + self.memory_to_hidden_size + self.hidden_size, self.hidden_size,
self.architecture['bias']]
self.W_gxm = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate)
def reset_parameters(self):
# super().reset_parameters()
self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size) # TODO figure out how to remove this duplication
self.memory_activation_fn = get_activation(self.memory_activation, self.memory_size)
def forward(self, input, state):
h, m, time_step = state
input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
# Construct the update features
memory_preact = self.W_uxh(torch.cat((input_to_memory, h), dim=-1)) # (batch, memory_size)
if self.architecture['um']:
memory_preact = memory_preact + (m * self.W_um).sum(dim=-1)
u = self.memory_activation_fn(memory_preact) # (batch, memory_size)
# Update the memory
m = self.update_memory(m, u, time_step) # (batch, memory_size, memory_order)
# Update hidden state from memory
if self.architecture['hm']:
memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
else:
memory_to_hidden = input.new_empty((0,))
m_inputs = (torch.cat((input_to_hidden, memory_to_hidden), dim=-1),)
hidden_preact = self.W_hxm(*m_inputs)
if self.architecture['hh']:
hidden_preact = hidden_preact + self.W_hh(h)
hidden = self.hidden_activation_fn(hidden_preact)
# Construct gate if necessary
if self.gate is None:
h = hidden
else:
if self.architecture['hh']:
m_inputs = torch.cat((m_inputs[0], h), -1),
g = self.W_gxm(*m_inputs)
h = (1.-g) * h + g * hidden
next_state = (h, m, time_step + 1)
output = self.output(next_state)
return output, next_state
def update_memory(self, m, u, time_step):
"""
m: (B, M, N) [batch size, memory size, memory order]
u: (B, M)
Output: (B, M, N)
"""
raise NotImplementedError
def default_state(self, input, batch_size=None):
batch_size = input.size(0) if batch_size is None else batch_size
return (input.new_zeros(batch_size, self.hidden_size, requires_grad=False),
input.new_zeros(batch_size, self.memory_size, self.memory_order, requires_grad=False),
0)
def output(self, state):
""" Converts a state into a single output (tensor) """
h, m, time_step = state
if self.memory_output:
hm = torch.cat((h, m.view(m.shape[0], self.memory_size*self.memory_order)), dim=-1)
return hm
else:
return h
def state_size(self):
return self.hidden_size + self.memory_size*self.memory_order
def output_size(self):
if self.memory_output:
return self.hidden_size + self.memory_size*self.memory_order
else:
return self.hidden_size
class LTICell(MemoryCell):
""" A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf. """
def __init__(self, input_size, hidden_size, memory_size, memory_order,
A, B,
trainable_scale=0., # how much to scale LR on A and B
dt=0.01,
discretization='zoh',
**kwargs
):
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
C = np.ones((1, memory_order))
D = np.zeros((1,))
dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
dA = dA - np.eye(memory_order) # puts into form: x += Ax
self.trainable_scale = np.sqrt(trainable_scale)
if self.trainable_scale <= 0.:
self.register_buffer('A', torch.Tensor(dA))
self.register_buffer('B', torch.Tensor(dB))
else:
self.A = nn.Parameter(torch.Tensor(dA / self.trainable_scale), requires_grad=True)
self.B = nn.Parameter(torch.Tensor(dB / self.trainable_scale), requires_grad=True)
# TODO: proper way to implement LR scale is a preprocess() function that occurs once per unroll
# also very useful for orthogonal params
def update_memory(self, m, u, time_step):
u = u.unsqueeze(-1) # (B, M, 1)
if self.trainable_scale <= 0.:
return m + F.linear(m, self.A) + F.linear(u, self.B)
else:
return m + F.linear(m, self.A * self.trainable_scale) + F.linear(u, self.B * self.trainable_scale)
class LSICell(MemoryCell):
""" A cell implementing Linear 'Scale' Invariant dynamics: c' = 1/t (Ac + Bf). """
def __init__(self, input_size, hidden_size, memory_size, memory_order,
A, B,
init_t = 0, # 0 for special case at t=0 (new code), else old code without special case
max_length=1024,
discretization='bilinear',
**kwargs
):
"""
# TODO: make init_t start at arbitrary time (instead of 0 or 1)
"""
# B should have shape (N, 1)
assert len(B.shape) == 2 and B.shape[1] == 1
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
assert isinstance(init_t, int)
self.init_t = init_t
self.max_length = max_length
A_stacked = np.empty((max_length, memory_order, memory_order), dtype=A.dtype)
B_stacked = np.empty((max_length, memory_order), dtype=B.dtype)
B = B[:,0]
N = memory_order
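        # Precompute the discretized transition for every timestep t, where the
        # continuous dynamics are m' = (A m + B u) / t.  Forward Euler gives
        # I + A/t; backward Euler solves (I - A/t); bilinear (Tustin) solves
        # (I - A/2t) against (I + A/2t); zero-order hold integrates A over
        # log(t+1) - log(t) via the matrix exponential.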
for t in range(1, max_length + 1):
At = A / t
Bt = B / t
if discretization in forward_aliases:
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
elif discretization in backward_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif discretization in bilinear_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
elif discretization in zoh_aliases:
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
B_stacked = B_stacked[:, :, None]
A_stacked -= np.eye(memory_order) # puts into form: x += Ax
self.register_buffer('A', torch.Tensor(A_stacked))
self.register_buffer('B', torch.Tensor(B_stacked))
def update_memory(self, m, u, time_step):
u = u.unsqueeze(-1) # (B, M, 1)
t = time_step - 1 + self.init_t
if t < 0:
return F.pad(u, (0, self.memory_order - 1))
else:
if t >= self.max_length: t = self.max_length - 1
return m + F.linear(m, self.A[t]) + F.linear(u, self.B[t])
class TimeMemoryCell(MemoryCell):
""" MemoryCell with timestamped data """
def __init__(self, input_size, hidden_size, memory_size, memory_order, **kwargs):
super().__init__(input_size-1, hidden_size, memory_size, memory_order, **kwargs)
def forward(self, input, state):
h, m, time_step = state
timestamp, input = input[:, 0], input[:, 1:]
input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
# Construct the update features
memory_preact = self.W_uxh(torch.cat((input_to_memory, h), dim=-1)) # (batch, memory_size)
if self.architecture['um']:
memory_preact = memory_preact + (m * self.W_um).sum(dim=-1)
u = self.memory_activation_fn(memory_preact) # (batch, memory_size)
# Update the memory
m = self.update_memory(m, u, time_step, timestamp) # (batch, memory_size, memory_order)
# Update hidden state from memory
if self.architecture['hm']:
memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
else:
memory_to_hidden = input.new_empty((0,))
m_inputs = (torch.cat((input_to_hidden, memory_to_hidden), dim=-1),)
hidden_preact = self.W_hxm(*m_inputs)
if self.architecture['hh']:
hidden_preact = hidden_preact + self.W_hh(h)
hidden = self.hidden_activation_fn(hidden_preact)
# Construct gate if necessary
if self.gate is None:
h = hidden
else:
if self.architecture['hh']:
m_inputs = torch.cat((m_inputs[0], h), -1),
g = self.W_gxm(*m_inputs)
h = (1.-g) * h + g * hidden
next_state = (h, m, timestamp)
output = self.output(next_state)
return output, next_state
class TimeLSICell(TimeMemoryCell):
""" A cell implementing "Linear Scale Invariant" dynamics: c' = Ac + Bf with timestamped inputs. """
name = 'tlsi'
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
measure='legs',
measure_args={},
method='manual',
discretization='bilinear',
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
assert measure in ['legs', 'lagt', 'tlagt', 'legt']
assert method in ['manual', 'linear', 'toeplitz']
if measure == 'legs':
if method == 'manual':
self.transition = LegSAdaptiveTransitionManual(self.memory_order)
kwargs = {'precompute': False}
if measure == 'legt':
if method == 'manual':
self.transition = LegTAdaptiveTransitionManual(self.memory_order)
kwargs = {'precompute': False}
elif measure == 'lagt':
if method == 'manual':
self.transition = LagTAdaptiveTransitionManual(self.memory_order)
kwargs = {'precompute': False}
elif measure == 'tlagt':
if method == 'manual':
self.transition = TLagTAdaptiveTransitionManual(self.memory_order, **measure_args)
kwargs = {'precompute': False}
if discretization in forward_aliases:
self.transition_fn = partial(self.transition.forward_diff, **kwargs)
elif discretization in backward_aliases:
self.transition_fn = partial(self.transition.backward_diff, **kwargs)
elif discretization in bilinear_aliases:
self.transition_fn = partial(self.transition.bilinear, **kwargs)
else: assert False
def update_memory(self, m, u, t0, t1):
"""
m: (B, M, N) [batch, memory_size, memory_order]
u: (B, M)
t0: (B,) previous time
t1: (B,) current time
"""
if torch.eq(t1, 0.).any():
return F.pad(u.unsqueeze(-1), (0, self.memory_order - 1))
else:
dt = ((t1-t0)/t1).unsqueeze(-1)
m = self.transition_fn(dt, m, u)
return m
class TimeLTICell(TimeLSICell):
""" A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf with timestamped inputs. """
name = 'tlti'
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
dt=1.0,
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
self.dt = dt
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
def update_memory(self, m, u, t0, t1):
"""
m: (B, M, N) [batch, memory_size, memory_order]
u: (B, M)
t0: (B,) previous time
t1: (B,) current time
"""
dt = self.dt*(t1-t0).unsqueeze(-1)
m = self.transition_fn(dt, m, u)
return m
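# Hedged usage sketch (not in the original module): it assumes the surrounding
# `model` package is importable. It steps a TimeLSICell on one batch of
# timestamped inputs, where the first feature of each input is the timestamp.
if __name__ == "__main__":
    cell = TimeLSICell(input_size=4, hidden_size=8, memory_size=1, memory_order=8)
    x = torch.cat([torch.ones(2, 1), torch.randn(2, 3)], dim=-1)  # (batch, 1 + features)
    state = cell.default_state(x)
    out, state = cell(x, state)
    print(out.shape)  # torch.Size([2, 8])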
| [
"model.components.get_activation",
"numpy.sqrt",
"model.op.TLagTAdaptiveTransitionManual",
"torch.eq",
"math.log",
"torch.nn.functional.pad",
"torch.nn.functional.linear",
"model.op.LegSAdaptiveTransitionManual",
"numpy.empty",
"scipy.linalg.solve_triangular",
"numpy.eye",
"numpy.ones",
"mod... | [((2120, 2229), 'torch.nn.Linear', 'nn.Linear', (['(self.input_to_memory_size + self.hidden_size)', 'self.memory_size'], {'bias': "self.architecture['bias']"}), "(self.input_to_memory_size + self.hidden_size, self.memory_size,\n bias=self.architecture['bias'])\n", (2129, 2229), True, 'import torch.nn as nn\n'), ((4835, 4891), 'model.components.get_activation', 'get_activation', (['self.hidden_activation', 'self.hidden_size'], {}), '(self.hidden_activation, self.hidden_size)\n', (4849, 4891), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((4977, 5033), 'model.components.get_activation', 'get_activation', (['self.memory_activation', 'self.memory_size'], {}), '(self.memory_activation, self.memory_size)\n', (4991, 5033), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((8253, 8279), 'numpy.ones', 'np.ones', (['(1, memory_order)'], {}), '((1, memory_order))\n', (8260, 8279), True, 'import numpy as np\n'), ((8292, 8306), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (8300, 8306), True, 'import numpy as np\n'), ((8333, 8397), 'scipy.signal.cont2discrete', 'signal.cont2discrete', (['(A, B, C, D)'], {'dt': 'dt', 'method': 'discretization'}), '((A, B, C, D), dt=dt, method=discretization)\n', (8353, 8397), False, 'from scipy import signal\n'), ((8496, 8520), 'numpy.sqrt', 'np.sqrt', (['trainable_scale'], {}), '(trainable_scale)\n', (8503, 8520), True, 'import numpy as np\n'), ((10182, 10247), 'numpy.empty', 'np.empty', (['(max_length, memory_order, memory_order)'], {'dtype': 'A.dtype'}), '((max_length, memory_order, memory_order), dtype=A.dtype)\n', (10190, 10247), True, 'import numpy as np\n'), ((10268, 10319), 'numpy.empty', 'np.empty', (['(max_length, memory_order)'], {'dtype': 'B.dtype'}), '((max_length, memory_order), dtype=B.dtype)\n', (10276, 10319), True, 'import numpy as np\n'), ((11364, 11384), 'numpy.eye', 'np.eye', (['memory_order'], {}), '(memory_order)\n', (11370, 11384), True, 'import numpy as np\n'), ((4659, 4728), 'model.components.Gate', 'Gate', (['self.hidden_size', 'preact_ctor', 'preact_args'], {'mechanism': 'self.gate'}), '(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate)\n', (4663, 4728), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((5353, 5392), 'torch.cat', 'torch.cat', (['(input_to_memory, h)'], {'dim': '(-1)'}), '((input_to_memory, h), dim=-1)\n', (5362, 5392), False, 'import torch\n'), ((5972, 6026), 'torch.cat', 'torch.cat', (['(input_to_hidden, memory_to_hidden)'], {'dim': '(-1)'}), '((input_to_hidden, memory_to_hidden), dim=-1)\n', (5981, 6026), False, 'import torch\n'), ((8417, 8437), 'numpy.eye', 'np.eye', (['memory_order'], {}), '(memory_order)\n', (8423, 8437), True, 'import numpy as np\n'), ((11446, 11469), 'torch.Tensor', 'torch.Tensor', (['A_stacked'], {}), '(A_stacked)\n', (11458, 11469), False, 'import torch\n'), ((11505, 11528), 'torch.Tensor', 'torch.Tensor', (['B_stacked'], {}), '(B_stacked)\n', (11517, 11528), False, 'import torch\n'), ((11695, 11731), 'torch.nn.functional.pad', 'F.pad', (['u', '(0, self.memory_order - 1)'], {}), '(u, (0, self.memory_order - 1))\n', (11700, 11731), True, 'import torch.nn.functional as F\n'), ((12505, 12544), 'torch.cat', 'torch.cat', (['(input_to_memory, h)'], {'dim': '(-1)'}), '((input_to_memory, h), dim=-1)\n', (12514, 12544), False, 'import torch\n'), ((13135, 13189), 'torch.cat', 'torch.cat', (['(input_to_hidden, 
memory_to_hidden)'], {'dim': '(-1)'}), '((input_to_hidden, memory_to_hidden), dim=-1)\n', (13144, 13189), False, 'import torch\n'), ((15377, 15424), 'functools.partial', 'partial', (['self.transition.forward_diff'], {}), '(self.transition.forward_diff, **kwargs)\n', (15384, 15424), False, 'from functools import partial\n'), ((2350, 2415), 'model.components.get_initializer', 'get_initializer', (["self.initializers['uxh']", 'self.memory_activation'], {}), "(self.initializers['uxh'], self.memory_activation)\n", (2365, 2415), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((2509, 2573), 'model.components.get_initializer', 'get_initializer', (["self.initializers['ux']", 'self.memory_activation'], {}), "(self.initializers['ux'], self.memory_activation)\n", (2524, 2573), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((2688, 2752), 'model.components.get_initializer', 'get_initializer', (["self.initializers['uh']", 'self.memory_activation'], {}), "(self.initializers['uh'], self.memory_activation)\n", (2703, 2752), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((3261, 3326), 'model.components.get_initializer', 'get_initializer', (["self.initializers['hxm']", 'self.hidden_activation'], {}), "(self.initializers['hxm'], self.hidden_activation)\n", (3276, 3326), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((3440, 3504), 'model.components.get_initializer', 'get_initializer', (["self.initializers['hx']", 'self.hidden_activation'], {}), "(self.initializers['hx'], self.hidden_activation)\n", (3455, 3504), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((3639, 3703), 'model.components.get_initializer', 'get_initializer', (["self.initializers['hm']", 'self.hidden_activation'], {}), "(self.initializers['hm'], self.hidden_activation)\n", (3654, 3703), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((3922, 3971), 'torch.Tensor', 'torch.Tensor', (['self.memory_size', 'self.memory_order'], {}), '(self.memory_size, self.memory_order)\n', (3934, 3971), False, 'import torch\n'), ((3985, 4049), 'model.components.get_initializer', 'get_initializer', (["self.initializers['um']", 'self.memory_activation'], {}), "(self.initializers['um'], self.memory_activation)\n", (4000, 4049), False, 'from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer\n'), ((8598, 8614), 'torch.Tensor', 'torch.Tensor', (['dA'], {}), '(dA)\n', (8610, 8614), False, 'import torch\n'), ((8654, 8670), 'torch.Tensor', 'torch.Tensor', (['dB'], {}), '(dB)\n', (8666, 8670), False, 'import torch\n'), ((8720, 8759), 'torch.Tensor', 'torch.Tensor', (['(dA / self.trainable_scale)'], {}), '(dA / self.trainable_scale)\n', (8732, 8759), False, 'import torch\n'), ((8815, 8854), 'torch.Tensor', 'torch.Tensor', (['(dB / self.trainable_scale)'], {}), '(dB / self.trainable_scale)\n', (8827, 8854), False, 'import torch\n'), ((9192, 9211), 'torch.nn.functional.linear', 'F.linear', (['u', 'self.B'], {}), '(u, self.B)\n', (9200, 9211), True, 'import torch.nn.functional as F\n'), ((9294, 9336), 'torch.nn.functional.linear', 'F.linear', (['u', '(self.B * self.trainable_scale)'], {}), '(u, self.B * self.trainable_scale)\n', (9302, 9336), True, 'import torch.nn.functional as F\n'), ((11855, 11877), 'torch.nn.functional.linear', 'F.linear', 
(['u', 'self.B[t]'], {}), '(u, self.B[t])\n', (11863, 11877), True, 'import torch.nn.functional as F\n'), ((14598, 14645), 'model.op.LegSAdaptiveTransitionManual', 'LegSAdaptiveTransitionManual', (['self.memory_order'], {}), '(self.memory_order)\n', (14626, 14645), False, 'from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual\n'), ((14792, 14839), 'model.op.LegTAdaptiveTransitionManual', 'LegTAdaptiveTransitionManual', (['self.memory_order'], {}), '(self.memory_order)\n', (14820, 14839), False, 'from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual\n'), ((15507, 15555), 'functools.partial', 'partial', (['self.transition.backward_diff'], {}), '(self.transition.backward_diff, **kwargs)\n', (15514, 15555), False, 'from functools import partial\n'), ((15925, 15942), 'torch.eq', 'torch.eq', (['t1', '(0.0)'], {}), '(t1, 0.0)\n', (15933, 15942), False, 'import torch\n'), ((6401, 6432), 'torch.cat', 'torch.cat', (['(m_inputs[0], h)', '(-1)'], {}), '((m_inputs[0], h), -1)\n', (6410, 6432), False, 'import torch\n'), ((9170, 9189), 'torch.nn.functional.linear', 'F.linear', (['m', 'self.A'], {}), '(m, self.A)\n', (9178, 9189), True, 'import torch.nn.functional as F\n'), ((9249, 9291), 'torch.nn.functional.linear', 'F.linear', (['m', '(self.A * self.trainable_scale)'], {}), '(m, self.A * self.trainable_scale)\n', (9257, 9291), True, 'import torch.nn.functional as F\n'), ((10538, 10547), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10544, 10547), True, 'import numpy as np\n'), ((11830, 11852), 'torch.nn.functional.linear', 'F.linear', (['m', 'self.A[t]'], {}), '(m, self.A[t])\n', (11838, 11852), True, 'import torch.nn.functional as F\n'), ((13564, 13595), 'torch.cat', 'torch.cat', (['(m_inputs[0], h)', '(-1)'], {}), '((m_inputs[0], h), -1)\n', (13573, 13595), False, 'import torch\n'), ((14988, 15035), 'model.op.LagTAdaptiveTransitionManual', 'LagTAdaptiveTransitionManual', (['self.memory_order'], {}), '(self.memory_order)\n', (15016, 15035), False, 'from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual\n'), ((15638, 15681), 'functools.partial', 'partial', (['self.transition.bilinear'], {}), '(self.transition.bilinear, **kwargs)\n', (15645, 15681), False, 'from functools import partial\n'), ((10715, 10724), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10721, 10724), True, 'import numpy as np\n'), ((15185, 15249), 'model.op.TLagTAdaptiveTransitionManual', 'TLagTAdaptiveTransitionManual', (['self.memory_order'], {}), '(self.memory_order, **measure_args)\n', (15214, 15249), False, 'from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual\n'), ((10699, 10708), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10705, 10708), True, 'import numpy as np\n'), ((10793, 10802), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10799, 10802), True, 'import numpy as np\n'), ((11239, 11299), 'scipy.linalg.solve_triangular', 'la.solve_triangular', (['A', '(A_stacked[t - 1] @ B - B)'], {'lower': '(True)'}), '(A, A_stacked[t - 1] @ B - B, lower=True)\n', (11258, 11299), True, 'from scipy import linalg as la\n'), ((10933, 10942), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10939, 10942), True, 'import numpy as np\n'), ((10953, 10962), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (10959, 
10962), True, 'import numpy as np\n'), ((11040, 11049), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (11046, 11049), True, 'import numpy as np\n'), ((11172, 11187), 'math.log', 'math.log', (['(t + 1)'], {}), '(t + 1)\n', (11180, 11187), False, 'import math\n'), ((11190, 11201), 'math.log', 'math.log', (['t'], {}), '(t)\n', (11198, 11201), False, 'import math\n')] |
"""
Unit and regression test for the maxsmi package.
"""
# Import package, test suite, and other packages as needed
# import maxsmi
import pytest
import sys
import numpy
from maxsmi.utils_smiles import (
smiles_to_canonical,
smiles_to_random,
smiles_to_max_random,
is_connected,
smiles_to_selfies,
smiles_to_deepsmiles,
control_smiles_duplication,
get_num_heavy_atoms,
validity_check,
smiles_from_folder_name,
smiles_to_folder_name,
smiles_to_morgan_fingerprint,
)
def test_maxsmi_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "maxsmi" in sys.modules
####################
@pytest.mark.parametrize(
"smiles, solution",
[("C", "C"), ("OC", "CO"), ("KCahsbl", None)],
)
def test_smiles_to_canonical(smiles, solution):
canonical_smi = smiles_to_canonical(smiles)
assert solution == canonical_smi
@pytest.mark.parametrize(
"smiles, int_aug, solution",
[("C", 3, ["C", "C", "C"]), ("sakjncal", 3, None), ("OC", 0, ["OC"])],
)
def test_smiles_to_random(smiles, int_aug, solution):
rand_smi = smiles_to_random(smiles, int_aug)
assert solution == rand_smi
def test_smiles_to_random_exception():
with pytest.raises(Exception):
assert smiles_to_random("OC", -1)
@pytest.mark.parametrize(
"smiles, solution",
[("C.", False), ("CCC", True)],
)
def test_is_connected(smiles, solution):
connected_smi = is_connected(smiles)
assert solution == connected_smi
@pytest.mark.parametrize(
"smiles, max_duplication, solution",
[
("Csab", 3, None),
("CO", -1, ["CO"]),
("CCC", 300, ["CCC", "C(C)C"]),
],
)
def test_smiles_to_max_random(smiles, max_duplication, solution):
ran_max_smi = smiles_to_max_random(smiles, max_duplication)
assert solution == ran_max_smi
@pytest.mark.parametrize(
"smiles, control_function, solution",
[
(["CCC", "CCC"], lambda x: 1, ["CCC"]),
(["CCC", "CCC"], lambda x: x, ["CCC", "CCC"]),
(["CCC", "CCC", "C(C)C", "C(C)C", "C(C)C"], lambda x: 1, ["CCC", "C(C)C"]),
(["CCC", "CCC", "C(C)C"], lambda x: x, ["CCC", "CCC", "C(C)C"]),
(["CCC", "CCC", "C(C)C"], lambda x: x / 2, ["CCC", "C(C)C"]),
],
)
def test_control_smiles_duplication(smiles, control_function, solution):
controlled_duplicates = control_smiles_duplication(
smiles, duplicate_control=control_function
)
assert solution == controlled_duplicates
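# Reading the cases above (an inferred semantics, based only on the expected
# outputs and not on maxsmi's documentation): duplicate_control maps the
# occurrence count of each SMILES to the number of copies to keep, so
# lambda x: 1 deduplicates, lambda x: x keeps every copy, and lambda x: x / 2
# halves the duplicates (the single C(C)C is still kept once, suggesting
# fractional counts are rounded up).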
@pytest.mark.parametrize(
"smiles, solution", [("c1ccccc1", ["[C][=C][C][=C][C][=C][Ring1][=Branch1]"])]
)
def test_smiles_to_selfies(smiles, solution):
selfies = smiles_to_selfies(smiles)
assert solution == selfies
@pytest.mark.parametrize(
"smiles, solution", [("c1cccc(C(=O)Cl)c1", ["cccccC=O)Cl))c6"])]
)
def test_smiles_to_deepsmiles(smiles, solution):
deepsmiles = smiles_to_deepsmiles(smiles)
assert solution == deepsmiles
@pytest.mark.parametrize(
"smiles, solution",
[("C", 1), ("OC", 2), ("KCahsbl", None)],
)
def test_get_num_heavy_atoms(smiles, solution):
num_heavy_atoms = get_num_heavy_atoms(smiles)
assert solution == num_heavy_atoms
@pytest.mark.parametrize(
"smiles, solution",
[
("CCC", "CCC"),
],
)
def test_validity_check(smiles, solution):
result = validity_check(smiles)
assert solution == result
def test_validity_check_exception():
with pytest.raises(Exception):
assert validity_check("CC111C")
@pytest.mark.parametrize(
"smiles, solution",
[("CCC%2F%5C", "CCC/\\"), ("%2A%2A", "**")],
)
def test_smiles_from_folder_name(smiles, solution):
new_smiles = smiles_from_folder_name(smiles)
assert solution == new_smiles
@pytest.mark.parametrize(
"smiles, solution",
[
("CCC/c\\c", "CCC%2Fc%5Cc"),
],
)
def test_smiles_to_folder_name(smiles, solution):
smiles = smiles_to_folder_name(smiles)
assert solution == smiles
@pytest.mark.parametrize(
"smiles, solution",
[
("C", numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])),
],
)
def test_smiles_to_morgan_fingerprint(smiles, solution):
smiles = smiles_to_morgan_fingerprint(smiles, nbits=10)
assert (solution == smiles).all()
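# All of the tests above can be collected with a plain pytest invocation from
# the repository root, e.g. `pytest -v` (the module defines no fixtures beyond
# the parametrize decorators, so no extra setup is assumed).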
| [
"maxsmi.utils_smiles.smiles_to_random",
"maxsmi.utils_smiles.smiles_to_morgan_fingerprint",
"maxsmi.utils_smiles.smiles_to_max_random",
"maxsmi.utils_smiles.validity_check",
"maxsmi.utils_smiles.smiles_to_deepsmiles",
"maxsmi.utils_smiles.smiles_from_folder_name",
"maxsmi.utils_smiles.get_num_heavy_atom... | [((679, 774), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('C', 'C'), ('OC', 'CO'), ('KCahsbl', None)]"], {}), "('smiles, solution', [('C', 'C'), ('OC', 'CO'), (\n 'KCahsbl', None)])\n", (702, 774), False, 'import pytest\n'), ((917, 1044), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, int_aug, solution"""', "[('C', 3, ['C', 'C', 'C']), ('sakjncal', 3, None), ('OC', 0, ['OC'])]"], {}), "('smiles, int_aug, solution', [('C', 3, ['C', 'C',\n 'C']), ('sakjncal', 3, None), ('OC', 0, ['OC'])])\n", (940, 1044), False, 'import pytest\n'), ((1308, 1383), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('C.', False), ('CCC', True)]"], {}), "('smiles, solution', [('C.', False), ('CCC', True)])\n", (1331, 1383), False, 'import pytest\n'), ((1517, 1654), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, max_duplication, solution"""', "[('Csab', 3, None), ('CO', -1, ['CO']), ('CCC', 300, ['CCC', 'C(C)C'])]"], {}), "('smiles, max_duplication, solution', [('Csab', 3,\n None), ('CO', -1, ['CO']), ('CCC', 300, ['CCC', 'C(C)C'])])\n", (1540, 1654), False, 'import pytest\n'), ((1861, 2231), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, control_function, solution"""', "[(['CCC', 'CCC'], lambda x: 1, ['CCC']), (['CCC', 'CCC'], lambda x: x, [\n 'CCC', 'CCC']), (['CCC', 'CCC', 'C(C)C', 'C(C)C', 'C(C)C'], lambda x: 1,\n ['CCC', 'C(C)C']), (['CCC', 'CCC', 'C(C)C'], lambda x: x, ['CCC', 'CCC',\n 'C(C)C']), (['CCC', 'CCC', 'C(C)C'], lambda x: x / 2, ['CCC', 'C(C)C'])]"], {}), "('smiles, control_function, solution', [(['CCC',\n 'CCC'], lambda x: 1, ['CCC']), (['CCC', 'CCC'], lambda x: x, ['CCC',\n 'CCC']), (['CCC', 'CCC', 'C(C)C', 'C(C)C', 'C(C)C'], lambda x: 1, [\n 'CCC', 'C(C)C']), (['CCC', 'CCC', 'C(C)C'], lambda x: x, ['CCC', 'CCC',\n 'C(C)C']), (['CCC', 'CCC', 'C(C)C'], lambda x: x / 2, ['CCC', 'C(C)C'])])\n", (1884, 2231), False, 'import pytest\n'), ((2507, 2615), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('c1ccccc1', ['[C][=C][C][=C][C][=C][Ring1][=Branch1]'])]"], {}), "('smiles, solution', [('c1ccccc1', [\n '[C][=C][C][=C][C][=C][Ring1][=Branch1]'])])\n", (2530, 2615), False, 'import pytest\n'), ((2737, 2831), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('c1cccc(C(=O)Cl)c1', ['cccccC=O)Cl))c6'])]"], {}), "('smiles, solution', [('c1cccc(C(=O)Cl)c1', [\n 'cccccC=O)Cl))c6'])])\n", (2760, 2831), False, 'import pytest\n'), ((2965, 3055), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('C', 1), ('OC', 2), ('KCahsbl', None)]"], {}), "('smiles, solution', [('C', 1), ('OC', 2), (\n 'KCahsbl', None)])\n", (2988, 3055), False, 'import pytest\n'), ((3202, 3263), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('CCC', 'CCC')]"], {}), "('smiles, solution', [('CCC', 'CCC')])\n", (3225, 3263), False, 'import pytest\n'), ((3516, 3609), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('CCC%2F%5C', 'CCC/\\\\'), ('%2A%2A', '**')]"], {}), "('smiles, solution', [('CCC%2F%5C', 'CCC/\\\\'), (\n '%2A%2A', '**')])\n", (3539, 3609), False, 'import pytest\n'), ((3754, 3828), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""smiles, solution"""', "[('CCC/c\\\\c', 'CCC%2Fc%5Cc')]"], {}), "('smiles, solution', [('CCC/c\\\\c', 'CCC%2Fc%5Cc')])\n", (3777, 3828), 
False, 'import pytest\n'), ((849, 876), 'maxsmi.utils_smiles.smiles_to_canonical', 'smiles_to_canonical', (['smiles'], {}), '(smiles)\n', (868, 876), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((1121, 1154), 'maxsmi.utils_smiles.smiles_to_random', 'smiles_to_random', (['smiles', 'int_aug'], {}), '(smiles, int_aug)\n', (1137, 1154), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((1456, 1476), 'maxsmi.utils_smiles.is_connected', 'is_connected', (['smiles'], {}), '(smiles)\n', (1468, 1476), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((1777, 1822), 'maxsmi.utils_smiles.smiles_to_max_random', 'smiles_to_max_random', (['smiles', 'max_duplication'], {}), '(smiles, max_duplication)\n', (1797, 1822), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((2374, 2444), 'maxsmi.utils_smiles.control_smiles_duplication', 'control_smiles_duplication', (['smiles'], {'duplicate_control': 'control_function'}), '(smiles, duplicate_control=control_function)\n', (2400, 2444), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((2677, 2702), 'maxsmi.utils_smiles.smiles_to_selfies', 'smiles_to_selfies', (['smiles'], {}), '(smiles)\n', (2694, 2702), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((2899, 2927), 'maxsmi.utils_smiles.smiles_to_deepsmiles', 'smiles_to_deepsmiles', (['smiles'], {}), '(smiles)\n', (2919, 2927), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((3132, 3159), 'maxsmi.utils_smiles.get_num_heavy_atoms', 'get_num_heavy_atoms', (['smiles'], {}), '(smiles)\n', (3151, 3159), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, 
smiles_to_morgan_fingerprint\n'), ((3346, 3368), 'maxsmi.utils_smiles.validity_check', 'validity_check', (['smiles'], {}), '(smiles)\n', (3360, 3368), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((3685, 3716), 'maxsmi.utils_smiles.smiles_from_folder_name', 'smiles_from_folder_name', (['smiles'], {}), '(smiles)\n', (3708, 3716), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((3918, 3947), 'maxsmi.utils_smiles.smiles_to_folder_name', 'smiles_to_folder_name', (['smiles'], {}), '(smiles)\n', (3939, 3947), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((4175, 4221), 'maxsmi.utils_smiles.smiles_to_morgan_fingerprint', 'smiles_to_morgan_fingerprint', (['smiles'], {'nbits': '(10)'}), '(smiles, nbits=10)\n', (4203, 4221), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((1237, 1261), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1250, 1261), False, 'import pytest\n'), ((1278, 1304), 'maxsmi.utils_smiles.smiles_to_random', 'smiles_to_random', (['"""OC"""', '(-1)'], {}), "('OC', -1)\n", (1294, 1304), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((3447, 3471), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3460, 3471), False, 'import pytest\n'), ((3488, 3512), 'maxsmi.utils_smiles.validity_check', 'validity_check', (['"""CC111C"""'], {}), "('CC111C')\n", (3502, 3512), False, 'from maxsmi.utils_smiles import smiles_to_canonical, smiles_to_random, smiles_to_max_random, is_connected, smiles_to_selfies, smiles_to_deepsmiles, control_smiles_duplication, get_num_heavy_atoms, validity_check, smiles_from_folder_name, smiles_to_folder_name, smiles_to_morgan_fingerprint\n'), ((4050, 4093), 'numpy.array', 'numpy.array', (['[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (4061, 4093), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import initializers, regularizers
from keras.engine.topology import InputSpec
from keras import backend as K
import numpy as np
import warnings
from ..capsule import Module
from ..utils.caps_utils import mixed_shape
from ..utils import dissimilarity_funcs as diss_funcs
from LVQ import constraints
from ..utils import pre_training
class PointPrototype(Module):
def __init__(self,
prototype_initializer='TruncatedNormal',
prototype_regularizer=None,
prototype_constraint=None, # That's similar to a normalization.
signal_output='signals',
linear_factor=0.5, # linear factor (None --> always add) prev_diss*(1-alpha) + alpha*curr_diss
signal_regularizer=None,
diss_regularizer=None,
**kwargs):
if linear_factor is None or (isinstance(linear_factor, float) and 0 < linear_factor < 1):
self.linear_factor = linear_factor
else:
raise TypeError("The linear factor for the dissimilarity value must be None or a float in the range of "
"(0,1). You provide: " + str(linear_factor))
self.prototype_initializer = initializers.get(prototype_initializer)
self.prototype_regularizer = regularizers.get(prototype_regularizer)
self.prototype_constraint = constraints.get(prototype_constraint)
self.prototypes = None
self.signal_output = signal_output
self.output_regularizers = [regularizers.get(signal_regularizer),
regularizers.get(diss_regularizer)]
# be sure to call this at the end
super(PointPrototype, self).__init__(module_input=True,
module_output=True,
support_sparse_signal=True,
support_full_signal=True,
**self._del_module_args(**kwargs))
def _build(self, input_shape):
if not self.built:
if input_shape[0][1] != self.capsule_number:
raise ValueError('The capsule number provided by input_shape is not equal the self.capsule_number: '
'input_shape[0][1]=' + str(input_shape[0][1]) + ' != ' +
'self.capsule_number=' + str(self.capsule_number))
if input_shape[1][1] != self.proto_number:
raise ValueError('The prototype number provided by input_shape is not equal the self.proto_number: '
'input_shape[1][1]=' + str(input_shape[1][1]) + ' != ' +
'self.proto_number=' + str(self.proto_number))
# the signal dimension is input_shape[0][3:]
self.prototypes = self.add_weight(shape=(self.proto_number,) + tuple(input_shape[0][3:]),
initializer=self.prototype_initializer,
regularizer=self.prototype_regularizer,
constraint=self.prototype_constraint,
name='prototypes')
self.input_spec = [InputSpec(shape=(None,) + tuple(input_shape[0][1:])),
InputSpec(shape=(None,) + tuple(input_shape[1][1:]))]
def _build_sparse(self, input_shape):
if self.signal_output != 'signals':
raise ValueError('If the signal is sparse, the signal_output must be `signals`.')
# manipulate input_shape to call the full build method instead of a new implementation
signal_shape = list(input_shape[0])
signal_shape[1] = self.capsule_number
self._build([tuple(signal_shape), input_shape[1]])
self.input_spec = [InputSpec(shape=(None,) + tuple(input_shape[0][1:])),
InputSpec(shape=(None,) + tuple(input_shape[1][1:]))]
def _call(self, inputs, **kwargs):
# signals_shape input: batch x capsule_number x channels x dim1 x dim2 x ... x dimN
# after measuring the signal is (output): batch x proto_number x channels x dim1 x dim2 x ... x dimN
signals = inputs[0]
diss0 = inputs[1]
if self.sparse_signal:
signals, diss1 = self.distance_sparse(signals)
else:
signals, diss1 = self.distance(signals)
with K.name_scope('dissimilarity_update'):
if self.linear_factor is None:
diss = diss0 + diss1
else:
diss = (1 - self.linear_factor) * diss0 + self.linear_factor * diss1
return {0: signals, 1: diss}
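    # Numeric illustration of the dissimilarity update above (a sketch, not
    # part of the module): with linear_factor alpha = 0.5, a previous
    # dissimilarity of 4.0 and a current one of 2.0 blend to
    #     (1 - 0.5) * 4.0 + 0.5 * 2.0 = 3.0,
    # an exponential-smoothing style update, while linear_factor=None simply
    # adds the two values (4.0 + 2.0 = 6.0).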
def _call_sparse(self, inputs, **kwargs):
if self.signal_output != 'signals':
raise ValueError('If the signal is sparse, the signal_output must be `signals`.')
return self._call(inputs, **kwargs)
def distance(self, signals):
raise NotImplementedError
def distance_sparse(self, signals):
raise NotImplementedError
def _compute_output_shape(self, input_shape):
signal_shape = list(input_shape[0])
diss_shape = list(input_shape[1])
signal_shape[1] = self.proto_number
return [tuple(signal_shape), tuple(diss_shape)]
def _compute_output_shape_sparse(self, input_shape):
if self.signal_output != 'signals':
raise ValueError('If the signal is sparse, the signal_output must be `signals`.')
return input_shape
def get_config(self):
config = {'prototype_initializer': initializers.serialize(self.prototype_initializer),
'prototype_regularizer': regularizers.serialize(self.prototype_regularizer),
'prototype_constraint': constraints.serialize(self.prototype_constraint),
'signal_output': self.signal_output,
'linear_factor': self.linear_factor,
'signal_regularizer': regularizers.serialize(self.output_regularizers[0]),
'diss_regularizer': regularizers.serialize(self.output_regularizers[1])}
super_config = super(PointPrototype, self).get_config()
return dict(list(super_config.items()) + list(config.items()))
def pre_training(self,
x_train,
y_train=None, # class labels [0,..,n], if given assume class correspondence and init respectively
capsule_inputs_are_equal=False, # use just the first capsule channels; ignored if sparse
batch_version=False,
**kmeans_params): # params of kMeans or batch kMeans of sklearn
# overwrite flag if sparse
if self.sparse_signal:
capsule_inputs_are_equal = True
try:
self._is_callable()
except ValueError:
raise ValueError("The module " + self.name + " is not proper initialized for pre-training. Be sure that you"
" assigned the module to a capsule and built it by calling the capsule.")
if self.input_spec[0].shape[1:] != x_train.shape[1:]:
raise ValueError("The shape of x_train must be equal to the assumed input shape. You provide: "
+ str(x_train.shape) + "; we expect: " + str(self.input_spec[0].shape))
if y_train is not None:
# convert y_train to non categorical
if y_train.ndim != 1:
y_train = np.argmax(y_train, 1)
# check if all classes are present
if list(np.unique(y_train)) != list(range(self.capsule_number)):
raise ValueError("We detected that not each class is represented in the training data. Be sure that "
"for each class, at least one sample is given.")
if y_train.shape[0] != x_train.shape[0]:
raise ValueError("The number of samples in y_train and x_train must be equal. You provide: y_train="
+ str(y_train.shape[0]) + " is not equal x_train=" + str(x_train.shape[0]) + ".")
centers, _, _ = _pre_train_prototypes(x_train,
y_train,
capsule_inputs_are_equal,
batch_version,
self._proto_distrib,
**kmeans_params)
# we apply constraint here, because pre-training is like an optimization step
if self.prototype_constraint is not None:
centers = K.eval(self.prototype_constraint(K.variable(centers)))
self.set_weights([centers] + self.get_weights()[1:])
# definition of a prototype which consists of a data point and a matrix
class PointMatrixPrototype(PointPrototype):
def __init__(self,
prototype_initializer='TruncatedNormal',
prototype_regularizer=None,
prototype_constraint=None, # That's similar to a normalization.
matrix_initializer='TruncatedNormal',
matrix_regularizer=None,
matrix_constraint=None, # That's similar to a normalization.
matrix_scope='local', # local, global or capsule_wise
projected_atom_shape=None,
signal_output='signals',
linear_factor=0.5, # linear factor (None --> always add) prev_diss*(1-alpha) + alpha*curr_diss
signal_regularizer=None,
diss_regularizer=None,
**kwargs):
super(PointMatrixPrototype, self).__init__(prototype_initializer=prototype_initializer,
prototype_regularizer=prototype_regularizer,
prototype_constraint=prototype_constraint,
signal_output=signal_output,
linear_factor=linear_factor,
signal_regularizer=signal_regularizer,
diss_regularizer=diss_regularizer,
**kwargs)
# scope: local, global, capsule_wise (defines the apply scope of the transformation)
if matrix_scope in ('local', 'global', 'capsule_wise'):
self.matrix_scope = matrix_scope
else:
raise ValueError("scope must be 'local', 'global' or 'capsule_wise': You provide: " + str(matrix_scope))
if isinstance(projected_atom_shape, (list, tuple)):
self._projected_atom_shape = tuple(projected_atom_shape)
elif isinstance(projected_atom_shape, int):
self._projected_atom_shape = (projected_atom_shape,)
elif projected_atom_shape is None:
self._projected_atom_shape = projected_atom_shape
else:
raise ValueError("projected_atom_shape must be list, tuple, int or None. You provide: " +
str(projected_atom_shape))
self.projected_atom_shape = None
self.matrix_initializer = initializers.get(matrix_initializer)
self.matrix_regularizer = regularizers.get(matrix_regularizer)
self.matrix_constraint = constraints.get(matrix_constraint)
self.matrices = None
self.input_output_shape_equal = ('signals', 'protos')
def _build(self, input_shape):
if not self.built:
# compute shape of matrix_shape
if self.matrix_scope == 'local':
self._num_maps = (self.proto_number,)
elif self.matrix_scope == 'global':
self._num_maps = ()
else: # capsule_wise
self._num_maps = (self.capsule_number,)
super(PointMatrixPrototype, self)._build(input_shape)
if self._projected_atom_shape is None:
self.projected_atom_shape = self._compute_output_shape(input_shape)[0][3:]
else:
self.projected_atom_shape = self._projected_atom_shape
matrix_shape = self._num_maps + (np.prod(np.array(input_shape[0][3:], dtype=int)),
np.prod(np.array(self.projected_atom_shape, dtype=int)))
# transposed matrix (we compute x^T * A^T instead of A * x, because the signal is always given in the form
# x^T)
self.matrices = self.add_weight(shape=matrix_shape,
initializer=self.matrix_initializer,
regularizer=self.matrix_regularizer,
constraint=self.matrix_constraint,
name='matrices')
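    # Shape illustration for the weight created above (a sketch with assumed
    # numbers): for 10 prototypes, an input atom shape of (28, 28) and
    # projected_atom_shape=(12,), matrix_shape becomes
    #     local        -> (10, 784, 12)             one map per prototype
    #     global       -> (784, 12)                 a single shared map
    #     capsule_wise -> (capsule_number, 784, 12)
    # stored transposed so that x^T * A^T can be computed on row-form signals,
    # as the comment above notes.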
def _compute_output_shape(self, input_shape):
signal_shape = list(input_shape[0])
diss_shape = input_shape[1]
signal_shape[1] = self.proto_number
if self.signal_output in self.input_output_shape_equal:
return [signal_shape, diss_shape]
else:
if not self.built:
if self._projected_atom_shape is None:
projected_atom_shape = tuple(input_shape[0][3:])
else:
projected_atom_shape = self._projected_atom_shape
return [tuple(signal_shape[0:3]) + projected_atom_shape, diss_shape]
else:
return [tuple(signal_shape[0:3]) + self.projected_atom_shape, diss_shape]
def get_config(self):
config = {'matrix_initializer': initializers.serialize(self.matrix_initializer),
'matrix_regularizer': regularizers.serialize(self.matrix_regularizer),
'matrix_constraint': constraints.serialize(self.matrix_constraint),
'matrix_scope': self.matrix_scope,
'projected_atom_shape': self.projected_atom_shape}
super_config = super(PointMatrixPrototype, self).get_config()
return dict(list(super_config.items()) + list(config.items()))
def pre_training(self,
x_train,
y_train=None, # class labels [0,..,n], if given assume class correspondence and init respectively
capsule_inputs_are_equal=False, # use just the first capsule channels
batch_version=False,
**sk_params):
# sk_params: {'kmeans_params': dict,
# 'svd_params': dict}
# kmeans_params ... params dict of kmeans_params or kmeans_params_batch; Note: in the case of a memory error the
        # method switches automatically to the batch version. In this case be sure that all parameters are accepted by the
# batch version.
# svd_params ... params of svd_params
        # Todo: test that svd works correctly (full rank and sparse rank)
# overwrite flag if sparse
if self.sparse_signal:
capsule_inputs_are_equal = True
        if 'kmeans_params' in sk_params:
kmeans_params = sk_params['kmeans_params']
if not isinstance(kmeans_params, dict):
raise TypeError("The type of kmeans_params parameters in sk_params must be dict.")
del sk_params['kmeans_params']
else:
kmeans_params = {}
        if 'svd_params' in sk_params:
svd_params = sk_params['svd_params']
if not isinstance(svd_params, dict):
raise TypeError("The type of the svd_params parameters in sk_params must be dict.")
del sk_params['svd_params']
else:
svd_params = {}
try:
self._is_callable()
except ValueError:
raise ValueError("The module " + self.name + " is not proper initialized for pre-training. Be sure that you"
" assigned the module to a capsule and built it by calling the capsule.")
if self.input_spec[0].shape[1:] != x_train.shape[1:]:
raise ValueError("The shape of x_train must be equal to the assumed input shape. You provide: "
+ str(x_train.shape) + "; we expect: " + str(self.input_spec[0].shape))
if y_train is not None:
# convert y_train to non categorical
if y_train.ndim != 1:
y_train = np.argmax(y_train, 1)
# check if all classes are present
if list(np.unique(y_train)) != list(range(self.capsule_number)):
raise ValueError("We detected that not each class is represented in the training data. Be sure that "
"for each class, at least one sample is given.")
if y_train.shape[0] != x_train.shape[0]:
raise ValueError("The number of samples in y_train and x_train must be equal. You provide: y_train="
+ str(y_train.shape[0]) + " is not equal x_train=" + str(x_train.shape[0]) + ".")
centers, labels, clusters = _pre_train_prototypes(x_train,
y_train,
capsule_inputs_are_equal,
batch_version,
self._proto_distrib,
**kmeans_params)
# we apply constraint here, because pre-training is like an optimization step
if self.prototype_constraint is not None:
centers = K.eval(self.prototype_constraint(K.variable(centers)))
# transform clusters in accordance to the matrix scope
if self.matrix_scope == 'global':
clusters = [np.reshape(np.concatenate(clusters, 0), [-1, np.prod(self.input_spec[0].shape[3:], dtype=int)])]
elif self.matrix_scope == 'local':
clusters_ = []
for i in range(len(clusters)):
for l in range(max(labels[i])+1):
cluster_flatten = np.reshape(clusters[i], [-1, np.prod(self.input_spec[0].shape[3:], dtype=int)])
clusters_.append(cluster_flatten[labels[i] == l, :])
clusters = clusters_
else:
clusters_ = []
for i in range(len(clusters)):
clusters_.append(np.reshape(clusters[i], [-1, np.prod(self.input_spec[0].shape[3:], dtype=int)]))
clusters = clusters_
# center the clusters
for i in range(len(clusters)):
clusters[i] = clusters[i] - np.mean(clusters[i], axis=0)
if K.int_shape(self.matrices)[-1] > K.int_shape(self.matrices)[-2]:
warnings.warn("Can't pre-train matrices if the projection shape (" + str(K.int_shape(self.matrices)[-1]) +
") is higher than the input shape (" + str(K.int_shape(self.matrices)[-2]) + "). Skip " +
"pre-training of matrices and use the current parameters.")
self.set_weights([centers, self.get_weights()[1]])
else:
n_components = np.prod(self.projected_atom_shape, dtype=int)
matrices, valid = pre_training.svd(clusters, n_components=n_components, **svd_params)
old_matrices = self.get_weights()[1]
warning_sent = False
for i in range(len(matrices)):
if not valid[i]:
if not warning_sent:
warnings.warn("Some of the matrices were not be pre-tarined successfully. The old matrix "
"instantiation is reused and the pre-trained matrix is skipped. One reason for "
"that could be that your cluster doesn't contain enough points regarding your "
"matrix dimension. If you are not pre-training on the whole dataset try to "
"increase the number of the pre-training batch.")
warning_sent = True
matrices[i] = old_matrices[i]
if self.matrix_scope == 'global':
matrices = matrices[0]
else:
matrices = np.stack(matrices, axis=0)
# we apply constraint here, because pre-training is like an optimization step
if self.matrix_constraint is not None:
matrices = K.eval(self.matrix_constraint(K.variable(matrices)))
self.set_weights([centers, matrices] + self.get_weights()[2:])
class MinkowskiDistance(PointPrototype):
def __init__(self,
order_p=2, # could be np.inf
prototype_initializer='TruncatedNormal',
prototype_regularizer=None,
prototype_constraint=None, # That's similar to a normalization.
signal_output='signals', # what should be send to the output (signals, protos, params)
epsilon=K.epsilon(), # used for stabilization of the sqrt
squared_dissimilarity=False, # output the squared distance (without reciprocal power)
linear_factor=0.5, # linear factor (None --> always add) prev_diss*(1-alpha) + alpha*curr_diss
signal_regularizer=None,
diss_regularizer=None,
**kwargs):
valid_signal_output = ('signals', 'protos')
if signal_output not in valid_signal_output:
raise ValueError("signal_output must be in " + str(valid_signal_output) + ". You provide: "
+ str(signal_output))
if isinstance(order_p, (int, float)) or order_p == np.inf:
if order_p > 0:
self.order_p = order_p
else:
raise ValueError("The order p of the Minkowski distance must be greater than 0. You provide: "
+ str(order_p))
else:
raise TypeError("order_p must be int float or numpy.inf. You provide: " + str(order_p))
self.squared_dissimilarity = squared_dissimilarity
self.epsilon = epsilon
super(MinkowskiDistance, self).__init__(prototype_initializer=prototype_initializer,
prototype_regularizer=prototype_regularizer,
prototype_constraint=prototype_constraint,
signal_output=signal_output,
linear_factor=linear_factor,
signal_regularizer=signal_regularizer,
diss_regularizer=diss_regularizer,
**kwargs)
def distance(self, signals):
        # Todo: test with and without sqrt given this simple maximum stabilization; if it works, think
        # about more complex stabilization implementations (compare convergence behavior with and without sqrt)
# tile capsule regarding proto distribution
if self.proto_number != self.capsule_number:
with K.name_scope('signal_preprocessing'):
# vector_shape for permute commands
vector_shape = list(range(3, self.input_spec[0].ndim))
signals = K.gather(K.permute_dimensions(signals, [1, 0, 2] + vector_shape),
self._capsule_extension)
signals = K.permute_dimensions(signals, [1, 0, 2] + vector_shape)
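        # Note on the gather above (an inferred reading, not documented in this
        # module): self._capsule_extension appears to map each prototype index
        # to its owning capsule, so when several prototypes share a capsule the
        # capsule's signal is replicated once per prototype before the
        # per-prototype distance below.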
diss = diss_funcs.minkowski_distance(signals=signals,
protos=self.prototypes,
order_p=self.order_p,
squared=self.squared_dissimilarity,
epsilon=self.epsilon)
with K.name_scope('get_signals'):
if self.signal_output == 'protos':
signal_shape = mixed_shape(signals)
signals = K.tile(K.expand_dims(K.expand_dims(self.prototypes, 1), 0),
[K.shape(signals)[0], 1, signal_shape[2]]
+ list(np.ones((self.input_spec[0].ndim - 3,), dtype=int)))
else:
signals = signals
return signals, diss
def distance_sparse(self, signals):
diss = diss_funcs.minkowski_distance(signals=signals,
protos=self.prototypes,
order_p=self.order_p,
squared=self.squared_dissimilarity,
epsilon=self.epsilon)
return signals, diss
def get_config(self):
config = {'order_p': self.order_p,
'squared_dissimilarity': self.squared_dissimilarity,
'epsilon': self.epsilon}
super_config = super(MinkowskiDistance, self).get_config()
return dict(list(super_config.items()) + list(config.items()))
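# The Minkowski distance computed by MinkowskiDistance is, for order p,
#     d_p(x, w) = (sum_i |x_i - w_i| ** p) ** (1 / p),
# where the final 1 / p power is skipped when squared_dissimilarity is True
# and p = numpy.inf is accepted per the constructor comment (conventionally
# the maximum norm). A quick numeric check (a sketch, not part of the module):
# for x = [3, 0], w = [0, 4] and p = 2,
#     (|3 - 0| ** 2 + |0 - 4| ** 2) ** 0.5 = (9 + 16) ** 0.5 = 5.0.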
class TangentDistance(PointMatrixPrototype):
def __init__(self,
projected_atom_shape,
prototype_initializer='TruncatedNormal',
prototype_regularizer=None,
prototype_constraint=None,
matrix_initializer='TruncatedNormal',
matrix_regularizer=None,
matrix_scope='local', # local, global or capsule_wise
signal_output='signals', # what should be send to the output (signals, protos, projected_signals, projected_protos )
epsilon=K.epsilon(),
squared_dissimilarity=False,
linear_factor=0.5, # linear factor (None --> always add) prev_diss*(1-alpha) + alpha*curr_diss
signal_regularizer=None,
diss_regularizer=None,
**kwargs):
valid_signal_output = ('signals', 'projected_signals', 'parameterized_signals')
if signal_output not in valid_signal_output:
raise ValueError("signal_output must be in " + str(valid_signal_output) + ". You provide: "
+ str(signal_output))
self.squared_dissimilarity = squared_dissimilarity
self.epsilon = epsilon
if projected_atom_shape is None:
raise ValueError("projected_atom_shape must be unequal None.")
if 'matrix_constraint' in kwargs:
del kwargs['matrix_constraint']
super(TangentDistance, self).__init__(prototype_initializer=prototype_initializer,
prototype_regularizer=prototype_regularizer,
prototype_constraint=prototype_constraint,
matrix_initializer=matrix_initializer,
matrix_regularizer=matrix_regularizer,
matrix_constraint='orthogonalization',
matrix_scope=matrix_scope,
projected_atom_shape=projected_atom_shape,
signal_output=signal_output,
linear_factor=linear_factor,
signal_regularizer=signal_regularizer,
diss_regularizer=diss_regularizer,
**kwargs)
self.input_output_shape_equal = ('signals', 'projected_signals')
def _build(self, input_shape):
super(TangentDistance, self)._build(input_shape)
if np.prod(input_shape[0][3:]) <= np.prod(self.projected_atom_shape):
raise ValueError("The dimension of the projected shape must be lower than input_shape. You "
"provide: np.prod(signal_shape[3:])=" + str(np.prod(input_shape[0][3:])) + " < " +
"np.prod(self.projected_atom_shape)=" + str(np.prod(self.projected_atom_shape)))
def distance(self, signals):
# tile capsule regarding proto distribution
if self.proto_number != self.capsule_number:
with K.name_scope('signal_preprocessing'):
# vector_shape for permute commands
vector_axes = list(range(3, self.input_spec[0].ndim))
signals = K.gather(K.permute_dimensions(signals, [1, 0, 2] + vector_axes),
self._capsule_extension)
signals = K.permute_dimensions(signals, [1, 0, 2] + vector_axes)
if self.matrix_scope == 'capsule_wise':
with K.name_scope('subspace_preprocessing'):
matrices = K.gather(self.matrices, self._capsule_extension)
else:
matrices = self.matrices
diss = diss_funcs.tangent_distance(signals=signals,
protos=self.prototypes,
subspaces=matrices,
squared=self.squared_dissimilarity,
epsilon=self.epsilon)
with K.name_scope('get_signals'):
if self.signal_output == 'signals':
signals = signals
elif self.signal_output == 'projected_signals':
signals = self._get_projected_signals(signals, matrices)
else: # 'parameterized_signals'
signals = self._get_parameterized_signals(signals, matrices)
return signals, diss
def distance_sparse(self, signals):
if self.matrix_scope == 'capsule_wise':
with K.name_scope('subspace_preprocessing'):
matrices = K.gather(self.matrices, self._capsule_extension)
else:
matrices = self.matrices
diss = diss_funcs.tangent_distance(signals=signals,
protos=self.prototypes,
subspaces=matrices,
squared=self.squared_dissimilarity,
epsilon=self.epsilon)
return signals, diss
def get_projected_signals(self, signals):
if self.matrix_scope == 'capsule_wise':
with K.name_scope('matrix_preprocessing'):
matrices = K.gather(self.matrices, self._capsule_extension)
else:
matrices = self.matrices
return self._get_projected_signals(signals, matrices)
# affine_projection: w + W * W.T * (v - w)
def _get_projected_signals(self, signals, matrices):
# signal must be of shape (batch x protos x channels x dim1 x ... x dimN
with K.name_scope('get_projected_signals'):
signal_shape = mixed_shape(signals)
atom_axes = list(range(3, len(signal_shape)))
signals = K.permute_dimensions(signals, [0, 2, 1] + atom_axes)
diff = signals - self.prototypes
if self.matrix_scope == 'global':
with K.name_scope('projector'):
projector = K.dot(matrices, K.transpose(matrices))
with K.name_scope('tangentspace_projections'):
diff = K.reshape(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))
projected_diff = K.dot(diff, projector)
projected_diff = K.reshape(projected_diff,
(signal_shape[0], signal_shape[2], signal_shape[1]) + signal_shape[3:])
matching_protos = self.prototypes + projected_diff
matching_protos = K.permute_dimensions(matching_protos, [0, 2, 1] + atom_axes)
else: # local or capsule_wise
with K.name_scope('projector'):
projector = K.batch_dot(matrices, matrices, [2, 2])
with K.name_scope('tangentspace_projections'):
diff = K.reshape(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))
diff = K.permute_dimensions(diff, [1, 0, 2])
projected_diff = K.batch_dot(diff, projector)
projected_diff = K.permute_dimensions(projected_diff, [1, 0, 2])
projected_diff = K.reshape(projected_diff,
(signal_shape[0], signal_shape[2], signal_shape[1]) + signal_shape[3:])
matching_protos = self.prototypes + projected_diff
matching_protos = K.permute_dimensions(matching_protos, [0, 2, 1] + atom_axes)
return matching_protos
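    # Small numeric illustration of the affine projection w + W * W^T * (v - w)
    # implemented above (a sketch, not part of the module): for a prototype
    # w = [0, 0], a single tangent direction W = [[1], [0]] (the x-axis) and a
    # signal v = [3, 2],
    #     W * W^T * (v - w) = [3, 0]  and  w + W * W^T * (v - w) = [3, 0],
    # i.e. the signal is projected onto the affine subspace through w spanned
    # by the columns of W.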
def get_parameterized_signals(self, signals):
if self.matrix_scope == 'capsule_wise':
with K.name_scope('matrix_preprocessing'):
matrices = K.gather(self.matrices, self._capsule_extension)
else:
matrices = self.matrices
return self._get_parameterized_signals(signals, matrices)
# params: W.T * (v - w)
def _get_parameterized_signals(self, signals, matrices):
# signal must be of shape (batch x protos x channels x dim1 x ... x dimN
with K.name_scope('get_projected_signals'):
signal_shape = mixed_shape(signals)
atom_axes = list(range(3, len(signal_shape)))
signals = K.permute_dimensions(signals, [0, 2, 1] + atom_axes)
diff = signals - self.prototypes
if self.matrix_scope == 'global':
with K.name_scope('tangentspace_parameters'):
diff = K.reshape(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))
params = K.dot(diff, matrices)
params = K.reshape(params,
(signal_shape[0], signal_shape[2], signal_shape[1]) + self.projected_atom_shape)
params = K.permute_dimensions(params, [0, 2, 1] + atom_axes)
else: # local or capsule_wise
with K.name_scope('tangentspace_parameters'):
diff = K.reshape(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))
diff = K.permute_dimensions(diff, [1, 0, 2])
params = K.batch_dot(diff, matrices)
params = K.reshape(params,
(signal_shape[1], signal_shape[0], signal_shape[2]) + self.projected_atom_shape)
params = K.permute_dimensions(params, [1, 0, 2] + atom_axes)
return params
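    # Companion illustration for the parameterization W^T * (v - w) above (a
    # sketch): with the same w = [0, 0], W = [[1], [0]] and v = [3, 2] as in
    # the projection example, W^T * (v - w) = [3], i.e. the coordinates of the
    # difference vector in the basis spanned by the columns of W.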
def get_config(self):
config = {'squared_dissimilarity': self.squared_dissimilarity,
'epsilon': self.epsilon}
super_config = super(TangentDistance, self).get_config()
return dict(list(super_config.items()) + list(config.items()))
def _pre_train_prototypes(x_train,
y_train, # non-categorical
capsule_inputs_are_equal,
batch_version,
proto_distrib,
**kmeans_params):
# signals_shape: samples x capsule_number x channels x dim1 x dim2 x ... x dimN
signal_shape = x_train.shape[3:]
proto_number = sum([len(x) for x in proto_distrib])
x_trains = []
# all the cases for k-means computation
if y_train is None and capsule_inputs_are_equal:
# samples x dim1 x dim2 x ... x dimN
x = x_train[:, 0, :]
n_clusters = proto_number
x = np.reshape(x, (-1, np.prod(signal_shape)))
x_trains.append(x)
else:
n_clusters = []
for i, p in enumerate(proto_distrib):
n_clusters.append(len(p))
if y_train is None and not capsule_inputs_are_equal:
# capsule_number x samples x dim1 x dim2 x ... x dimN
x = x_train[:, i, :]
elif y_train is not None and capsule_inputs_are_equal:
# samples x channels x dim1 x dim2 x ... x dimN
x = x_train[y_train == i, 0, :]
else:
# samples x capsule_number x channels x dim1 x dim2 x ... x dimN
x = x_train[y_train == i, i, :]
x = np.reshape(x, (-1, np.prod(signal_shape)))
x_trains.append(x)
centers, labels = pre_training.kmeans(x_trains, n_clusters, batch_version, **kmeans_params)
centers = np.concatenate(centers, 0)
# reshape back to real shape
centers = np.reshape(centers, (proto_number, ) + signal_shape)
return centers, labels, x_trains
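# Sketch of the intended pre-training flow (the capsule wiring is assumed and
# not shown here): once a module such as MinkowskiDistance has been built by
# calling its capsule, something like
#     module.pre_training(x_train, y_train=labels, batch_version=True)
# runs k-means through the helper above (one run per class when y_train is
# given, otherwise per capsule, or one global run for equal capsule inputs),
# overwrites the prototypes with the cluster centers, and applies
# prototype_constraint once, treating pre-training as a single optimization
# step.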
| [
"numpy.prod",
"keras.backend.shape",
"keras.backend.reshape",
"LVQ.constraints.serialize",
"numpy.array",
"keras.backend.dot",
"numpy.mean",
"numpy.reshape",
"LVQ.constraints.get",
"keras.initializers.serialize",
"numpy.stack",
"keras.backend.transpose",
"numpy.concatenate",
"keras.backend... | [((37063, 37089), 'numpy.concatenate', 'np.concatenate', (['centers', '(0)'], {}), '(centers, 0)\n', (37077, 37089), True, 'import numpy as np\n'), ((37137, 37188), 'numpy.reshape', 'np.reshape', (['centers', '((proto_number,) + signal_shape)'], {}), '(centers, (proto_number,) + signal_shape)\n', (37147, 37188), True, 'import numpy as np\n'), ((1373, 1412), 'keras.initializers.get', 'initializers.get', (['prototype_initializer'], {}), '(prototype_initializer)\n', (1389, 1412), False, 'from keras import initializers, regularizers\n'), ((1450, 1489), 'keras.regularizers.get', 'regularizers.get', (['prototype_regularizer'], {}), '(prototype_regularizer)\n', (1466, 1489), False, 'from keras import initializers, regularizers\n'), ((1526, 1563), 'LVQ.constraints.get', 'constraints.get', (['prototype_constraint'], {}), '(prototype_constraint)\n', (1541, 1563), False, 'from LVQ import constraints\n'), ((11496, 11532), 'keras.initializers.get', 'initializers.get', (['matrix_initializer'], {}), '(matrix_initializer)\n', (11512, 11532), False, 'from keras import initializers, regularizers\n'), ((11567, 11603), 'keras.regularizers.get', 'regularizers.get', (['matrix_regularizer'], {}), '(matrix_regularizer)\n', (11583, 11603), False, 'from keras import initializers, regularizers\n'), ((11637, 11671), 'LVQ.constraints.get', 'constraints.get', (['matrix_constraint'], {}), '(matrix_constraint)\n', (11652, 11671), False, 'from LVQ import constraints\n'), ((21407, 21418), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (21416, 21418), True, 'from keras import backend as K\n'), ((26138, 26149), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (26147, 26149), True, 'from keras import backend as K\n'), ((1677, 1713), 'keras.regularizers.get', 'regularizers.get', (['signal_regularizer'], {}), '(signal_regularizer)\n', (1693, 1713), False, 'from keras import initializers, regularizers\n'), ((1751, 1785), 'keras.regularizers.get', 'regularizers.get', (['diss_regularizer'], {}), '(diss_regularizer)\n', (1767, 1785), False, 'from keras import initializers, regularizers\n'), ((4643, 4679), 'keras.backend.name_scope', 'K.name_scope', (['"""dissimilarity_update"""'], {}), "('dissimilarity_update')\n", (4655, 4679), True, 'from keras import backend as K\n'), ((5806, 5856), 'keras.initializers.serialize', 'initializers.serialize', (['self.prototype_initializer'], {}), '(self.prototype_initializer)\n', (5828, 5856), False, 'from keras import initializers, regularizers\n'), ((5901, 5951), 'keras.regularizers.serialize', 'regularizers.serialize', (['self.prototype_regularizer'], {}), '(self.prototype_regularizer)\n', (5923, 5951), False, 'from keras import initializers, regularizers\n'), ((5995, 6043), 'LVQ.constraints.serialize', 'constraints.serialize', (['self.prototype_constraint'], {}), '(self.prototype_constraint)\n', (6016, 6043), False, 'from LVQ import constraints\n'), ((6195, 6246), 'keras.regularizers.serialize', 'regularizers.serialize', (['self.output_regularizers[0]'], {}), '(self.output_regularizers[0])\n', (6217, 6246), False, 'from keras import initializers, regularizers\n'), ((6286, 6337), 'keras.regularizers.serialize', 'regularizers.serialize', (['self.output_regularizers[1]'], {}), '(self.output_regularizers[1])\n', (6308, 6337), False, 'from keras import initializers, regularizers\n'), ((13954, 14001), 'keras.initializers.serialize', 'initializers.serialize', (['self.matrix_initializer'], {}), '(self.matrix_initializer)\n', (13976, 14001), False, 'from keras import 
initializers, regularizers\n'), ((14043, 14090), 'keras.regularizers.serialize', 'regularizers.serialize', (['self.matrix_regularizer'], {}), '(self.matrix_regularizer)\n', (14065, 14090), False, 'from keras import initializers, regularizers\n'), ((14131, 14176), 'LVQ.constraints.serialize', 'constraints.serialize', (['self.matrix_constraint'], {}), '(self.matrix_constraint)\n', (14152, 14176), False, 'from LVQ import constraints\n'), ((19530, 19575), 'numpy.prod', 'np.prod', (['self.projected_atom_shape'], {'dtype': 'int'}), '(self.projected_atom_shape, dtype=int)\n', (19537, 19575), True, 'import numpy as np\n'), ((24350, 24377), 'keras.backend.name_scope', 'K.name_scope', (['"""get_signals"""'], {}), "('get_signals')\n", (24362, 24377), True, 'from keras import backend as K\n'), ((28253, 28280), 'numpy.prod', 'np.prod', (['input_shape[0][3:]'], {}), '(input_shape[0][3:])\n', (28260, 28280), True, 'import numpy as np\n'), ((28284, 28318), 'numpy.prod', 'np.prod', (['self.projected_atom_shape'], {}), '(self.projected_atom_shape)\n', (28291, 28318), True, 'import numpy as np\n'), ((29777, 29804), 'keras.backend.name_scope', 'K.name_scope', (['"""get_signals"""'], {}), "('get_signals')\n", (29789, 29804), True, 'from keras import backend as K\n'), ((31352, 31389), 'keras.backend.name_scope', 'K.name_scope', (['"""get_projected_signals"""'], {}), "('get_projected_signals')\n", (31364, 31389), True, 'from keras import backend as K\n'), ((31520, 31572), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['signals', '([0, 2, 1] + atom_axes)'], {}), '(signals, [0, 2, 1] + atom_axes)\n', (31540, 31572), True, 'from keras import backend as K\n'), ((33828, 33865), 'keras.backend.name_scope', 'K.name_scope', (['"""get_projected_signals"""'], {}), "('get_projected_signals')\n", (33840, 33865), True, 'from keras import backend as K\n'), ((33996, 34048), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['signals', '([0, 2, 1] + atom_axes)'], {}), '(signals, [0, 2, 1] + atom_axes)\n', (34016, 34048), True, 'from keras import backend as K\n'), ((7713, 7734), 'numpy.argmax', 'np.argmax', (['y_train', '(1)'], {}), '(y_train, 1)\n', (7722, 7734), True, 'import numpy as np\n'), ((16751, 16772), 'numpy.argmax', 'np.argmax', (['y_train', '(1)'], {}), '(y_train, 1)\n', (16760, 16772), True, 'import numpy as np\n'), ((18999, 19027), 'numpy.mean', 'np.mean', (['clusters[i]'], {'axis': '(0)'}), '(clusters[i], axis=0)\n', (19006, 19027), True, 'import numpy as np\n'), ((19040, 19066), 'keras.backend.int_shape', 'K.int_shape', (['self.matrices'], {}), '(self.matrices)\n', (19051, 19066), True, 'from keras import backend as K\n'), ((19073, 19099), 'keras.backend.int_shape', 'K.int_shape', (['self.matrices'], {}), '(self.matrices)\n', (19084, 19099), True, 'from keras import backend as K\n'), ((20654, 20680), 'numpy.stack', 'np.stack', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (20662, 20680), True, 'import numpy as np\n'), ((23594, 23630), 'keras.backend.name_scope', 'K.name_scope', (['"""signal_preprocessing"""'], {}), "('signal_preprocessing')\n", (23606, 23630), True, 'from keras import backend as K\n'), ((23933, 23988), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['signals', '([1, 0, 2] + vector_shape)'], {}), '(signals, [1, 0, 2] + vector_shape)\n', (23953, 23988), True, 'from keras import backend as K\n'), ((28803, 28839), 'keras.backend.name_scope', 'K.name_scope', (['"""signal_preprocessing"""'], {}), "('signal_preprocessing')\n", (28815, 28839), True, 
'from keras import backend as K\n'), ((29140, 29194), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['signals', '([1, 0, 2] + vector_axes)'], {}), '(signals, [1, 0, 2] + vector_axes)\n', (29160, 29194), True, 'from keras import backend as K\n'), ((29261, 29299), 'keras.backend.name_scope', 'K.name_scope', (['"""subspace_preprocessing"""'], {}), "('subspace_preprocessing')\n", (29273, 29299), True, 'from keras import backend as K\n'), ((29328, 29376), 'keras.backend.gather', 'K.gather', (['self.matrices', 'self._capsule_extension'], {}), '(self.matrices, self._capsule_extension)\n', (29336, 29376), True, 'from keras import backend as K\n'), ((30281, 30319), 'keras.backend.name_scope', 'K.name_scope', (['"""subspace_preprocessing"""'], {}), "('subspace_preprocessing')\n", (30293, 30319), True, 'from keras import backend as K\n'), ((30348, 30396), 'keras.backend.gather', 'K.gather', (['self.matrices', 'self._capsule_extension'], {}), '(self.matrices, self._capsule_extension)\n', (30356, 30396), True, 'from keras import backend as K\n'), ((30925, 30961), 'keras.backend.name_scope', 'K.name_scope', (['"""matrix_preprocessing"""'], {}), "('matrix_preprocessing')\n", (30937, 30961), True, 'from keras import backend as K\n'), ((30990, 31038), 'keras.backend.gather', 'K.gather', (['self.matrices', 'self._capsule_extension'], {}), '(self.matrices, self._capsule_extension)\n', (30998, 31038), True, 'from keras import backend as K\n'), ((33412, 33448), 'keras.backend.name_scope', 'K.name_scope', (['"""matrix_preprocessing"""'], {}), "('matrix_preprocessing')\n", (33424, 33448), True, 'from keras import backend as K\n'), ((33477, 33525), 'keras.backend.gather', 'K.gather', (['self.matrices', 'self._capsule_extension'], {}), '(self.matrices, self._capsule_extension)\n', (33485, 33525), True, 'from keras import backend as K\n'), ((36189, 36210), 'numpy.prod', 'np.prod', (['signal_shape'], {}), '(signal_shape)\n', (36196, 36210), True, 'import numpy as np\n'), ((7803, 7821), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (7812, 7821), True, 'import numpy as np\n'), ((8913, 8932), 'keras.backend.variable', 'K.variable', (['centers'], {}), '(centers)\n', (8923, 8932), True, 'from keras import backend as K\n'), ((16841, 16859), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (16850, 16859), True, 'import numpy as np\n'), ((18022, 18041), 'keras.backend.variable', 'K.variable', (['centers'], {}), '(centers)\n', (18032, 18041), True, 'from keras import backend as K\n'), ((18185, 18212), 'numpy.concatenate', 'np.concatenate', (['clusters', '(0)'], {}), '(clusters, 0)\n', (18199, 18212), True, 'import numpy as np\n'), ((23790, 23845), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['signals', '([1, 0, 2] + vector_shape)'], {}), '(signals, [1, 0, 2] + vector_shape)\n', (23810, 23845), True, 'from keras import backend as K\n'), ((28998, 29052), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['signals', '([1, 0, 2] + vector_axes)'], {}), '(signals, [1, 0, 2] + vector_axes)\n', (29018, 29052), True, 'from keras import backend as K\n'), ((31686, 31711), 'keras.backend.name_scope', 'K.name_scope', (['"""projector"""'], {}), "('projector')\n", (31698, 31711), True, 'from keras import backend as K\n'), ((31806, 31846), 'keras.backend.name_scope', 'K.name_scope', (['"""tangentspace_projections"""'], {}), "('tangentspace_projections')\n", (31818, 31846), True, 'from keras import backend as K\n'), ((31875, 31948), 'keras.backend.reshape', 
'K.reshape', (['diff', '(signal_shape[0] * signal_shape[2], signal_shape[1], -1)'], {}), '(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))\n', (31884, 31948), True, 'from keras import backend as K\n'), ((31986, 32008), 'keras.backend.dot', 'K.dot', (['diff', 'projector'], {}), '(diff, projector)\n', (31991, 32008), True, 'from keras import backend as K\n'), ((32046, 32148), 'keras.backend.reshape', 'K.reshape', (['projected_diff', '((signal_shape[0], signal_shape[2], signal_shape[1]) + signal_shape[3:])'], {}), '(projected_diff, (signal_shape[0], signal_shape[2], signal_shape[1\n ]) + signal_shape[3:])\n', (32055, 32148), True, 'from keras import backend as K\n'), ((32301, 32361), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['matching_protos', '([0, 2, 1] + atom_axes)'], {}), '(matching_protos, [0, 2, 1] + atom_axes)\n', (32321, 32361), True, 'from keras import backend as K\n'), ((32427, 32452), 'keras.backend.name_scope', 'K.name_scope', (['"""projector"""'], {}), "('projector')\n", (32439, 32452), True, 'from keras import backend as K\n'), ((32486, 32525), 'keras.backend.batch_dot', 'K.batch_dot', (['matrices', 'matrices', '[2, 2]'], {}), '(matrices, matrices, [2, 2])\n', (32497, 32525), True, 'from keras import backend as K\n'), ((32548, 32588), 'keras.backend.name_scope', 'K.name_scope', (['"""tangentspace_projections"""'], {}), "('tangentspace_projections')\n", (32560, 32588), True, 'from keras import backend as K\n'), ((32617, 32690), 'keras.backend.reshape', 'K.reshape', (['diff', '(signal_shape[0] * signal_shape[2], signal_shape[1], -1)'], {}), '(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))\n', (32626, 32690), True, 'from keras import backend as K\n'), ((32718, 32755), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['diff', '[1, 0, 2]'], {}), '(diff, [1, 0, 2])\n', (32738, 32755), True, 'from keras import backend as K\n'), ((32793, 32821), 'keras.backend.batch_dot', 'K.batch_dot', (['diff', 'projector'], {}), '(diff, projector)\n', (32804, 32821), True, 'from keras import backend as K\n'), ((32859, 32906), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['projected_diff', '[1, 0, 2]'], {}), '(projected_diff, [1, 0, 2])\n', (32879, 32906), True, 'from keras import backend as K\n'), ((32944, 33046), 'keras.backend.reshape', 'K.reshape', (['projected_diff', '((signal_shape[0], signal_shape[2], signal_shape[1]) + signal_shape[3:])'], {}), '(projected_diff, (signal_shape[0], signal_shape[2], signal_shape[1\n ]) + signal_shape[3:])\n', (32953, 33046), True, 'from keras import backend as K\n'), ((33199, 33259), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['matching_protos', '([0, 2, 1] + atom_axes)'], {}), '(matching_protos, [0, 2, 1] + atom_axes)\n', (33219, 33259), True, 'from keras import backend as K\n'), ((34162, 34201), 'keras.backend.name_scope', 'K.name_scope', (['"""tangentspace_parameters"""'], {}), "('tangentspace_parameters')\n", (34174, 34201), True, 'from keras import backend as K\n'), ((34230, 34303), 'keras.backend.reshape', 'K.reshape', (['diff', '(signal_shape[0] * signal_shape[2], signal_shape[1], -1)'], {}), '(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))\n', (34239, 34303), True, 'from keras import backend as K\n'), ((34333, 34354), 'keras.backend.dot', 'K.dot', (['diff', 'matrices'], {}), '(diff, matrices)\n', (34338, 34354), True, 'from keras import backend as K\n'), ((34385, 34487), 'keras.backend.reshape', 'K.reshape', (['params', '((signal_shape[0], 
signal_shape[2], signal_shape[1]) + self.\n projected_atom_shape)'], {}), '(params, (signal_shape[0], signal_shape[2], signal_shape[1]) +\n self.projected_atom_shape)\n', (34394, 34487), True, 'from keras import backend as K\n'), ((34552, 34603), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['params', '([0, 2, 1] + atom_axes)'], {}), '(params, [0, 2, 1] + atom_axes)\n', (34572, 34603), True, 'from keras import backend as K\n'), ((34669, 34708), 'keras.backend.name_scope', 'K.name_scope', (['"""tangentspace_parameters"""'], {}), "('tangentspace_parameters')\n", (34681, 34708), True, 'from keras import backend as K\n'), ((34737, 34810), 'keras.backend.reshape', 'K.reshape', (['diff', '(signal_shape[0] * signal_shape[2], signal_shape[1], -1)'], {}), '(diff, (signal_shape[0] * signal_shape[2], signal_shape[1], -1))\n', (34746, 34810), True, 'from keras import backend as K\n'), ((34838, 34875), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['diff', '[1, 0, 2]'], {}), '(diff, [1, 0, 2])\n', (34858, 34875), True, 'from keras import backend as K\n'), ((34905, 34932), 'keras.backend.batch_dot', 'K.batch_dot', (['diff', 'matrices'], {}), '(diff, matrices)\n', (34916, 34932), True, 'from keras import backend as K\n'), ((34963, 35065), 'keras.backend.reshape', 'K.reshape', (['params', '((signal_shape[1], signal_shape[0], signal_shape[2]) + self.\n projected_atom_shape)'], {}), '(params, (signal_shape[1], signal_shape[0], signal_shape[2]) +\n self.projected_atom_shape)\n', (34972, 35065), True, 'from keras import backend as K\n'), ((35130, 35181), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['params', '([1, 0, 2] + atom_axes)'], {}), '(params, [1, 0, 2] + atom_axes)\n', (35150, 35181), True, 'from keras import backend as K\n'), ((36896, 36917), 'numpy.prod', 'np.prod', (['signal_shape'], {}), '(signal_shape)\n', (36903, 36917), True, 'import numpy as np\n'), ((12498, 12537), 'numpy.array', 'np.array', (['input_shape[0][3:]'], {'dtype': 'int'}), '(input_shape[0][3:], dtype=int)\n', (12506, 12537), True, 'import numpy as np\n'), ((12593, 12639), 'numpy.array', 'np.array', (['self.projected_atom_shape'], {'dtype': 'int'}), '(self.projected_atom_shape, dtype=int)\n', (12601, 12639), True, 'import numpy as np\n'), ((18219, 18267), 'numpy.prod', 'np.prod', (['self.input_spec[0].shape[3:]'], {'dtype': 'int'}), '(self.input_spec[0].shape[3:], dtype=int)\n', (18226, 18267), True, 'import numpy as np\n'), ((19898, 20274), 'warnings.warn', 'warnings.warn', (['"""Some of the matrices were not be pre-tarined successfully. The old matrix instantiation is reused and the pre-trained matrix is skipped. One reason for that could be that your cluster doesn\'t contain enough points regarding your matrix dimension. If you are not pre-training on the whole dataset try to increase the number of the pre-training batch."""'], {}), '(\n "Some of the matrices were not be pre-tarined successfully. The old matrix instantiation is reused and the pre-trained matrix is skipped. One reason for that could be that your cluster doesn\'t contain enough points regarding your matrix dimension. 
If you are not pre-training on the whole dataset try to increase the number of the pre-training batch."\n )\n', (19911, 20274), False, 'import warnings\n'), ((20880, 20900), 'keras.backend.variable', 'K.variable', (['matrices'], {}), '(matrices)\n', (20890, 20900), True, 'from keras import backend as K\n'), ((24525, 24558), 'keras.backend.expand_dims', 'K.expand_dims', (['self.prototypes', '(1)'], {}), '(self.prototypes, 1)\n', (24538, 24558), True, 'from keras import backend as K\n'), ((28610, 28644), 'numpy.prod', 'np.prod', (['self.projected_atom_shape'], {}), '(self.projected_atom_shape)\n', (28617, 28644), True, 'import numpy as np\n'), ((31761, 31782), 'keras.backend.transpose', 'K.transpose', (['matrices'], {}), '(matrices)\n', (31772, 31782), True, 'from keras import backend as K\n'), ((24679, 24729), 'numpy.ones', 'np.ones', (['(self.input_spec[0].ndim - 3,)'], {'dtype': 'int'}), '((self.input_spec[0].ndim - 3,), dtype=int)\n', (24686, 24729), True, 'import numpy as np\n'), ((18501, 18549), 'numpy.prod', 'np.prod', (['self.input_spec[0].shape[3:]'], {'dtype': 'int'}), '(self.input_spec[0].shape[3:], dtype=int)\n', (18508, 18549), True, 'import numpy as np\n'), ((18804, 18852), 'numpy.prod', 'np.prod', (['self.input_spec[0].shape[3:]'], {'dtype': 'int'}), '(self.input_spec[0].shape[3:], dtype=int)\n', (18811, 18852), True, 'import numpy as np\n'), ((24598, 24614), 'keras.backend.shape', 'K.shape', (['signals'], {}), '(signals)\n', (24605, 24614), True, 'from keras import backend as K\n'), ((19293, 19319), 'keras.backend.int_shape', 'K.int_shape', (['self.matrices'], {}), '(self.matrices)\n', (19304, 19319), True, 'from keras import backend as K\n'), ((28498, 28525), 'numpy.prod', 'np.prod', (['input_shape[0][3:]'], {}), '(input_shape[0][3:])\n', (28505, 28525), True, 'import numpy as np\n'), ((19190, 19216), 'keras.backend.int_shape', 'K.int_shape', (['self.matrices'], {}), '(self.matrices)\n', (19201, 19216), True, 'from keras import backend as K\n')] |
#This code is for making plots from FLASH output files
#Created by <NAME>
import h5py
import numpy as np
import pylab
from math import sqrt
from matplotlib.colors import LogNorm
from matplotlib.colors import Normalize
import os
import re
import sys
import optparse
def read_option():
usage = "usage: [%prog] [options] Flash_outputs \n"
usage += "Takes all the outputs and makes plots."
parser = optparse.OptionParser(usage=usage)
parser.add_option("-d", "--output_path",
dest = "output_path",
type = "string",
help = "Directory for the output files. Default is plots.",
default = "plots/")
option,args = parser.parse_args()
option.args = args
if not option.output_path.endswith("/"):
option.output_path += "/"
if not os.path.exists(option.output_path):
os.makedirs(option.output_path)
return option
if __name__ == "__main__":
option = read_option()
for flash_output in option.args:
filename = flash_output
output_path = option.output_path
index = flash_output[-4:]
#Read in hdf5 file
h5file = h5py.File(filename, 'r')
#get the number of blocks and the dimensions of each block
numblocks = np.shape(h5file['temp'])[0]
blk_r = np.shape(h5file['temp'])[3]
blk_z = np.shape(h5file['temp'])[2]
#First find the r and z values of the centers of blocks
#And the r and z ranges of the total grid
x = []
y = []
        rmin, rmax = h5file['bounding box'][0][0] #prefilled with the values from block 0
        zmin, zmax = h5file['bounding box'][0][1]
for i in range(0,numblocks):
x.append(h5file['coordinates'][i][0])
y.append(h5file['coordinates'][i][1])
if h5file['bounding box'][i][0][0] < rmin:
rmin = h5file['bounding box'][i][0][0]
if h5file['bounding box'][i][0][1] > rmax:
rmax = h5file['bounding box'][i][0][1]
if h5file['bounding box'][i][1][0] < zmin:
zmin = h5file['bounding box'][i][1][0]
if h5file['bounding box'][i][1][1] > zmax:
zmax = h5file['bounding box'][i][1][1]
r_centers = np.sort(np.unique(x))
z_centers = np.sort(np.unique(y))
r_centers = r_centers.tolist()
z_centers = z_centers.tolist()
#Make the r and z arrays
rstep = (rmax - rmin)/(sqrt(numblocks)*blk_r)
zstep = (zmax - zmin)/(sqrt(numblocks)*blk_z)
r = []
z = []
#I am making the r and z arrays together because they are the same size
#if this changes this will need to be split to two loops
for j in range(int(sqrt(numblocks)*blk_r)):
r.append(rmin + rstep*j)
z.append(zmin + zstep*j)
r = np.asarray(r)
z = np.asarray(z)
#Make empty numpy arrays for coordinates and variables
var_shape = (len(r),len(z))
temp = np.zeros(var_shape)
#Fill in the variable arrays
for n in range(numblocks):
center = h5file['coordinates'][n]
r_ind = r_centers.index(center[0])
z_ind = z_centers.index(center[1])
r_start = r_ind*blk_r
z_start = z_ind*blk_z
temp0 = np.asarray(h5file['temp'][n][0])
for l in range(blk_r):
r0 = r_start + l
for m in range(blk_z):
z0 = z_start + m
temp[r0,z0] = temp0[m,l]
h5file.close()
#The variable arrays got transposed by hdf5 so correct that
temp = np.transpose(temp)
#make plots
golden = (1.0 + sqrt(5.0)) / 2.0
figprops = dict(figsize=(8., 8./golden), dpi=128)
adjustprops = dict(left=0.1, bottom=0.12, right=0.97, top=0.93, wspace=0.3, hspace=0.3)
fig2 = pylab.figure(**figprops)
fig2.subplots_adjust(**adjustprops)
ax1 = fig2.add_subplot(111)
cax = ax1.pcolormesh(r, z, temp, norm=LogNorm(vmin=5e7, vmax=5e9), cmap='OrRd')
ax1.set_xlabel("r", fontsize = 20)
ax1.set_ylabel("z", fontsize = 20)
ax1.set_xlim([0,8e9])
ax1.set_ylim([-4e9,4e9])
fig2.colorbar(cax)
fig2.suptitle('Temperature', fontsize = 20)
name = output_path + 'cyl_grid_temp' + index
pngname = name + ".png"
fig2.savefig(pngname)
        pylab.close(fig2)
pylab.close('all')
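# Example invocation (file names are illustrative; the script expects FLASH
# checkpoint files whose names end in a four-digit index, see `index` above):
#   python flash_plots.py -d plots/ lasslab_hdf5_chk_0001 lasslab_hdf5_chk_0002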
| [
"os.path.exists",
"numpy.unique",
"os.makedirs",
"numpy.asarray",
"optparse.OptionParser",
"math.sqrt",
"h5py.File",
"pylab.figure",
"pylab.close",
"numpy.zeros",
"numpy.shape",
"numpy.transpose",
"matplotlib.colors.LogNorm"
] | [((417, 451), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (438, 451), False, 'import optparse\n'), ((865, 899), 'os.path.exists', 'os.path.exists', (['option.output_path'], {}), '(option.output_path)\n', (879, 899), False, 'import os\n'), ((909, 940), 'os.makedirs', 'os.makedirs', (['option.output_path'], {}), '(option.output_path)\n', (920, 940), False, 'import os\n'), ((1212, 1236), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1221, 1236), False, 'import h5py\n'), ((2946, 2959), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (2956, 2959), True, 'import numpy as np\n'), ((2972, 2985), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (2982, 2985), True, 'import numpy as np\n'), ((3101, 3120), 'numpy.zeros', 'np.zeros', (['var_shape'], {}), '(var_shape)\n', (3109, 3120), True, 'import numpy as np\n'), ((3775, 3793), 'numpy.transpose', 'np.transpose', (['temp'], {}), '(temp)\n', (3787, 3793), True, 'import numpy as np\n'), ((4031, 4055), 'pylab.figure', 'pylab.figure', ([], {}), '(**figprops)\n', (4043, 4055), False, 'import pylab\n'), ((4605, 4623), 'pylab.close', 'pylab.close', (['"""all"""'], {}), "('all')\n", (4616, 4623), False, 'import pylab\n'), ((1325, 1349), 'numpy.shape', 'np.shape', (["h5file['temp']"], {}), "(h5file['temp'])\n", (1333, 1349), True, 'import numpy as np\n'), ((1369, 1393), 'numpy.shape', 'np.shape', (["h5file['temp']"], {}), "(h5file['temp'])\n", (1377, 1393), True, 'import numpy as np\n'), ((1413, 1437), 'numpy.shape', 'np.shape', (["h5file['temp']"], {}), "(h5file['temp'])\n", (1421, 1437), True, 'import numpy as np\n'), ((2346, 2358), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (2355, 2358), True, 'import numpy as np\n'), ((2388, 2400), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2397, 2400), True, 'import numpy as np\n'), ((3423, 3455), 'numpy.asarray', 'np.asarray', (["h5file['temp'][n][0]"], {}), "(h5file['temp'][n][0])\n", (3433, 3455), True, 'import numpy as np\n'), ((2553, 2568), 'math.sqrt', 'sqrt', (['numblocks'], {}), '(numblocks)\n', (2557, 2568), False, 'from math import sqrt\n'), ((2607, 2622), 'math.sqrt', 'sqrt', (['numblocks'], {}), '(numblocks)\n', (2611, 2622), False, 'from math import sqrt\n'), ((3841, 3850), 'math.sqrt', 'sqrt', (['(5.0)'], {}), '(5.0)\n', (3845, 3850), False, 'from math import sqrt\n'), ((4186, 4229), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': '(50000000.0)', 'vmax': '(5000000000.0)'}), '(vmin=50000000.0, vmax=5000000000.0)\n', (4193, 4229), False, 'from matplotlib.colors import LogNorm\n'), ((2834, 2849), 'math.sqrt', 'sqrt', (['numblocks'], {}), '(numblocks)\n', (2838, 2849), False, 'from math import sqrt\n')] |
def wavelet(Y,dt,pad=0.,dj=0.25,s0=-1,J1=-1,mother="MORLET",param=-1):
"""
This function is the translation of wavelet.m by Torrence and Compo
import wave_bases from wave_bases.py
The following is the original comment in wavelet.m
    #WAVELET  1D Wavelet transform with optional significance testing
%
% [WAVE,PERIOD,SCALE,COI] = wavelet(Y,DT,PAD,DJ,S0,J1,MOTHER,PARAM)
%
% Computes the wavelet transform of the vector Y (length N),
% with sampling rate DT.
%
% By default, the Morlet wavelet (k0=6) is used.
% The wavelet basis is normalized to have total energy=1 at all scales.
%
%
% INPUTS:
%
% Y = the time series of length N.
% DT = amount of time between each Y value, i.e. the sampling time.
%
% OUTPUTS:
%
% WAVE is the WAVELET transform of Y. This is a complex array
% of dimensions (N,J1+1). FLOAT(WAVE) gives the WAVELET amplitude,
    %    ATAN(IMAGINARY(WAVE),FLOAT(WAVE)) gives the WAVELET phase.
% The WAVELET power spectrum is ABS(WAVE)^2.
% Its units are sigma^2 (the time series variance).
%
%
% OPTIONAL INPUTS:
%
% *** Note *** setting any of the following to -1 will cause the default
% value to be used.
%
% PAD = if set to 1 (default is 0), pad time series with enough zeroes to get
% N up to the next higher power of 2. This prevents wraparound
% from the end of the time series to the beginning, and also
% speeds up the FFT's used to do the wavelet transform.
% This will not eliminate all edge effects (see COI below).
%
% DJ = the spacing between discrete scales. Default is 0.25.
% A smaller # will give better scale resolution, but be slower to plot.
%
% S0 = the smallest scale of the wavelet. Default is 2*DT.
%
% J1 = the # of scales minus one. Scales range from S0 up to S0*2^(J1*DJ),
% to give a total of (J1+1) scales. Default is J1 = (LOG2(N DT/S0))/DJ.
%
% MOTHER = the mother wavelet function.
% The choices are 'MORLET', 'PAUL', or 'DOG'
%
% PARAM = the mother wavelet parameter.
% For 'MORLET' this is k0 (wavenumber), default is 6.
% For 'PAUL' this is m (order), default is 4.
% For 'DOG' this is m (m-th derivative), default is 2.
%
%
% OPTIONAL OUTPUTS:
%
% PERIOD = the vector of "Fourier" periods (in time units) that corresponds
% to the SCALEs.
%
% SCALE = the vector of scale indices, given by S0*2^(j*DJ), j=0...J1
% where J1+1 is the total # of scales.
%
% COI = if specified, then return the Cone-of-Influence, which is a vector
% of N points that contains the maximum period of useful information
% at that particular time.
% Periods greater than this are subject to edge effects.
% This can be used to plot COI lines on a contour plot by doing:
%
% contour(time,log(period),log(power))
% plot(time,log(coi),'k')
%
%----------------------------------------------------------------------------
% Copyright (C) 1995-2004, <NAME> and <NAME>
%
% This software may be used, copied, or redistributed as long as it is not
% sold and this copyright notice is reproduced on each copy made. This
% routine is provided as is without any express or implied warranties
% whatsoever.
%
% Notice: Please acknowledge the use of the above software in any publications:
% ``Wavelet software was provided by <NAME> and <NAME>,
% and is available at URL: http://paos.colorado.edu/research/wavelets/''.
%
% Reference: <NAME>. and <NAME>, 1998: A Practical Guide to
% Wavelet Analysis. <I>Bull. Amer. Meteor. Soc.</I>, 79, 61-78.
%
% Please send a copy of such publications to either <NAME> or G. Compo:
% Dr. <NAME> Dr. <NAME>
% Research Systems, Inc. Climate Diagnostics Center
% 4990 Pearl East Circle 325 Broadway R/CDC1
% Boulder, CO 80301, USA Boulder, CO 80305-3328, USA
% E-mail: chris[AT]rsinc[DOT]com E-mail: compo[AT]colorado[DOT]edu
%----------------------------------------------------------------------------"""
#modules
import numpy as np
from wave_bases import wave_bases
#set default
n1 = len(Y)
if (s0 == -1): s0=2.*dt
if (dj == -1): dj = 1./4.
if (J1 == -1): J1=np.fix((np.log(n1*dt/s0)/np.log(2))/dj)
if (mother == -1): mother = 'MORLET'
#print "s0=",s0
#print "J1=",J1
#....construct time series to analyze, pad if necessary
    x = Y - np.mean(Y)
if (pad == 1):
base2 = np.fix(np.log(n1)/np.log(2) + 0.4999) # power of 2 nearest to N
temp=np.zeros((2**(int(base2)+1)-n1,))
x=np.concatenate((x,temp))
n = len(x)
#....construct wavenumber array used in transform [Eqn(5)]
k = np.arange(1,np.fix(n/2)+1)
k = k*(2.*np.pi)/(n*dt)
    k = np.concatenate((np.zeros((1,)), k, -k[-2::-1]))
#....compute FFT of the (padded) time series
f = np.fft.fft(x) # [Eqn(3)]
#....construct SCALE array & empty PERIOD & WAVE arrays
scale=np.array([s0*2**(i*dj) for i in range(0,int(J1)+1)])
period = scale.copy()
    wave = np.zeros((int(J1)+1,n),dtype=complex)  # define the (complex) wavelet array
# loop through all scales and compute transform
for a1 in range(0,int(J1)+1):
daughter,fourier_factor,coi,dofmin=wave_bases(mother,k,scale[a1],param)
wave[a1,:] = np.fft.ifft(f*daughter) # wavelet transform[Eqn(4)]
period = fourier_factor*scale
coi=coi*dt*np.concatenate(([1.E-5],np.arange(1.,(n1+1.)/2.-1),np.flipud(np.arange(1,n1/2.)),[1.E-5])) # COI [Sec.3g]
wave = wave[:,:n1] # get rid of padding before returning
return wave,period,scale,coi
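# A minimal usage sketch (series name, file and sampling interval below are
# illustrative only, following the Torrence & Compo NINO3 example):
#   import numpy as np
#   sst = np.loadtxt('sst_nino3.dat')           # hypothetical input series
#   sst = (sst - np.mean(sst)) / np.std(sst)    # normalise to unit variance
#   wave, period, scale, coi = wavelet(sst, dt=0.25, pad=1, mother='MORLET')
#   power = np.abs(wave) ** 2                   # wavelet power spectrum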
# end of code | [
"wave_bases.wave_bases",
"numpy.mean",
"numpy.fix",
"numpy.fft.fft",
"numpy.log",
"numpy.zeros",
"numpy.concatenate",
"numpy.fft.ifft",
"numpy.arange"
] | [((5072, 5085), 'numpy.fft.fft', 'np.fft.fft', (['x'], {}), '(x)\n', (5082, 5085), True, 'import numpy as np\n'), ((4602, 4612), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (4609, 4612), True, 'import numpy as np\n'), ((4776, 4801), 'numpy.concatenate', 'np.concatenate', (['(x, temp)'], {}), '((x, temp))\n', (4790, 4801), True, 'import numpy as np\n'), ((5490, 5529), 'wave_bases.wave_bases', 'wave_bases', (['mother', 'k', 'scale[a1]', 'param'], {}), '(mother, k, scale[a1], param)\n', (5500, 5529), False, 'from wave_bases import wave_bases\n'), ((5549, 5574), 'numpy.fft.ifft', 'np.fft.ifft', (['(f * daughter)'], {}), '(f * daughter)\n', (5560, 5574), True, 'import numpy as np\n'), ((4910, 4923), 'numpy.fix', 'np.fix', (['(n / 2)'], {}), '(n / 2)\n', (4916, 4923), True, 'import numpy as np\n'), ((4979, 4993), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (4987, 4993), True, 'import numpy as np\n'), ((5677, 5713), 'numpy.arange', 'np.arange', (['(1.0)', '((n1 + 1.0) / 2.0 - 1)'], {}), '(1.0, (n1 + 1.0) / 2.0 - 1)\n', (5686, 5713), True, 'import numpy as np\n'), ((4410, 4430), 'numpy.log', 'np.log', (['(n1 * dt / s0)'], {}), '(n1 * dt / s0)\n', (4416, 4430), True, 'import numpy as np\n'), ((4427, 4436), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4433, 4436), True, 'import numpy as np\n'), ((4658, 4668), 'numpy.log', 'np.log', (['n1'], {}), '(n1)\n', (4664, 4668), True, 'import numpy as np\n'), ((4669, 4678), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4675, 4678), True, 'import numpy as np\n'), ((5714, 5736), 'numpy.arange', 'np.arange', (['(1)', '(n1 / 2.0)'], {}), '(1, n1 / 2.0)\n', (5723, 5736), True, 'import numpy as np\n')] |
import numpy as np
teamscsv = open("Teams.csv", 'r')
tourneycsv = open("TourneyDetailedResults.csv", 'r')
seasoncsv = open("RegularSeasonDetailedResults.csv", 'r')
bracketID = open("bracketID.csv", 'r')
keyToEnum = {}
enumToTeamName = {}
def readbrackets():
bracketlst = []
for team in bracketID.readlines():
bracketlst.append(int(team.strip()))
return bracketlst
def readteams():
teams = {}
lines = teamscsv.readlines()
enum = 0
for line in lines[1:]:
teamid,teamname = line[:-1].split(',')
teams[int(teamid)] = teamname
keyToEnum[int(teamid)] = enum
enumToTeamName[enum] = teamname
enum+=1
return teams
#there is a bunch of data, lets just look at score for now
# 0 1 2 3 4 5
#2003, 134 ,1421, 92, 1411, 84, N,1,32,69,11,29,17,26,14,30,17,12,5,3,22,29,67,12,31,14,31,17,28,16,15,5,0,22
#2003, 10 ,1104, 68, 1328, 62, N,0,27,58, 3,14,11,18,14,24,13,23,7,1,22,22,53, 2,10,16,22,10,22, 8,18,9,2,20
#season,day, Wteam,Wscore,Lteam,Lscore
season = 2016
def readresults(teams,fromyears,computeval):
n = len(teams)
incidencematrix = np.zeros([n,n])-np.eye(n,n)
#read tourneys
lines = tourneycsv.readlines()
for line in lines[1:]:
vals = line[:-1].split(',')
year = int(vals[0])
if year >= fromyears:
wteam = keyToEnum[int(vals[2])]
lteam = keyToEnum[int(vals[4])]
[winval,loseval] = computeval(vals)
incidencematrix[wteam][lteam] += winval
incidencematrix[lteam][wteam] += loseval
#read regular season
lines = seasoncsv.readlines()
for line in lines[1:]:
vals = line[:-1].split(',')
year = int(vals[0])
if year >= fromyears:#basic filter
wteam = keyToEnum[int(vals[2])]
lteam = keyToEnum[int(vals[4])]
[winval,loseval] = computeval(vals)
incidencematrix[wteam][lteam] += winval
incidencematrix[lteam][wteam] += loseval
return incidencematrix
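# A minimal example of the `computeval` callback expected by readresults():
# it maps one CSV row (a list of string fields; vals[3] is the winner's
# score, vals[5] the loser's) to a [winner_value, loser_value] pair. This
# margin-of-victory variant is an illustration only:
def margin_computeval(vals):
    margin = int(vals[3]) - int(vals[5])
    return [margin, -margin]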
| [
"numpy.eye",
"numpy.zeros"
] | [((1152, 1168), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (1160, 1168), True, 'import numpy as np\n'), ((1168, 1180), 'numpy.eye', 'np.eye', (['n', 'n'], {}), '(n, n)\n', (1174, 1180), True, 'import numpy as np\n')] |
import numpy as np
def convolution2d_multichannel(image, kernel, bias):
_, y, x = image.shape
# kernel shape: (output channels, input channels, x, y)
chO, chI, _, _ = kernel.shape
new_image = np.empty([chO, y, x])
# for adding the images when num channel out < channel in
layer_image = np.empty([chI, y, x])
for i, kernel_arr in enumerate(kernel):
# i ... iteration no.
# kernel_arr shape: (input channels, x, y)
print("i: %d" % i)
padding = 9//2
if chO < chI: # Layers 2 and 3
padding = 5//2
            for j, subkernel in enumerate(kernel_arr):
                # convolve input channel j with its matching sub-kernel; a
                # zero bias is passed so the bias is only added once below
                layer_image[j] = convolution2d(
                    image[j, ...], subkernel, 0.0, padding)
            new_image[i] = np.sum(layer_image, axis=0) + bias[i]
        else:  # Layer 1
            new_image[i] = convolution2d(
                image[0, ...], kernel_arr[0, ...], 0.0, padding) + bias[i]
new_image = np.clip(new_image, 0.0, None)
return new_image
def convolution2d(image, kernel, bias, padding):
m, n = kernel.shape
if (m == n): # if kernel is quadratic
y, x = image.shape
new_image = np.zeros((y, x), dtype='float32') # create new temp array
image = np.pad(image, padding, 'edge')
for i in range(y):
for j in range(x):
new_image[i][j] = np.sum(image[i:i+m, j:j+m]*kernel) + bias
return new_image
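# A minimal smoke test under assumed shapes (all values illustrative): one
# input channel, two output channels and 9x9 kernels, which exercises the
# "Layer 1" branch above.
if __name__ == '__main__':
    image = np.random.rand(1, 32, 32).astype('float32')
    kernel = np.random.rand(2, 1, 9, 9).astype('float32')
    bias = np.zeros(2, dtype='float32')
    out = convolution2d_multichannel(image, kernel, bias)
    print(out.shape)  # expected: (2, 32, 32)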
| [
"numpy.clip",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.pad"
] | [((211, 232), 'numpy.empty', 'np.empty', (['[chO, y, x]'], {}), '([chO, y, x])\n', (219, 232), True, 'import numpy as np\n'), ((314, 335), 'numpy.empty', 'np.empty', (['[chI, y, x]'], {}), '([chI, y, x])\n', (322, 335), True, 'import numpy as np\n'), ((978, 1007), 'numpy.clip', 'np.clip', (['new_image', '(0.0)', 'None'], {}), '(new_image, 0.0, None)\n', (985, 1007), True, 'import numpy as np\n'), ((1194, 1227), 'numpy.zeros', 'np.zeros', (['(y, x)'], {'dtype': '"""float32"""'}), "((y, x), dtype='float32')\n", (1202, 1227), True, 'import numpy as np\n'), ((1269, 1299), 'numpy.pad', 'np.pad', (['image', 'padding', '"""edge"""'], {}), "(image, padding, 'edge')\n", (1275, 1299), True, 'import numpy as np\n'), ((777, 804), 'numpy.sum', 'np.sum', (['layer_image'], {'axis': '(0)'}), '(layer_image, axis=0)\n', (783, 804), True, 'import numpy as np\n'), ((1393, 1433), 'numpy.sum', 'np.sum', (['(image[i:i + m, j:j + m] * kernel)'], {}), '(image[i:i + m, j:j + m] * kernel)\n', (1399, 1433), True, 'import numpy as np\n')] |
###########################
# AE modeling
###########################
from keras import layers, models # (Input, Dense), (Model)
class AE(models.Model):
def __init__(self, x_nodes=784, z_dim=36):
x_shape = (x_nodes,)
x = layers.Input(shape=x_shape)
z = layers.Dense(z_dim, activation='relu')(x)
y = layers.Dense(x_nodes, activation='sigmoid')(z)
super().__init__(x, y)
self.x = x
self.z = z
self.z_dim = z_dim
# Encoder, Decoder ??
self.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
def Encoder(self):
return models.Model(self.x, self.z)
def Decoder(self):
z_shape = (self.z_dim,)
z = layers.Input(shape=z_shape)
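        # Reuse the trained output layer of the autoencoder so the decoder
        # shares its weights instead of building a fresh Dense layer.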
y_layer = self.layers[-1]
y = y_layer(z)
return models.Model(z, y)
###########################
# Data preparation
###########################
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
###########################
# Analysis of training results
###########################
from ann_mnist_cl import plot_loss, plot_acc
import matplotlib.pyplot as plt
###########################
# Verifying AE operation
###########################
def show_ae(autoencoder):
encoder = autoencoder.Encoder()
decoder = autoencoder.Decoder()
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
n = 10
plt.figure(figsize=(20, 6))
for i in range(n):
ax = plt.subplot(3, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n)
plt.stem(encoded_imgs[i].reshape(-1))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
###########################
# Training
###########################
def main():
x_nodes = 784
z_dim = 36
autoencoder = AE(x_nodes, z_dim)
history = autoencoder.fit(x_train, x_train,
epochs=10,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
plot_acc(history)
plt.show()
plot_loss(history)
plt.show()
show_ae(autoencoder)
plt.show()
if __name__ == '__main__':
main()
| [
"numpy.prod",
"matplotlib.pyplot.gray",
"keras.datasets.mnist.load_data",
"ann_mnist_cl.plot_loss",
"matplotlib.pyplot.figure",
"keras.layers.Input",
"ann_mnist_cl.plot_acc",
"keras.models.Model",
"keras.layers.Dense",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1006, 1023), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1021, 1023), False, 'from keras.datasets import mnist\n'), ((1708, 1735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 6)'}), '(figsize=(20, 6))\n', (1718, 1735), True, 'import matplotlib.pyplot as plt\n'), ((2346, 2356), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2354, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2769), 'ann_mnist_cl.plot_acc', 'plot_acc', (['history'], {}), '(history)\n', (2760, 2769), False, 'from ann_mnist_cl import plot_loss, plot_acc\n'), ((2774, 2784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2782, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2807), 'ann_mnist_cl.plot_loss', 'plot_loss', (['history'], {}), '(history)\n', (2798, 2807), False, 'from ann_mnist_cl import plot_loss, plot_acc\n'), ((2812, 2822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2820, 2822), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2861, 2863), True, 'import matplotlib.pyplot as plt\n'), ((240, 267), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'x_shape'}), '(shape=x_shape)\n', (252, 267), False, 'from keras import layers, models\n'), ((643, 671), 'keras.models.Model', 'models.Model', (['self.x', 'self.z'], {}), '(self.x, self.z)\n', (655, 671), False, 'from keras import layers, models\n'), ((740, 767), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'z_shape'}), '(shape=z_shape)\n', (752, 767), False, 'from keras import layers, models\n'), ((840, 858), 'keras.models.Model', 'models.Model', (['z', 'y'], {}), '(z, y)\n', (852, 858), False, 'from keras import layers, models\n'), ((1150, 1176), 'numpy.prod', 'np.prod', (['x_train.shape[1:]'], {}), '(x_train.shape[1:])\n', (1157, 1176), True, 'import numpy as np\n'), ((1217, 1242), 'numpy.prod', 'np.prod', (['x_test.shape[1:]'], {}), '(x_test.shape[1:])\n', (1224, 1242), True, 'import numpy as np\n'), ((1773, 1797), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n', '(i + 1)'], {}), '(3, n, i + 1)\n', (1784, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1862), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (1860, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1961, 1989), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n', '(i + 1 + n)'], {}), '(3, n, i + 1 + n)\n', (1972, 1989), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2054), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (2052, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2185), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n', '(i + 1 + n + n)'], {}), '(3, n, i + 1 + n + n)\n', (2164, 2185), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2256), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (2254, 2256), True, 'import matplotlib.pyplot as plt\n'), ((280, 318), 'keras.layers.Dense', 'layers.Dense', (['z_dim'], {'activation': '"""relu"""'}), "(z_dim, activation='relu')\n", (292, 318), False, 'from keras import layers, models\n'), ((334, 377), 'keras.layers.Dense', 'layers.Dense', (['x_nodes'], {'activation': '"""sigmoid"""'}), "(x_nodes, activation='sigmoid')\n", (346, 377), False, 'from keras import layers, models\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import random
from keras import backend as K
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from src.gpopy import FlowTunning
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from src.constants import VARS, LOGS_DIR, DATA_DIR
PARAMS = {
'epochs': [20, 50],
'batch_size': [8, 16, 32],
'dense_layers_1': [32, 64, 128],
'dense_layers_2': [32, 64, 128],
'dense_layers_3': [32, 64, 128],
'init': 'normal',
'dropout_1': {
'func': random.uniform,
'params': [0, 0.5]
},
'dropout_2': {
'func': random.uniform,
'params': [0, 0.5]
},
'dropout_3': {
'func': random.uniform,
'params': [0, 0.5]
},
'activation': 'relu',
'learning_rate': {
'func': random.uniform,
'params': [0.0001, 0.01]
},
'momentum': {
'func': random.uniform,
'params': [0.1, 0.9]
},
'decay': {
'func': random.uniform,
'params': [0.001, 0.01]
},
}
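# Note: each entry above is consumed by gpopy's FlowTunning during tuning.
# Plain lists appear to act as discrete candidate sets, while the
# {'func': f, 'params': p} entries presumably draw a value via f(*p)
# (e.g. random.uniform(0, 0.5) for the dropout rates); see src.gpopy.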
def root_mean_squared_error(y_true, y_pred):
# Custom loss function for keras
return K.sqrt(K.mean(K.square(y_pred - y_true)))
class Nn_model(object):
def __init__(self, X, y):
self.X = X
self.y = y
def init_model(self, learning_rate=0.0045, decay=0.0018, momentum=0.87):
def i_m():
model = Sequential()
model.add(Dense(32, input_dim=40, activation='relu'))
model.add(Dropout(0.29))
model.add(
Dense(128, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.45))
model.add(
Dense(128, kernel_initializer='normal', activation='relu'))
model.add(Dropout(0.33))
model.add(
Dense(1, kernel_initializer='normal', activation='linear'))
sgd = SGD(lr=learning_rate,
momentum=momentum,
decay=decay,
nesterov=True)
model.compile(loss='mae', optimizer=sgd)
return model
return i_m
def i_m(self, data):
layer_1 = data['dense_layers_1']
layer_2 = data['dense_layers_2']
layer_3 = data['dense_layers_3']
dropout_1 = data['dropout_1']
dropout_2 = data['dropout_2']
dropout_3 = data['dropout_3']
activation = data['activation']
learning_rate = data['learning_rate']
momentum = data['momentum']
decay = data['decay']
init = data['init']
model = Sequential()
model.add(Dense(layer_1, input_dim=40, activation=activation))
model.add(Dropout(dropout_1))
model.add(
Dense(layer_2, kernel_initializer=init, activation=activation))
model.add(Dropout(dropout_2))
model.add(
Dense(layer_3, kernel_initializer=init, activation=activation))
model.add(Dropout(dropout_3))
model.add(Dense(1, kernel_initializer=init, activation='linear'))
sgd = SGD(lr=learning_rate,
momentum=momentum,
decay=decay,
nesterov=True)
model.compile(loss=root_mean_squared_error, optimizer=sgd)
return model
def run_models(self):
# model = KerasRegressor(
# build_fn=self.init_model,
# epochs=100,
# batch_size=8,
# validation_split=0.2,
# shuffle=True,
# verbose=1,
# )
init = self.init_model()
model = init()
# estimators.append(('mlp', model))
X_transform = self.pipeline_x(self.X)
# kfold = KFold(n_splits=3)
# results = cross_val_score(pipeline, X, y, cv=kfold)
# estimators = []
# print("Wider: %.2f (%.2f) RMSE" % (results.mean(), results.std()))
filepath = os.path.join(LOGS_DIR, 'Models/NNModel/weights',
'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
save_best_only=True,
verbose=1,
mode='min')
callbacks_list = [checkpoint]
model.fit(X_transform,
self.y,
callbacks=callbacks_list,
epochs=100,
batch_size=8,
validation_split=0.2,
shuffle=True,
verbose=1)
        model.save(
os.path.join(LOGS_DIR, 'Models/NNModel', 'saved_model.h5'))
# pipeline.save(os.path.join(LOGS_DIR, 'Models/NNModel',
# 'saved_model.h5'))
def pipeline_x(self, X=None):
if X is None:
X = self.X
estimators = []
estimators.append(('standardize', MinMaxScaler()))
estimators.append(('pca', PCA(n_components=40)))
pipeline = Pipeline(estimators, verbose=True)
X_transform = pipeline.fit_transform(X)
return X_transform
def run_saved_model(self, X=None, y=None):
# model = KerasRegressor(
# build_fn=self.init_model,
# epochs=100,
# batch_size=8,
# validation_split=0.2,
# shuffle=True,
# verbose=1,
# )
model = load_model(
os.path.join(LOGS_DIR, 'Models/NNModel', 'saved_model.h5'))
        if X is None or y is None:
X, y = self.X, self.y
X_transform = self.pipeline_x(X)
model.fit(X_transform, y)
        model.save(
os.path.join(LOGS_DIR, 'Models/NNModel', 'saved_model.h5'))
return model
def run_models_mlflow(self, data):
# estimators.append(('mlp', model))
# kfold = KFold(n_splits=3)
# results = cross_val_score(pipeline, X, y, cv=kfold)
# estimators = []
# print("Wider: %.2f (%.2f) RMSE" % (results.mean(), results.std()))
X_transform = self.pipeline_x(self.X)
        X_train, X_test, y_train, y_test = train_test_split(X_transform, self.y)
# filepath = os.path.join(LOGS_DIR, 'Models/NNModel/weights',
# 'weights_tunning.{epoch:02d}-{val_loss:.2f}.hdf5')
# checkpoint = ModelCheckpoint(filepath,
# monitor='val_loss',
# save_best_only=True,
# verbose=1,
# mode='min')
# callbacks_list = [checkpoint]
model = self.i_m(data)
model.fit(X_train,
y_train,
epochs=data['epochs'],
batch_size=data['batch_size'],
validation_data=(X_test, y_test),
verbose=1)
score = model.evaluate(X_test, y_test)
print(
"#######################- RESULTS -###############################"
)
print('Test loss:', score)
print(
"#################################################################"
)
return (-score, model)
def mlflow_run(self,
params,
maximum_generation=20,
population_size=10,
auto_track=True,
experiment_name="Default"):
tunning = FlowTunning(params=params,
population_size=population_size,
maximum_generation=maximum_generation,
auto_track=auto_track,
experiment_name=experiment_name)
tunning.set_score(self.run_models_mlflow)
tunning.run()
if __name__ == '__main__':
df_path = os.path.join(DATA_DIR, 'season_2018_cleaned.csv')
df = pd.read_csv(df_path)
X, y = df[VARS], df['ttfl']
# hist = run_models(X, y)
model = Nn_model(X, y)
# model.mlflow_run(params=PARAMS,
# population_size=5,
# maximum_generation=20,
# experiment_name="pts")
model.run_models()
| [
"keras.callbacks.ModelCheckpoint",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.decomposition.PCA",
"keras.backend.square",
"os.path.join",
"numpy.any",
"keras.models.Sequential",
"src.gpopy.FlowTunning",
"keras.layers.Dense",
"keras.optimizers.SGD",
"sklearn.pipelin... | [((8155, 8204), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""season_2018_cleaned.csv"""'], {}), "(DATA_DIR, 'season_2018_cleaned.csv')\n", (8167, 8204), False, 'import os\n'), ((8214, 8234), 'pandas.read_csv', 'pd.read_csv', (['df_path'], {}), '(df_path)\n', (8225, 8234), True, 'import pandas as pd\n'), ((2869, 2881), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2879, 2881), False, 'from keras.models import Sequential, load_model\n'), ((3345, 3413), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate', 'momentum': 'momentum', 'decay': 'decay', 'nesterov': '(True)'}), '(lr=learning_rate, momentum=momentum, decay=decay, nesterov=True)\n', (3348, 3413), False, 'from keras.optimizers import SGD\n'), ((4180, 4275), 'os.path.join', 'os.path.join', (['LOGS_DIR', '"""Models/NNModel/weights"""', '"""weights.{epoch:02d}-{val_loss:.2f}.hdf5"""'], {}), "(LOGS_DIR, 'Models/NNModel/weights',\n 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')\n", (4192, 4275), False, 'import os\n'), ((4325, 4419), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)', 'verbose': '(1)', 'mode': '"""min"""'}), "(filepath, monitor='val_loss', save_best_only=True, verbose=\n 1, mode='min')\n", (4340, 4419), False, 'from keras.callbacks import ModelCheckpoint\n'), ((5326, 5360), 'sklearn.pipeline.Pipeline', 'Pipeline', (['estimators'], {'verbose': '(True)'}), '(estimators, verbose=True)\n', (5334, 5360), False, 'from sklearn.pipeline import Pipeline\n'), ((6454, 6486), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_transform', 'y'], {}), '(X_transform, y)\n', (6470, 6486), False, 'from sklearn.model_selection import train_test_split\n'), ((7765, 7927), 'src.gpopy.FlowTunning', 'FlowTunning', ([], {'params': 'params', 'population_size': 'population_size', 'maximum_generation': 'maximum_generation', 'auto_track': 'auto_track', 'experiment_name': 'experiment_name'}), '(params=params, population_size=population_size,\n maximum_generation=maximum_generation, auto_track=auto_track,\n experiment_name=experiment_name)\n', (7776, 7927), False, 'from src.gpopy import FlowTunning\n'), ((1430, 1455), 'keras.backend.square', 'K.square', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (1438, 1455), True, 'from keras import backend as K\n'), ((1670, 1682), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1680, 1682), False, 'from keras.models import Sequential, load_model\n'), ((2175, 2243), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate', 'momentum': 'momentum', 'decay': 'decay', 'nesterov': '(True)'}), '(lr=learning_rate, momentum=momentum, decay=decay, nesterov=True)\n', (2178, 2243), False, 'from keras.optimizers import SGD\n'), ((2900, 2951), 'keras.layers.Dense', 'Dense', (['layer_1'], {'input_dim': '(40)', 'activation': 'activation'}), '(layer_1, input_dim=40, activation=activation)\n', (2905, 2951), False, 'from keras.layers import Dense, Dropout\n'), ((2971, 2989), 'keras.layers.Dropout', 'Dropout', (['dropout_1'], {}), '(dropout_1)\n', (2978, 2989), False, 'from keras.layers import Dense, Dropout\n'), ((3022, 3084), 'keras.layers.Dense', 'Dense', (['layer_2'], {'kernel_initializer': 'init', 'activation': 'activation'}), '(layer_2, kernel_initializer=init, activation=activation)\n', (3027, 3084), False, 'from keras.layers import Dense, Dropout\n'), ((3104, 3122), 'keras.layers.Dropout', 'Dropout', (['dropout_2'], {}), '(dropout_2)\n', (3111, 3122), False, 'from 
keras.layers import Dense, Dropout\n'), ((3155, 3217), 'keras.layers.Dense', 'Dense', (['layer_3'], {'kernel_initializer': 'init', 'activation': 'activation'}), '(layer_3, kernel_initializer=init, activation=activation)\n', (3160, 3217), False, 'from keras.layers import Dense, Dropout\n'), ((3237, 3255), 'keras.layers.Dropout', 'Dropout', (['dropout_3'], {}), '(dropout_3)\n', (3244, 3255), False, 'from keras.layers import Dense, Dropout\n'), ((3275, 3329), 'keras.layers.Dense', 'Dense', (['(1)'], {'kernel_initializer': 'init', 'activation': '"""linear"""'}), "(1, kernel_initializer=init, activation='linear')\n", (3280, 3329), False, 'from keras.layers import Dense, Dropout\n'), ((4903, 4961), 'os.path.join', 'os.path.join', (['LOGS_DIR', '"""Models/NNModel"""', '"""saved_model.h5"""'], {}), "(LOGS_DIR, 'Models/NNModel', 'saved_model.h5')\n", (4915, 4961), False, 'import os\n'), ((5753, 5811), 'os.path.join', 'os.path.join', (['LOGS_DIR', '"""Models/NNModel"""', '"""saved_model.h5"""'], {}), "(LOGS_DIR, 'Models/NNModel', 'saved_model.h5')\n", (5765, 5811), False, 'import os\n'), ((5824, 5838), 'numpy.any', 'np.any', (['[X, y]'], {}), '([X, y])\n', (5830, 5838), True, 'import numpy as np\n'), ((5997, 6055), 'os.path.join', 'os.path.join', (['LOGS_DIR', '"""Models/NNModel"""', '"""saved_model.h5"""'], {}), "(LOGS_DIR, 'Models/NNModel', 'saved_model.h5')\n", (6009, 6055), False, 'import os\n'), ((1705, 1747), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_dim': '(40)', 'activation': '"""relu"""'}), "(32, input_dim=40, activation='relu')\n", (1710, 1747), False, 'from keras.layers import Dense, Dropout\n'), ((1771, 1784), 'keras.layers.Dropout', 'Dropout', (['(0.29)'], {}), '(0.29)\n', (1778, 1784), False, 'from keras.layers import Dense, Dropout\n'), ((1825, 1883), 'keras.layers.Dense', 'Dense', (['(128)'], {'kernel_initializer': '"""normal"""', 'activation': '"""relu"""'}), "(128, kernel_initializer='normal', activation='relu')\n", (1830, 1883), False, 'from keras.layers import Dense, Dropout\n'), ((1907, 1920), 'keras.layers.Dropout', 'Dropout', (['(0.45)'], {}), '(0.45)\n', (1914, 1920), False, 'from keras.layers import Dense, Dropout\n'), ((1961, 2019), 'keras.layers.Dense', 'Dense', (['(128)'], {'kernel_initializer': '"""normal"""', 'activation': '"""relu"""'}), "(128, kernel_initializer='normal', activation='relu')\n", (1966, 2019), False, 'from keras.layers import Dense, Dropout\n'), ((2043, 2056), 'keras.layers.Dropout', 'Dropout', (['(0.33)'], {}), '(0.33)\n', (2050, 2056), False, 'from keras.layers import Dense, Dropout\n'), ((2097, 2155), 'keras.layers.Dense', 'Dense', (['(1)'], {'kernel_initializer': '"""normal"""', 'activation': '"""linear"""'}), "(1, kernel_initializer='normal', activation='linear')\n", (2102, 2155), False, 'from keras.layers import Dense, Dropout\n'), ((5233, 5247), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (5245, 5247), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5284, 5304), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(40)'}), '(n_components=40)\n', (5287, 5304), False, 'from sklearn.decomposition import PCA\n')] |
# MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Implementation of the standard exponential mechanism, and its derivative, the hierarchical mechanism.
"""
from numbers import Real
import numpy as np
from numpy.random import random
from diffprivlib.mechanisms.base import DPMechanism
from diffprivlib.mechanisms.binary import Binary
from diffprivlib.utils import copy_docstring
class Exponential(DPMechanism):
"""
The exponential mechanism for achieving differential privacy on categorical inputs, as first proposed by McSherry
and Talwar.
The exponential mechanism achieves differential privacy by randomly choosing an output value for a given input
value, with greater probability given to values 'closer' to the input, as measured by a given utility function.
Paper link: https://www.cs.drexel.edu/~greenie/privacy/mdviadp.pdf
"""
def __init__(self):
super().__init__()
self._domain_values = None
self._utility_values = None
self._normalising_constant = None
self._sensitivity = None
self._balanced_tree = False
def __repr__(self):
output = super().__repr__()
output += ".set_utility(" + str(self.get_utility_list()) + ")" if self._utility_values is not None else ""
return output
def set_utility(self, utility_list):
"""Sets the utility function of the mechanism. The utility function is used to determine the probability of
selecting an output for a given input.
The utility function is set by `utility_list`, which is a list of pairwise 'distances' between values in the
mechanism's domain. As the mechanisms's domain is set by the values in `utility_list`, all possible pairs in
`utility_list` must be accounted for. The utility function is symmetric, meaning the distance from `a` to
`b` is the same as the distance from `b` to `a`. Setting the second distance will overwrite the first.
Parameters
----------
utility_list : list of tuples
The utility list of the mechanism. Must be specified as a list of tuples, of the form ("value1", "value2",
utility), where each `value` is a string and `utility` is a strictly positive float. A `utility` must be
specified for every pair of values given in the `utility_list`.
Returns
-------
self : class
Raises
------
TypeError
If the `value` components of each tuple are not strings of if the `utility` component is not a float.
ValueError
If the `utility` component is zero or negative.
"""
if not isinstance(utility_list, list):
raise TypeError("Utility must be given in a list")
self._normalising_constant = None
utility_values = {}
domain_values = []
sensitivity = 0
for _utility_sub_list in utility_list:
value1, value2, utility_value = _utility_sub_list
if not isinstance(value1, str) or not isinstance(value2, str):
raise TypeError("Utility keys must be strings")
if not isinstance(utility_value, Real):
raise TypeError("Utility value must be a number")
if utility_value < 0.0:
raise ValueError("Utility values must be non-negative")
sensitivity = max(sensitivity, utility_value)
if value1 not in domain_values:
domain_values.append(value1)
if value2 not in domain_values:
domain_values.append(value2)
if value1 == value2:
continue
if value1 < value2:
utility_values[(value1, value2)] = utility_value
else:
utility_values[(value2, value1)] = utility_value
self._utility_values = utility_values
self._sensitivity = sensitivity
self._domain_values = domain_values
self._check_utility_full(domain_values)
return self
def _check_utility_full(self, domain_values):
for val1 in domain_values:
for val2 in domain_values:
if val1 >= val2:
continue
if (val1, val2) not in self._utility_values:
raise ValueError("Utility value for (%s) missing" % (val1 + ", " + val2))
return True
def get_utility_list(self):
"""Gets the utility list of the mechanism, in the same form as accepted by `.set_utility_list`.
Returns
-------
utility_list : list of tuples (str, str, float), or None
Returns a list of tuples of the form ("value1", "value2", utility), or `None` if the utility has not yet
been set.
"""
if self._utility_values is None:
return None
utility_list = []
for _key, _utility in self._utility_values.items():
value1, value2 = _key
utility_list.append((value1, value2, _utility))
return utility_list
def _build_normalising_constant(self, re_eval=False):
balanced_tree = True
first_constant_value = None
normalising_constant = {}
for _base_leaf in self._domain_values:
constant_value = 0.0
for _target_leaf in self._domain_values:
constant_value += self._get_prob(_base_leaf, _target_leaf)
normalising_constant[_base_leaf] = constant_value
if first_constant_value is None:
first_constant_value = constant_value
elif not np.isclose(constant_value, first_constant_value):
balanced_tree = False
# If the tree is balanced, we can eliminate the doubling factor
if balanced_tree and not re_eval:
self._balanced_tree = True
return self._build_normalising_constant(True)
return normalising_constant
def _get_utility(self, value1, value2):
if value1 == value2:
return 0
if value1 > value2:
return self._get_utility(value1=value2, value2=value1)
return self._utility_values[(value1, value2)]
def _get_prob(self, value1, value2):
if value1 == value2:
return 1.0
balancing_factor = 1 if self._balanced_tree else 2
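        # Probability kernel of the exponential mechanism:
        # exp(-epsilon * u(value1, value2) / (k * sensitivity)), with k = 1
        # when the utility function is balanced and k = 2 in general.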
return np.exp(- self._epsilon * self._get_utility(value1, value2) / balancing_factor / self._sensitivity)
@copy_docstring(Binary.check_inputs)
def check_inputs(self, value):
super().check_inputs(value)
if self._utility_values is None:
raise ValueError("Utility function must be set")
if self._normalising_constant is None:
self._normalising_constant = self._build_normalising_constant()
if not isinstance(value, str):
raise TypeError("Value to be randomised must be a string")
if value not in self._domain_values:
raise ValueError("Value \"%s\" not in domain" % value)
return True
def set_epsilon_delta(self, epsilon, delta):
r"""Sets the value of :math:`\epsilon` and :math:`\delta` to be used by the mechanism.
For the exponential mechanism, `delta` must be zero and `epsilon` must be strictly positive.
Parameters
----------
epsilon : float
The value of epsilon for achieving :math:`(\epsilon,\delta)`-differential privacy with the mechanism. Must
have `epsilon > 0`.
delta : float
For the exponential mechanism, `delta` must be zero.
Returns
-------
self : class
Raises
------
ValueError
If `epsilon` is zero or negative, or if `delta` is non-zero.
"""
if not delta == 0:
raise ValueError("Delta must be zero")
self._normalising_constant = None
return super().set_epsilon_delta(epsilon, delta)
@copy_docstring(DPMechanism.get_bias)
def get_bias(self, value):
raise NotImplementedError
@copy_docstring(DPMechanism.get_variance)
def get_variance(self, value):
raise NotImplementedError
@copy_docstring(Binary.randomise)
def randomise(self, value):
self.check_inputs(value)
unif_rv = random() * self._normalising_constant[value]
cum_prob = 0
_target_value = None
for _target_value in self._normalising_constant.keys():
cum_prob += self._get_prob(value, _target_value)
if unif_rv <= cum_prob:
return _target_value
return _target_value
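# A minimal usage sketch (domain values and utilities are illustrative only):
#
#   mech = Exponential()
#   mech.set_epsilon(1.0)
#   mech.set_utility([("A", "B", 1), ("A", "C", 2), ("B", "C", 2)])
#   noisy = mech.randomise("A")   # returns "A", "B" or "C" at random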
class ExponentialHierarchical(Exponential):
"""
Adaptation of the exponential mechanism to hierarchical data. Simplifies the process of specifying utility values,
as the values can be inferred from the hierarchy.
"""
def __init__(self):
super().__init__()
self._list_hierarchy = None
def __repr__(self):
output = super().__repr__()
output += ".set_hierarchy(" + str(self._list_hierarchy) + ")" if self._list_hierarchy is not None else ""
return output
def _build_hierarchy(self, nested_list, parent_node=None):
if parent_node is None:
parent_node = []
hierarchy = {}
for _i, _value in enumerate(nested_list):
if isinstance(_value, str):
hierarchy[_value] = parent_node + [_i]
elif not isinstance(_value, list):
raise TypeError("All leaves of the hierarchy must be a string " +
"(see node " + (parent_node + [_i]).__str__() + ")")
else:
hierarchy.update(self._build_hierarchy(_value, parent_node + [_i]))
self._check_hierarchy_height(hierarchy)
return hierarchy
@staticmethod
def _check_hierarchy_height(hierarchy):
hierarchy_height = None
for _value, _hierarchy_locator in hierarchy.items():
if hierarchy_height is None:
hierarchy_height = len(_hierarchy_locator)
elif len(_hierarchy_locator) != hierarchy_height:
raise ValueError("Leaves of the hierarchy must all be at the same level " +
"(node %s is at level %d instead of hierarchy height %d)" %
(_hierarchy_locator.__str__(), len(_hierarchy_locator), hierarchy_height))
@staticmethod
def _build_utility_list(hierarchy):
if not isinstance(hierarchy, dict):
raise TypeError("Hierarchy for _build_utility_list must be a dict")
utility_list = []
hierarchy_height = None
for _root_value, _root_hierarchy_locator in hierarchy.items():
if hierarchy_height is None:
hierarchy_height = len(_root_hierarchy_locator)
for _target_value, _target_hierarchy_locator in hierarchy.items():
if _root_value >= _target_value:
continue
i = 0
while (i < len(_root_hierarchy_locator) and
_root_hierarchy_locator[i] == _target_hierarchy_locator[i]):
i += 1
utility_list.append([_root_value, _target_value, hierarchy_height - i])
return utility_list
def set_hierarchy(self, list_hierarchy):
"""Sets the hierarchy of the hierarchical exponential mechanism.
The hierarchy is specified as a list of lists, where each leaf node is a string, and lies at the same depth as
        each other leaf node. The utility between each pair of leaf nodes is then calculated as the number of levels
        separating the leaves from their deepest common ancestor in the hierarchy (see `_build_utility_list`).
Parameters
----------
list_hierarchy : nested list of str
The hierarchy as specified as a nested list of string. Each string must be a leaf node, and each leaf node
must lie at the same depth in the hierarchy.
Returns
-------
self : class
"""
if not isinstance(list_hierarchy, list):
raise TypeError("Hierarchy must be a list")
self._list_hierarchy = list_hierarchy
hierarchy = self._build_hierarchy(list_hierarchy)
self.set_utility(self._build_utility_list(hierarchy))
return self
@copy_docstring(DPMechanism.get_bias)
def get_bias(self, value):
raise NotImplementedError
@copy_docstring(DPMechanism.get_variance)
def get_variance(self, value):
raise NotImplementedError
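# A minimal usage sketch for the hierarchical variant (the hierarchy below is
# illustrative only; all leaves must sit at the same depth):
#
#   mech = ExponentialHierarchical()
#   mech.set_epsilon(1.0)
#   mech.set_hierarchy([["A", "B"], ["C", "D"]])
#   noisy = mech.randomise("A")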
| [
"numpy.random.random",
"numpy.isclose",
"diffprivlib.utils.copy_docstring"
] | [((7550, 7585), 'diffprivlib.utils.copy_docstring', 'copy_docstring', (['Binary.check_inputs'], {}), '(Binary.check_inputs)\n', (7564, 7585), False, 'from diffprivlib.utils import copy_docstring\n'), ((9054, 9090), 'diffprivlib.utils.copy_docstring', 'copy_docstring', (['DPMechanism.get_bias'], {}), '(DPMechanism.get_bias)\n', (9068, 9090), False, 'from diffprivlib.utils import copy_docstring\n'), ((9162, 9202), 'diffprivlib.utils.copy_docstring', 'copy_docstring', (['DPMechanism.get_variance'], {}), '(DPMechanism.get_variance)\n', (9176, 9202), False, 'from diffprivlib.utils import copy_docstring\n'), ((9278, 9310), 'diffprivlib.utils.copy_docstring', 'copy_docstring', (['Binary.randomise'], {}), '(Binary.randomise)\n', (9292, 9310), False, 'from diffprivlib.utils import copy_docstring\n'), ((13381, 13417), 'diffprivlib.utils.copy_docstring', 'copy_docstring', (['DPMechanism.get_bias'], {}), '(DPMechanism.get_bias)\n', (13395, 13417), False, 'from diffprivlib.utils import copy_docstring\n'), ((13489, 13529), 'diffprivlib.utils.copy_docstring', 'copy_docstring', (['DPMechanism.get_variance'], {}), '(DPMechanism.get_variance)\n', (13503, 13529), False, 'from diffprivlib.utils import copy_docstring\n'), ((9395, 9403), 'numpy.random.random', 'random', ([], {}), '()\n', (9401, 9403), False, 'from numpy.random import random\n'), ((6693, 6741), 'numpy.isclose', 'np.isclose', (['constant_value', 'first_constant_value'], {}), '(constant_value, first_constant_value)\n', (6703, 6741), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Implementation of reasoning layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Layer, InputSpec
from keras.utils import conv_utils
from keras.activations import softmax
from keras import regularizers, initializers, constraints
import keras.backend as K
import numpy as np
class Reasoning(Layer):
"""Simple reasoning over one detection possibility vector.
This layer computes the class-wise hypothesis probabilities based on a
reasoning process over one detection possibility vector. The output is
    the class hypothesis possibility vector. Hence, this layer is similar
    in flavor to a traditional dense layer.
    The `reasoning_initializer` and `reasoning_regularizer` are applied on the
    encoded reasoning probabilities. Moreover, the component probabilities
    are trained over the reals and squashed by softmax to a probability
    vector. Hence, the initializer/regularizer/constraint applies over the
    reals.
# Arguments
n_classes: Integer, specifying the number of classes.
n_replicas: Integer, specifying the number of trainable reasoning
processes for each class. A value greater than 1 realizes
            multiple reasoning. A respective handling of the replicas must
            be performed manually afterwards.
reasoning_initializer: Initializer for the encoded
reasoning probabilities which is interpretable by Keras
`initializers.get()` routine.
reasoning_regularizer: Regularizer for the encoded
reasoning probabilities which is interpretable by Keras
`regularizers.get()` routine.
use_component_probabilities: Boolean, specifying if the reasoning
process has trainable component probabilities. If false,
the model assumes a probability of 1/number_of_components for
all components.
component_probabilities_initializer: Initializer for the component
probabilities which is interpretable by Keras
`initializers.get()` routine.
component_probabilities_regularizer: Regularizer for the component
probabilities which is interpretable by Keras
`regularizers.get()` routine.
component_probabilities_constraint: Constraint for the component
probabilities which is interpretable by Keras
`constraints.get()` routine.
# Input shape
2-dimensional tensor with shape:
`(batch, number of components)`.
This tensor is the detection possibility vector for each batch.
# Output shape
2-dimensional tensor with shape
`(batch, n_classes)` if `n_replicas` == 1 or
3-dimensional tensor with shape:
        `(batch, n_classes, n_replicas)` otherwise.
This tensor is the class hypothesis possibility vector for each batch.
"""
def __init__(self,
n_classes,
n_replicas=1,
reasoning_initializer='zeros',
reasoning_regularizer=None,
use_component_probabilities=False,
component_probabilities_initializer='zeros',
component_probabilities_regularizer=None,
component_probabilities_constraint=None,
**kwargs):
super(Reasoning, self).__init__(**kwargs)
self.n_classes = n_classes
self.n_replicas = n_replicas
self.reasoning_initializer = initializers.get(reasoning_initializer)
self.reasoning_regularizer = regularizers.get(reasoning_regularizer)
self.use_component_probabilities = use_component_probabilities
self.component_probabilities_initializer = initializers.get(
component_probabilities_initializer)
self.component_probabilities_regularizer = regularizers.get(
component_probabilities_regularizer)
self.component_probabilities_constraint = constraints.get(
component_probabilities_constraint)
def build(self, input_shape):
self.input_spec = InputSpec(shape=(None,) + tuple(input_shape[1:]))
# encoded trainable tensors
self.reasoning_probabilities = self.add_weight(
shape=(2,
self.n_replicas,
input_shape[-1],
self.n_classes),
initializer=self.reasoning_initializer,
regularizer=self.reasoning_regularizer,
constraint=lambda x: K.clip(x, 0., 1.),
name='reasoning_probabilities')
if self.use_component_probabilities:
self.component_probabilities = self.add_weight(
shape=(1, input_shape[-1], 1),
initializer=self.component_probabilities_initializer,
regularizer=self.component_probabilities_regularizer,
constraint=self.component_probabilities_constraint,
name='component_probabilities')
self.built = True
def call(self, inputs, **kwargs):
# decode the reasoning probabilities
positive_kernel = self.reasoning_probabilities[0]
negative_kernel = (1 - positive_kernel) * \
self.reasoning_probabilities[1]
if self.use_component_probabilities:
            # squash component probabilities over the component axis
            components_probabilities = softmax(self.component_probabilities,
                                               axis=1)
positive_kernel = positive_kernel * components_probabilities
negative_kernel = negative_kernel * components_probabilities
# stabilize the division with a small epsilon
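        # Effective reasoning: for a detection vector d and decoded positive/
        # negative reasoning kernels p and n (neutral mass 1 - p - n), the
        # class possibility is (d . (p - n) + sum(n)) / (sum(p + n) + eps):
        # agreement with positive reasoning plus disagreement with negative
        # reasoning, normalised by the total reasoning mass.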
probs = (K.dot(inputs, (positive_kernel - negative_kernel)) \
+ K.sum(negative_kernel, 1)) \
/ (K.sum(positive_kernel + negative_kernel, 1) + K.epsilon())
# squeeze replica dimension if one.
if self.n_replicas == 1:
probs = K.squeeze(probs, axis=1)
else:
probs = K.permute_dimensions(probs, (0, 2, 1))
return probs
def compute_output_shape(self, input_shape):
if self.n_replicas != 1:
return (None, self.n_classes, self.n_replicas)
else:
return (None, self.n_classes)
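# A minimal usage sketch for the Reasoning layer (shapes illustrative only);
# it sits on top of a detection head that outputs per-component
# possibilities in [0, 1]:
#
#   d = detection_head_output                # shape (batch, n_components)
#   y = Reasoning(n_classes=10)(d)           # shape (batch, 10)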
class Reasoning2D(Layer):
"""Spatial reasoning over a detection possibility stack.
This layer computes the class-wise hypothesis probabilities based on
spatial reasoning. The output is a class hypothesis possibility stack.
This implementation is the extension of the simple reasoning process to
a sliding operation.
    The `reasoning_initializer` and `reasoning_regularizer` are applied on the
    encoded reasoning probabilities. Moreover, the component probabilities
    are trained over IR and squashed by softmax to a probability vector.
    Hence, the initializer/regularizer/constraint applies over IR. The same
    holds for the pixel probabilities.
The documentation of the arguments `strides`, `padding`, `dilation_rate`
and `activation` is copied from the Keras documentation of the `Conv2D`
layer.
# Arguments
n_classes: Integer, specifying the number of classes.
n_replicas: Integer, specifying the number of trainable reasoning
processes for each class. A value greater than 1 realizes
multiple reasoning. A respective handling of the replicas must
            be performed manually afterwards.
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D spatial reasoning stack.
Can be a single integer to specify the same value for
all spatial dimensions. If 'None', then the kernel_size is
automatically defined as the spatial input dimension size.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
Note that `"same"` is slightly inconsistent across backends with
`strides` != 1, as described
[here](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860)
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
reasoning_initializer: Initializer for the encoded
reasoning probabilities which is interpretable by Keras
`initializers.get()` routine.
reasoning_regularizer: Regularizer for the encoded
reasoning probabilities which is interpretable by Keras
`regularizers.get()` routine.
use_component_probabilities: Boolean, specifying if the reasoning
process has trainable component probabilities. If false,
the model assumes a probability of 1/number_of_components for
all components.
component_probabilities_initializer: Initializer for the component
probabilities which is interpretable by Keras
`initializers.get()` routine.
component_probabilities_regularizer: Regularizer for the component
probabilities which is interpretable by Keras
`regularizers.get()` routine.
component_probabilities_constraint: Constraint for the component
probabilities which is interpretable by Keras
`constraints.get()` routine.
use_pixel_probabilities: Boolean, specifying if the reasoning
process has trainable pixel probabilities. If false,
the model assumes a probability of 1/kernel_size.
        pixel_probabilities_initializer: Initializer for the pixel
            probabilities which is interpretable by Keras
            `initializers.get()` routine.
        pixel_probabilities_regularizer: Regularizer for the pixel
            probabilities which is interpretable by Keras
            `regularizers.get()` routine.
        pixel_probabilities_constraint: Constraint for the pixel
            probabilities which is interpretable by Keras
            `constraints.get()` routine.
# Input shape
4-dimensional tensor with shape:
`(batch, rows, cols, number of components)`.
This tensor is the detection possibility stack for each batch.
# Output shape
4-dimensional tensor with shape:
`(batch, new_rows, new_cols, n_classes)` if `n_replicas` == 1 or
5-dimensional tensor with shape:
`(batch, new_rows, new_cols, n_classes, n_replicas)` otherwise.
`rows` and `cols` values might have changed due to padding.
This tensor is the class hypothesis possibility stack for each batch.
"""
def __init__(self,
n_classes,
n_replicas=1,
kernel_size=None,
strides=(1, 1),
padding='valid',
dilation_rate=(1, 1),
reasoning_initializer='zeros',
reasoning_regularizer=None,
use_component_probabilities=False,
component_probabilities_initializer='zeros',
component_probabilities_regularizer=None,
component_probabilities_constraint=None,
use_pixel_probabilities=False,
pixel_probabilities_initializer='zeros',
pixel_probabilities_regularizer=None,
pixel_probabilities_constraint=None,
**kwargs):
super(Reasoning2D, self).__init__(**kwargs)
self.n_classes = n_classes
self.n_replicas = n_replicas
self.rank = 2
if kernel_size is not None:
self.kernel_size = conv_utils.normalize_tuple(kernel_size,
self.rank,
'kernel_size')
else:
self.kernel_size = None
self.strides = conv_utils.normalize_tuple(strides,
self.rank,
'strides')
self.padding = conv_utils.normalize_padding(padding)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate,
self.rank,
'dilation_rate')
self.reasoning_initializer = initializers.get(reasoning_initializer)
self.reasoning_regularizer = regularizers.get(reasoning_regularizer)
self.use_component_probabilities = use_component_probabilities
self.component_probabilities_initializer = initializers.get(
component_probabilities_initializer)
self.component_probabilities_regularizer = regularizers.get(
component_probabilities_regularizer)
self.component_probabilities_constraint = constraints.get(
component_probabilities_constraint)
self.use_pixel_probabilities = use_pixel_probabilities
self.pixel_probabilities_initializer = initializers.get(
pixel_probabilities_initializer)
self.pixel_probabilities_regularizer = regularizers.get(
pixel_probabilities_regularizer)
self.pixel_probabilities_constraint = constraints.get(
pixel_probabilities_constraint)
def build(self, input_shape):
self.input_spec = InputSpec(shape=(None,) + tuple(input_shape[1:]))
# define kernel_size as full-image if not provided
if self.kernel_size is None:
self.kernel_size = input_shape[1:3]
kernel_shape = (2,) \
+ self.kernel_size \
+ (input_shape[-1], self.n_classes * self.n_replicas)
# encoded trainable tensors
self.reasoning_probabilities = self.add_weight(
shape=kernel_shape,
initializer=self.reasoning_initializer,
regularizer=self.reasoning_regularizer,
constraint=lambda x: K.clip(x, 0., 1.),
name='reasoning_probabilities')
if self.use_pixel_probabilities:
self.pixel_probabilities = self.add_weight(
shape=self.kernel_size + (1, self.n_classes * self.n_replicas),
initializer=self.pixel_probabilities_initializer,
regularizer=self.pixel_probabilities_regularizer,
constraint=self.pixel_probabilities_constraint,
name='pixel_probabilities')
if self.use_component_probabilities:
self.component_probabilities = self.add_weight(
shape=(1, 1, input_shape[-1], 1),
initializer=self.component_probabilities_initializer,
regularizer=self.component_probabilities_regularizer,
constraint=self.component_probabilities_constraint,
name='component_probabilities')
self.built = True
def call(self, inputs, **kwargs):
# decode the reasoning probabilities
positive_kernel = self.reasoning_probabilities[0]
negative_kernel = (1 - positive_kernel) * \
self.reasoning_probabilities[1]
if self.use_component_probabilities:
# squash component probabilities
components_probabilities = softmax(self.component_probabilities,
axis=2)
positive_kernel = positive_kernel * components_probabilities
negative_kernel = negative_kernel * components_probabilities
# get normalization tensor
# stabilize the division with a small epsilon
normalization = K.sum(positive_kernel + negative_kernel,
axis=2,
keepdims=True) + K.epsilon()
# get sliding kernel and bias
if self.use_pixel_probabilities:
pixel_probabilities = softmax(self.pixel_probabilities,
axis=(0, 1))
# scale kernel with priors
kernel = (positive_kernel - negative_kernel) / normalization \
* pixel_probabilities
bias = K.sum(negative_kernel / normalization
* pixel_probabilities,
axis=(0, 1, 2),
keepdims=True)
else:
kernel = (positive_kernel - negative_kernel) / normalization
bias = K.sum(negative_kernel / normalization,
axis=(0, 1, 2),
keepdims=True)
# compute probabilities by a sliding operation
probs = K.conv2d(inputs, kernel,
strides=self.strides,
padding=self.padding,
data_format='channels_last',
dilation_rate=self.dilation_rate) + bias
if not self.use_pixel_probabilities:
            # divide by the number of kernel pixels (uniform pixel prior)
probs = probs / np.prod(self.kernel_size)
# reshape to m x n x #classes x #replicas
probs = K.reshape(probs,
(-1,) + K.int_shape(probs)[1:3]
+ (self.n_classes, self.n_replicas))
# squeeze replica dimension if one.
if self.n_replicas == 1:
probs = K.squeeze(probs, axis=-1)
return probs
def compute_output_shape(self, input_shape):
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
shape = list((input_shape[0],) + tuple(new_space) + (self.n_classes,))
if self.n_replicas != 1:
shape = shape + [self.n_replicas]
return tuple(shape)
def softmax(tensors, axis=-1):
"""Implementation of softmax with maximum stabilization and multiple
axis support.
# Arguments
tensors: Input tensor.
axis: An integer or tuple/list of integers, specifying the
axis for the normalization
# Input shape
tensor with arbitrary shape
# Output shape
tensor with the same shape as the input tensor
"""
with K.name_scope('softmax'):
tensors = tensors - K.max(tensors, axis=axis, keepdims=True)
exp = K.exp(tensors)
return exp / K.sum(exp, axis=axis, keepdims=True)
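# Note (added): subtracting the per-axis maximum before exponentiation is the
# usual overflow guard; e.g. for a constant 2x2 input, softmax over
# axis=(0, 1) yields 0.25 in every entry.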
| [
"keras.constraints.get",
"numpy.prod",
"keras.backend.permute_dimensions",
"keras.backend.sum",
"keras.backend.conv2d",
"keras.regularizers.get",
"keras.backend.clip",
"keras.backend.max",
"keras.layers.conv_utils.normalize_padding",
"keras.layers.conv_utils.normalize_tuple",
"keras.backend.sque... | [((3496, 3535), 'keras.initializers.get', 'initializers.get', (['reasoning_initializer'], {}), '(reasoning_initializer)\n', (3512, 3535), False, 'from keras import regularizers, initializers, constraints\n'), ((3573, 3612), 'keras.regularizers.get', 'regularizers.get', (['reasoning_regularizer'], {}), '(reasoning_regularizer)\n', (3589, 3612), False, 'from keras import regularizers, initializers, constraints\n'), ((3736, 3789), 'keras.initializers.get', 'initializers.get', (['component_probabilities_initializer'], {}), '(component_probabilities_initializer)\n', (3752, 3789), False, 'from keras import regularizers, initializers, constraints\n'), ((3854, 3907), 'keras.regularizers.get', 'regularizers.get', (['component_probabilities_regularizer'], {}), '(component_probabilities_regularizer)\n', (3870, 3907), False, 'from keras import regularizers, initializers, constraints\n'), ((3971, 4022), 'keras.constraints.get', 'constraints.get', (['component_probabilities_constraint'], {}), '(component_probabilities_constraint)\n', (3986, 4022), False, 'from keras import regularizers, initializers, constraints\n'), ((12457, 12514), 'keras.layers.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['strides', 'self.rank', '"""strides"""'], {}), "(strides, self.rank, 'strides')\n", (12483, 12514), False, 'from keras.layers import Layer, InputSpec, conv_utils\n'), ((12638, 12675), 'keras.layers.conv_utils.normalize_padding', 'conv_utils.normalize_padding', (['padding'], {}), '(padding)\n', (12666, 12675), False, 'from keras.layers import Layer, InputSpec, conv_utils\n'), ((12705, 12774), 'keras.layers.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['dilation_rate', 'self.rank', '"""dilation_rate"""'], {}), "(dilation_rate, self.rank, 'dilation_rate')\n", (12731, 12774), False, 'from keras.layers import Layer, InputSpec, conv_utils\n'), ((12925, 12964), 'keras.initializers.get', 'initializers.get', (['reasoning_initializer'], {}), '(reasoning_initializer)\n', (12941, 12964), False, 'from keras import regularizers, initializers, constraints\n'), ((13002, 13041), 'keras.regularizers.get', 'regularizers.get', (['reasoning_regularizer'], {}), '(reasoning_regularizer)\n', (13018, 13041), False, 'from keras import regularizers, initializers, constraints\n'), ((13165, 13218), 'keras.initializers.get', 'initializers.get', (['component_probabilities_initializer'], {}), '(component_probabilities_initializer)\n', (13181, 13218), False, 'from keras import regularizers, initializers, constraints\n'), ((13283, 13336), 'keras.regularizers.get', 'regularizers.get', (['component_probabilities_regularizer'], {}), '(component_probabilities_regularizer)\n', (13299, 13336), False, 'from keras import regularizers, initializers, constraints\n'), ((13400, 13451), 'keras.constraints.get', 'constraints.get', (['component_probabilities_constraint'], {}), '(component_probabilities_constraint)\n', (13415, 13451), False, 'from keras import regularizers, initializers, constraints\n'), ((13576, 13625), 'keras.initializers.get', 'initializers.get', (['pixel_probabilities_initializer'], {}), '(pixel_probabilities_initializer)\n', (13592, 13625), False, 'from keras import regularizers, initializers, constraints\n'), ((13686, 13735), 'keras.regularizers.get', 'regularizers.get', (['pixel_probabilities_regularizer'], {}), '(pixel_probabilities_regularizer)\n', (13702, 13735), False, 'from keras import regularizers, initializers, constraints\n'), ((13795, 13842), 'keras.constraints.get', 
'constraints.get', (['pixel_probabilities_constraint'], {}), '(pixel_probabilities_constraint)\n', (13810, 13842), False, 'from keras import regularizers, initializers, constraints\n'), ((18936, 18959), 'keras.backend.name_scope', 'K.name_scope', (['"""softmax"""'], {}), "('softmax')\n", (18948, 18959), True, 'import keras.backend as K\n'), ((19044, 19058), 'keras.backend.exp', 'K.exp', (['tensors'], {}), '(tensors)\n', (19049, 19058), True, 'import keras.backend as K\n'), ((5922, 5946), 'keras.backend.squeeze', 'K.squeeze', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (5931, 5946), True, 'import keras.backend as K\n'), ((5981, 6019), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['probs', '(0, 2, 1)'], {}), '(probs, (0, 2, 1))\n', (6001, 6019), True, 'import keras.backend as K\n'), ((12202, 12267), 'keras.layers.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['kernel_size', 'self.rank', '"""kernel_size"""'], {}), "(kernel_size, self.rank, 'kernel_size')\n", (12228, 12267), False, 'from keras.layers import Layer, InputSpec, conv_utils\n'), ((16182, 16245), 'keras.backend.sum', 'K.sum', (['(positive_kernel + negative_kernel)'], {'axis': '(2)', 'keepdims': '(True)'}), '(positive_kernel + negative_kernel, axis=2, keepdims=True)\n', (16187, 16245), True, 'import keras.backend as K\n'), ((16308, 16319), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (16317, 16319), True, 'import keras.backend as K\n'), ((16699, 16794), 'keras.backend.sum', 'K.sum', (['(negative_kernel / normalization * pixel_probabilities)'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '(negative_kernel / normalization * pixel_probabilities, axis=(0, 1, 2),\n keepdims=True)\n', (16704, 16794), True, 'import keras.backend as K\n'), ((16972, 17041), 'keras.backend.sum', 'K.sum', (['(negative_kernel / normalization)'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '(negative_kernel / normalization, axis=(0, 1, 2), keepdims=True)\n', (16977, 17041), True, 'import keras.backend as K\n'), ((17164, 17299), 'keras.backend.conv2d', 'K.conv2d', (['inputs', 'kernel'], {'strides': 'self.strides', 'padding': 'self.padding', 'data_format': '"""channels_last"""', 'dilation_rate': 'self.dilation_rate'}), "(inputs, kernel, strides=self.strides, padding=self.padding,\n data_format='channels_last', dilation_rate=self.dilation_rate)\n", (17172, 17299), True, 'import keras.backend as K\n'), ((17852, 17877), 'keras.backend.squeeze', 'K.squeeze', (['probs'], {'axis': '(-1)'}), '(probs, axis=-1)\n', (17861, 17877), True, 'import keras.backend as K\n'), ((18065, 18208), 'keras.layers.conv_utils.conv_output_length', 'conv_utils.conv_output_length', (['space[i]', 'self.kernel_size[i]'], {'padding': 'self.padding', 'stride': 'self.strides[i]', 'dilation': 'self.dilation_rate[i]'}), '(space[i], self.kernel_size[i], padding=self.\n padding, stride=self.strides[i], dilation=self.dilation_rate[i])\n', (18094, 18208), False, 'from keras.layers import Layer, InputSpec, conv_utils\n'), ((18989, 19029), 'keras.backend.max', 'K.max', (['tensors'], {'axis': 'axis', 'keepdims': '(True)'}), '(tensors, axis=axis, keepdims=True)\n', (18994, 19029), True, 'import keras.backend as K\n'), ((19080, 19116), 'keras.backend.sum', 'K.sum', (['exp'], {'axis': 'axis', 'keepdims': '(True)'}), '(exp, axis=axis, keepdims=True)\n', (19085, 19116), True, 'import keras.backend as K\n'), ((5645, 5693), 'keras.backend.dot', 'K.dot', (['inputs', '(positive_kernel - negative_kernel)'], {}), '(inputs, positive_kernel - negative_kernel)\n', 
(5650, 5693), True, 'import keras.backend as K\n'), ((5717, 5742), 'keras.backend.sum', 'K.sum', (['negative_kernel', '(1)'], {}), '(negative_kernel, 1)\n', (5722, 5742), True, 'import keras.backend as K\n'), ((5765, 5808), 'keras.backend.sum', 'K.sum', (['(positive_kernel + negative_kernel)', '(1)'], {}), '(positive_kernel + negative_kernel, 1)\n', (5770, 5808), True, 'import keras.backend as K\n'), ((5811, 5822), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (5820, 5822), True, 'import keras.backend as K\n'), ((17523, 17548), 'numpy.prod', 'np.prod', (['self.kernel_size'], {}), '(self.kernel_size)\n', (17530, 17548), True, 'import numpy as np\n'), ((4507, 4526), 'keras.backend.clip', 'K.clip', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (4513, 4526), True, 'import keras.backend as K\n'), ((14526, 14545), 'keras.backend.clip', 'K.clip', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (14532, 14545), True, 'import keras.backend as K\n'), ((17667, 17685), 'keras.backend.int_shape', 'K.int_shape', (['probs'], {}), '(probs)\n', (17678, 17685), True, 'import keras.backend as K\n')] |
# [reference]
# https://github.com/matthiasplappert/keras-rl/blob/master/rl/random.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class RandomProcess(object):
def reset_states(self):
pass
class AnnealedGaussianProcess(RandomProcess):
def __init__(self, mu, sigma, sigma_min, n_steps_annealing):
self.mu = mu
self.sigma = sigma
self.n_steps = 0
if sigma_min is not None:
self.m = -float(sigma - sigma_min) / float(n_steps_annealing)
self.c = sigma
self.sigma_min = sigma_min
else:
self.m = 0.
self.c = sigma
self.sigma_min = sigma
@property
def current_sigma(self):
sigma = max(self.sigma_min, self.m * float(self.n_steps) + self.c)
return sigma
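    # note (added): sigma decays linearly from `sigma` to `sigma_min` over
    # `n_steps_annealing` steps, i.e. sigma(t) = max(sigma_min, m * t + c).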
# Based on
# http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):
def __init__(self, theta, mu=0., sigma=1., dt=1e-2,
x0=None, size=1, sigma_min=None, n_steps_annealing=1000):
super(OrnsteinUhlenbeckProcess, self).__init__(
mu=mu,
sigma=sigma,
sigma_min=sigma_min,
n_steps_annealing=n_steps_annealing)
self.theta = theta
self.mu = mu
self.dt = dt
self.x0 = x0
self.size = size
self.reset_states()
def sample(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \
self.current_sigma * np.sqrt(self.dt) * \
np.random.normal(size=self.size)
self.x_prev = x
self.n_steps += 1
return x
def reset_states(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros(self.size)
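# Hedged usage sketch (added; parameter values are illustrative):
#   ou = OrnsteinUhlenbeckProcess(theta=0.15, sigma=0.3, size=3)
#   noise = ou.sample()  # one Euler-Maruyama step of
#                        # dx = theta*(mu - x)*dt + sigma*sqrt(dt)*N(0, 1)
#   ou.reset_states()    # restart the process at x0 (or zeros)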
| [
"numpy.random.normal",
"numpy.zeros",
"numpy.sqrt"
] | [((1860, 1879), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (1868, 1879), True, 'import numpy as np\n'), ((1673, 1705), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.size'}), '(size=self.size)\n', (1689, 1705), True, 'import numpy as np\n'), ((1640, 1656), 'numpy.sqrt', 'np.sqrt', (['self.dt'], {}), '(self.dt)\n', (1647, 1656), True, 'import numpy as np\n')] |
import numpy as np
from copy import deepcopy
from rlcard.games.mahjong import Dealer
from rlcard.games.mahjong import Player
from rlcard.games.mahjong import Round
from rlcard.games.mahjong import Judger
class MahjongGame:
def __init__(self, allow_step_back=False):
        '''Initialize the class MahjongGame
'''
self.allow_step_back = allow_step_back
self.np_random = np.random.RandomState()
self.num_players = 4
def init_game(self):
        ''' Initialize the game of Mahjong
        This version supports four-player Mahjong
Returns:
(tuple): Tuple containing:
(dict): The first state of the game
(int): Current player's id
'''
# Initialize a dealer that can deal cards
self.dealer = Dealer(self.np_random)
# Initialize four players to play the game
self.players = [Player(i, self.np_random) for i in range(self.num_players)]
self.judger = Judger(self.np_random)
self.round = Round(self.judger, self.dealer, self.num_players, self.np_random)
# Deal 13 cards to each player to prepare for the game
for player in self.players:
self.dealer.deal_cards(player, 13)
        # Save the history for stepping back to the last state.
self.history = []
self.dealer.deal_cards(self.players[self.round.current_player], 1)
state = self.get_state(self.round.current_player)
self.cur_state = state
return state, self.round.current_player
def step(self, action):
''' Get the next state
Args:
            action (str): a specific Mahjong action (a card to play, or pong/chow/gong/stand)
Returns:
(tuple): Tuple containing:
(dict): next player's state
                (int): next player's id
'''
# First snapshot the current state
if self.allow_step_back:
hist_dealer = deepcopy(self.dealer)
hist_round = deepcopy(self.round)
hist_players = deepcopy(self.players)
self.history.append((hist_dealer, hist_players, hist_round))
self.round.proceed_round(self.players, action)
state = self.get_state(self.round.current_player)
self.cur_state = state
return state, self.round.current_player
def step_back(self):
''' Return to the previous state of the game
Returns:
(bool): True if the game steps back successfully
'''
if not self.history:
return False
self.dealer, self.players, self.round = self.history.pop()
return True
def get_state(self, player_id):
''' Return player's state
Args:
player_id (int): player id
Returns:
(dict): The state of the player
'''
state = self.round.get_state(self.players, player_id)
return state
@staticmethod
def get_legal_actions(state):
''' Return the legal actions for current player
Returns:
(list): A list of legal actions
'''
if state['valid_act'] == ['play']:
state['valid_act'] = state['action_cards']
return state['action_cards']
else:
return state['valid_act']
@staticmethod
def get_num_actions():
''' Return the number of applicable actions
Returns:
            (int): The number of actions. There are 38 actions in Mahjong
'''
return 38
    def get_num_players(self):
        ''' Return the number of players in Mahjong
        Returns:
            (int): the number of players in the game
        '''
        return self.num_players
    def get_player_id(self):
        ''' Return the id of the current player in Mahjong
        Returns:
            (int): the id of the current player
        '''
        return self.round.current_player
def is_over(self):
''' Check if the game is over
Returns:
(boolean): True if the game is over
'''
win, player, _ = self.judger.judge_game(self)
#pile =[sorted([c.get_str() for c in s ]) for s in self.players[player].pile if self.players[player].pile != None]
#cards = sorted([c.get_str() for c in self.players[player].hand])
#count = len(cards) + sum([len(p) for p in pile])
self.winner = player
#print(win, player, players_val)
#print(win, self.round.current_player, player, cards, pile, count)
return win
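# Hedged usage sketch (added; `pick_action` is a hypothetical chooser over the
# legal actions, e.g. a random or learned policy):
#   game = MahjongGame()
#   state, player_id = game.init_game()
#   while not game.is_over():
#       action = pick_action(MahjongGame.get_legal_actions(game.cur_state))
#       state, player_id = game.step(action)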
| [
"copy.deepcopy",
"rlcard.games.mahjong.Judger",
"rlcard.games.mahjong.Dealer",
"rlcard.games.mahjong.Player",
"numpy.random.RandomState",
"rlcard.games.mahjong.Round"
] | [((400, 423), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (421, 423), True, 'import numpy as np\n'), ((810, 832), 'rlcard.games.mahjong.Dealer', 'Dealer', (['self.np_random'], {}), '(self.np_random)\n', (816, 832), False, 'from rlcard.games.mahjong import Dealer\n'), ((992, 1014), 'rlcard.games.mahjong.Judger', 'Judger', (['self.np_random'], {}), '(self.np_random)\n', (998, 1014), False, 'from rlcard.games.mahjong import Judger\n'), ((1036, 1101), 'rlcard.games.mahjong.Round', 'Round', (['self.judger', 'self.dealer', 'self.num_players', 'self.np_random'], {}), '(self.judger, self.dealer, self.num_players, self.np_random)\n', (1041, 1101), False, 'from rlcard.games.mahjong import Round\n'), ((909, 934), 'rlcard.games.mahjong.Player', 'Player', (['i', 'self.np_random'], {}), '(i, self.np_random)\n', (915, 934), False, 'from rlcard.games.mahjong import Player\n'), ((1958, 1979), 'copy.deepcopy', 'deepcopy', (['self.dealer'], {}), '(self.dealer)\n', (1966, 1979), False, 'from copy import deepcopy\n'), ((2005, 2025), 'copy.deepcopy', 'deepcopy', (['self.round'], {}), '(self.round)\n', (2013, 2025), False, 'from copy import deepcopy\n'), ((2053, 2075), 'copy.deepcopy', 'deepcopy', (['self.players'], {}), '(self.players)\n', (2061, 2075), False, 'from copy import deepcopy\n')] |
from pathlib import Path
import numpy as np
import pandas as pd
import pkgutil
import cupy
# Simulation parameters
seed = 131
noise_level = 1
# Technical settings
ROOT_DIR = Path(__file__).parent.parent.absolute()
DATA_DIR = ROOT_DIR / "data"
SIM_DIR = DATA_DIR / "simulations_output"
TEST_SIM_DIR = ROOT_DIR / "test" / "data" / "simulations_output"
np.set_printoptions(threshold=np.inf, linewidth=np.inf)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
GPU_AVAILABLE = pkgutil.find_loader('cupy')
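# Note (added): pkgutil.find_loader returns a loader object or None, so
# GPU_AVAILABLE is truthy rather than a strict bool; use bool(GPU_AVAILABLE)
# if a proper flag is needed.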
# Use this to switch GPU if one is in use.
# If all GPUs are used you are in bad luck, wait for your turn
cupy.cuda.Device(3).use()
| [
"cupy.cuda.Device",
"pathlib.Path",
"pkgutil.find_loader",
"pandas.set_option",
"numpy.set_printoptions"
] | [((354, 409), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'linewidth': 'np.inf'}), '(threshold=np.inf, linewidth=np.inf)\n', (373, 409), True, 'import numpy as np\n'), ((410, 448), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (423, 448), True, 'import pandas as pd\n'), ((449, 490), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (462, 490), True, 'import pandas as pd\n'), ((508, 535), 'pkgutil.find_loader', 'pkgutil.find_loader', (['"""cupy"""'], {}), "('cupy')\n", (527, 535), False, 'import pkgutil\n'), ((642, 661), 'cupy.cuda.Device', 'cupy.cuda.Device', (['(3)'], {}), '(3)\n', (658, 661), False, 'import cupy\n'), ((176, 190), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (180, 190), False, 'from pathlib import Path\n')] |
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by pre_process on 19-1-30
import tensorflow as tf
import os
import numpy as np
import coms.utils as utils
import coms.tfrecords as tfrecords
tf.app.flags.DEFINE_string('dataset_dir', './data', '')
FLAGS = tf.app.flags.FLAGS
def get_cifar10_batch(is_train, batch_size, num_cls, img_prob):
train_dir = r''
test_dir = r''
aim_dir = r''
if utils.isLinuxSys():
train_dir = os.path.join(FLAGS.dataset_dir, "train.tfrecords")
#train_dir = r'data/train.tfrecords'
test_dir = os.path.join(FLAGS.dataset_dir, "test.tfrecords")
#test_dir = r'data/test.tfrecords'
else:
train_dir = r'D:\DataSets\cifar\cifar\tfrecords\train.tfrecords'
test_dir = r'D:\DataSets\cifar\cifar\tfrecords\test.tfrecords'
'''
if is_train:
aim_dir = train_dir
print(aim_dir)
train_img_tfrecords, train_label_tfrecords = tfrecords.get_tfrecords(aim_dir,img_prob=img_prob)
train_img_batch, train_label_batch = get_batch_tfrecords(train_img_tfrecords,train_label_tfrecords,img_prob[0],img_prob[1],batch_size,10)
train_label_batch = tf.one_hot(train_label_batch,depth=num_cls)
return train_img_batch,train_label_batch
#yield train_img_batch,train_label_batch
else:
aim_dir = test_dir
test_img_tfrecords, test_label_tfrecords = tfrecords.get_tfrecords(aim_dir,img_prob=img_prob)
test_img_batch, test_label_batch = get_batch_tfrecords(test_img_tfrecords,test_label_tfrecords,img_prob[0],img_prob[1],batch_size,1,False)
test_label_batch = tf.one_hot(test_label_batch,depth=num_cls)
return test_img_batch,test_label_batch
#yield test_img_batch,test_label_batch
'''
# for npu
if is_train:
aim_dir = train_dir
print(aim_dir)
ds = tfrecords.get_tfrecords_npu(aim_dir, img_prob=img_prob, batch_size=batch_size, num_cls=num_cls)
return ds
else:
aim_dir = test_dir
print(aim_dir)
ds = tfrecords.get_tfrecords_npu(aim_dir, img_prob=img_prob, batch_size=batch_size, num_cls=num_cls)
return ds
# for npu
def get_dogcat_img(file_dir):
cls_list = ['dog','cat']
cls_img_path , cls_img_label = [],[]
for file in os.listdir(file_dir):
for index , name in enumerate(cls_list):
if name in file:
cls_img_path.append(file_dir + '/' + file)
cls_img_label.append(index)
temp = np.array([cls_img_path,cls_img_label])
temp = temp.transpose()
np.random.shuffle(temp)
img_list = list(temp[:,0])
label_list = list(temp[:,1])
    label_list = [int(i) for i in label_list]
return img_list, label_list
def get_cifar10_img(file_dir):
cls_list = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
cls_img_path = []
cls_img_label = []
# for index , name in enumerate(cls_list):
# print(index, name)
# print(cls_list.index(name))
cout = 0
for file in os.listdir(file_dir):
for index , name in enumerate(cls_list):
if name in file:
cls_img_path.append(file_dir+'/'+file)
cls_img_label.append(index)
break
# if cout == 10:
# break
# else:
# cout = cout + 1
temp = np.array([cls_img_path,cls_img_label])
temp = temp.transpose()
np.random.shuffle(temp)
img_list = list(temp[:,0])
label_list = list(temp[:,1])
    label_list = [int(i) for i in label_list]
return img_list, label_list
def get_batch_tfrecords(imgs,label,img_w,img_h,batch_size,num_threads,shuffle=True):
imgs = tf.image.resize_images(images=imgs,size=[img_w,img_h],method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# capacity=5+batch_size*3, min_after_dequeue=5
if shuffle:
img_batch, label_batch = tf.train.shuffle_batch(
[imgs, label]
, batch_size=batch_size
, capacity=5+batch_size*3
, num_threads=num_threads
, min_after_dequeue=10
)
else:
        # with num_threads = 1, samples are dequeued in the same fixed order every time
img_batch, label_batch = tf.train.batch(
[imgs, label]
, batch_size=batch_size
, capacity=5+batch_size*3
, num_threads=num_threads
)
img_batch = tf.cast(img_batch, tf.float32)
return img_batch,label_batch
'''
Generate batches of equal size. This function splits the images into batches:
reading a large number of images into memory at once may run out of memory,
and is also a waste of performance.
@:param img        the img_list returned by get_cat_and_dog_files()
@:param label      the label_list returned by get_cat_and_dog_files()
@:param img_w, img_h  fixed width and height to resize to
@:param batch_size size of each batch
@:param capacity   maximum capacity of a queue
@:return a batch containing images and labels
'''
def get_batch(img, label, img_w, img_h, batch_size, capacity):
    # cast to the formats TensorFlow expects
img = tf.cast(img, tf.string)
label = tf.cast(label, tf.int32)
    # build the input queue
input_queue = tf.train.slice_input_producer([img,label])
    # read an image from the queue
img_contents = tf.read_file(input_queue[0])
label = input_queue[1]
    # decode the image; do not mix different image formats together
img = tf.image.decode_jpeg(img_contents, channels=3)
    # unified image preprocessing: resize, rotate, crop, normalize, etc.
img = tf.image.resize_images(images=img,size=[img_h,img_w],method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    img = tf.cast(img, tf.float32) / 255.  # convert dtype and normalize to [0, 1]
    # per-image standardization
# img = tf.image.per_image_standardization(img)
img_batch, label_batch = tf.train.batch(
[img,label],
batch_size= batch_size,
num_threads= 64,
capacity=capacity
)
# label_batch = tf.reshape(label_batch,[batch_size])
img_batch = tf.cast(img_batch, tf.float32)
return img_batch, label_batch
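# Hedged usage sketch (added; TF1 queue API, the data path is illustrative):
#   imgs, labels = get_cifar10_img('data/train')
#   img_b, label_b = get_batch(imgs, labels, 32, 32, batch_size=64, capacity=256)
#   with tf.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       batch = sess.run([img_b, label_b])
#       coord.request_stop()
#       coord.join(threads)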
if __name__ == '__main__':
get_cifar10_img('1')
| [
"tensorflow.image.resize_images",
"os.listdir",
"coms.tfrecords.get_tfrecords_npu",
"os.path.join",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.train.slice_input_producer",
"numpy.array",
"tensorflow.image.decode_jpeg",
"tensorflow.train.batch",
"coms.utils.isLinuxSys",
"tensorflow.cast",
... | [((1705, 1760), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', '"""./data"""', '""""""'], {}), "('dataset_dir', './data', '')\n", (1731, 1760), True, 'import tensorflow as tf\n'), ((1916, 1934), 'coms.utils.isLinuxSys', 'utils.isLinuxSys', ([], {}), '()\n', (1932, 1934), True, 'import coms.utils as utils\n'), ((3804, 3824), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (3814, 3824), False, 'import os\n'), ((4018, 4057), 'numpy.array', 'np.array', (['[cls_img_path, cls_img_label]'], {}), '([cls_img_path, cls_img_label])\n', (4026, 4057), True, 'import numpy as np\n'), ((4090, 4113), 'numpy.random.shuffle', 'np.random.shuffle', (['temp'], {}), '(temp)\n', (4107, 4113), True, 'import numpy as np\n'), ((4578, 4598), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (4588, 4598), False, 'import os\n'), ((4901, 4940), 'numpy.array', 'np.array', (['[cls_img_path, cls_img_label]'], {}), '([cls_img_path, cls_img_label])\n', (4909, 4940), True, 'import numpy as np\n'), ((4973, 4996), 'numpy.random.shuffle', 'np.random.shuffle', (['temp'], {}), '(temp)\n', (4990, 4996), True, 'import numpy as np\n'), ((5240, 5348), 'tensorflow.image.resize_images', 'tf.image.resize_images', ([], {'images': 'imgs', 'size': '[img_w, img_h]', 'method': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(images=imgs, size=[img_w, img_h], method=tf.image.\n ResizeMethod.NEAREST_NEIGHBOR)\n', (5262, 5348), True, 'import tensorflow as tf\n'), ((5912, 5942), 'tensorflow.cast', 'tf.cast', (['img_batch', 'tf.float32'], {}), '(img_batch, tf.float32)\n', (5919, 5942), True, 'import tensorflow as tf\n'), ((6347, 6370), 'tensorflow.cast', 'tf.cast', (['img', 'tf.string'], {}), '(img, tf.string)\n', (6354, 6370), True, 'import tensorflow as tf\n'), ((6383, 6407), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (6390, 6407), True, 'import tensorflow as tf\n'), ((6438, 6481), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[img, label]'], {}), '([img, label])\n', (6467, 6481), True, 'import tensorflow as tf\n'), ((6515, 6543), 'tensorflow.read_file', 'tf.read_file', (['input_queue[0]'], {}), '(input_queue[0])\n', (6527, 6543), True, 'import tensorflow as tf\n'), ((6606, 6652), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['img_contents'], {'channels': '(3)'}), '(img_contents, channels=3)\n', (6626, 6652), True, 'import tensorflow as tf\n'), ((6692, 6799), 'tensorflow.image.resize_images', 'tf.image.resize_images', ([], {'images': 'img', 'size': '[img_h, img_w]', 'method': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(images=img, size=[img_h, img_w], method=tf.image.\n ResizeMethod.NEAREST_NEIGHBOR)\n', (6714, 6799), True, 'import tensorflow as tf\n'), ((6945, 7035), 'tensorflow.train.batch', 'tf.train.batch', (['[img, label]'], {'batch_size': 'batch_size', 'num_threads': '(64)', 'capacity': 'capacity'}), '([img, label], batch_size=batch_size, num_threads=64,\n capacity=capacity)\n', (6959, 7035), True, 'import tensorflow as tf\n'), ((7146, 7176), 'tensorflow.cast', 'tf.cast', (['img_batch', 'tf.float32'], {}), '(img_batch, tf.float32)\n', (7153, 7176), True, 'import tensorflow as tf\n'), ((1956, 2006), 'os.path.join', 'os.path.join', (['FLAGS.dataset_dir', '"""train.tfrecords"""'], {}), "(FLAGS.dataset_dir, 'train.tfrecords')\n", (1968, 2006), False, 'import os\n'), ((2071, 2120), 'os.path.join', 'os.path.join', (['FLAGS.dataset_dir', '"""test.tfrecords"""'], {}), "(FLAGS.dataset_dir, 
'test.tfrecords')\n", (2083, 2120), False, 'import os\n'), ((3368, 3468), 'coms.tfrecords.get_tfrecords_npu', 'tfrecords.get_tfrecords_npu', (['aim_dir'], {'img_prob': 'img_prob', 'batch_size': 'batch_size', 'num_cls': 'num_cls'}), '(aim_dir, img_prob=img_prob, batch_size=\n batch_size, num_cls=num_cls)\n', (3395, 3468), True, 'import coms.tfrecords as tfrecords\n'), ((3555, 3655), 'coms.tfrecords.get_tfrecords_npu', 'tfrecords.get_tfrecords_npu', (['aim_dir'], {'img_prob': 'img_prob', 'batch_size': 'batch_size', 'num_cls': 'num_cls'}), '(aim_dir, img_prob=img_prob, batch_size=\n batch_size, num_cls=num_cls)\n', (3582, 3655), True, 'import coms.tfrecords as tfrecords\n'), ((5442, 5583), 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['[imgs, label]'], {'batch_size': 'batch_size', 'capacity': '(5 + batch_size * 3)', 'num_threads': 'num_threads', 'min_after_dequeue': '(10)'}), '([imgs, label], batch_size=batch_size, capacity=5 + \n batch_size * 3, num_threads=num_threads, min_after_dequeue=10)\n', (5464, 5583), True, 'import tensorflow as tf\n'), ((5732, 5843), 'tensorflow.train.batch', 'tf.train.batch', (['[imgs, label]'], {'batch_size': 'batch_size', 'capacity': '(5 + batch_size * 3)', 'num_threads': 'num_threads'}), '([imgs, label], batch_size=batch_size, capacity=5 + \n batch_size * 3, num_threads=num_threads)\n', (5746, 5843), True, 'import tensorflow as tf\n'), ((6803, 6827), 'tensorflow.cast', 'tf.cast', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (6810, 6827), True, 'import tensorflow as tf\n')] |
from __future__ import print_function
import argparse
import os
import random
import chainer
import numpy as np
from chainer import training, Variable
from chainer.training import extensions
from dataset import H5pyDataset
from model import DCGAN_G, DCGAN_D, init_bn, init_conv
from sampler import sampler
from updater import WassersteinUpdater
def make_optimizer(model, lr):
optimizer = chainer.optimizers.RMSprop(lr=lr)
optimizer.setup(model)
return optimizer
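# Note (added): the WGAN paper recommends a momentum-free optimizer such as
# RMSprop for the critic; weight clipping to [clamp_lower, clamp_upper] is
# presumably applied inside WassersteinUpdater to enforce the Lipschitz bound.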
def main():
parser = argparse.ArgumentParser(description='Train Unsupervised Blending GAN')
parser.add_argument('--nz', type=int, default=100, help='Size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64, help='# of base filters in G')
parser.add_argument('--ndf', type=int, default=64, help='# of base filters in D')
parser.add_argument('--nc', type=int, default=3, help='# of output channels in G')
parser.add_argument('--load_size', type=int, default=64, help='Scale image to load_size')
parser.add_argument('--image_size', type=int, default=64, help='The height / width of the input image to network')
parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--lr_d', type=float, default=0.00005, help='Learning rate for Critic, default=0.00005')
parser.add_argument('--lr_g', type=float, default=0.00005, help='Learning rate for Generator, default=0.00005')
parser.add_argument('--d_iters', type=int, default=5, help='# of D iters per each G iter')
parser.add_argument('--n_epoch', type=int, default=25, help='# of epochs to train for')
parser.add_argument('--clamp_lower', type=float, default=-0.01, help='Lower bound for clipping')
parser.add_argument('--clamp_upper', type=float, default=0.01, help='Upper bound for clipping')
parser.add_argument('--data_root', help='Path to dataset')
parser.add_argument('--experiment', default='Wasserstein_GAN_result', help='Where to store samples and models')
parser.add_argument('--workers', type=int, default=10, help='# of data loading workers')
parser.add_argument('--batch_size', type=int, default=128, help='input batch size')
parser.add_argument('--test_size', type=int, default=64, help='Batch size for testing')
    parser.add_argument('--manual_seed', type=int, default=5, help='Manual seed')
parser.add_argument('--resume', default='', help='Resume the training from snapshot')
parser.add_argument('--snapshot_interval', type=int, default=1, help='Interval of snapshot (epoch)')
parser.add_argument('--print_interval', type=int, default=1, help='Interval of printing log to console (iteration)')
parser.add_argument('--plot_interval', type=int, default=10, help='Interval of plot (iteration)')
args = parser.parse_args()
random.seed(args.manual_seed)
print('Input arguments:')
for key, value in vars(args).items():
print('\t{}: {}'.format(key, value))
print('')
# Set up G & D
print('Create & Init models ...')
print('\tInit G network ...')
G = DCGAN_G(args.image_size, args.nc, args.ngf, init_conv, init_bn)
print('\tInit D network ...')
D = DCGAN_D(args.image_size, args.ndf, 1, init_conv, init_bn)
if args.gpu >= 0:
print('\tCopy models to gpu {} ...'.format(args.gpu))
chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current
G.to_gpu() # Copy the model to the GPU
D.to_gpu()
print('Init models done ...\n')
# Setup an optimizer
optimizer_d = make_optimizer(D, args.lr_d)
optimizer_g = make_optimizer(G, args.lr_g)
########################################################################################################################
# Setup dataset & iterator
print('Load images from {} ...'.format(args.data_root))
trainset = H5pyDataset(args.data_root, load_size=args.load_size, crop_size=args.image_size)
print('\tTrainset contains {} image files'.format(len(trainset)))
print('')
train_iter = chainer.iterators.MultiprocessIterator(trainset, args.batch_size, n_processes=args.workers,
n_prefetch=args.workers)
########################################################################################################################
# Set up a trainer
updater = WassersteinUpdater(
models=(G, D),
args=args,
iterator=train_iter,
optimizer={'main': optimizer_g, 'D': optimizer_d},
device=args.gpu
)
trainer = training.Trainer(updater, (args.n_epoch, 'epoch'), out=args.experiment)
# Snapshot
snapshot_interval = (args.snapshot_interval, 'epoch')
trainer.extend(
extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
G, 'g_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
D, 'd_epoch_{.updater.epoch}.npz'), trigger=snapshot_interval)
# Display
print_interval = (args.print_interval, 'iteration')
trainer.extend(extensions.LogReport(trigger=print_interval))
trainer.extend(extensions.PrintReport([
'iteration', 'main/loss', 'D/loss', 'D/loss_real', 'D/loss_fake'
]), trigger=print_interval)
trainer.extend(extensions.ProgressBar(update_interval=args.print_interval))
trainer.extend(extensions.dump_graph('D/loss', out_name='TrainGraph.dot'))
# Plot
plot_interval = (args.plot_interval, 'iteration')
trainer.extend(
extensions.PlotReport(['main/loss'], 'iteration', file_name='loss.png', trigger=plot_interval),
trigger=plot_interval)
trainer.extend(
extensions.PlotReport(['D/loss'], 'iteration', file_name='d_loss.png', trigger=plot_interval),
trigger=plot_interval)
trainer.extend(
extensions.PlotReport(['D/loss_real'], 'iteration', file_name='loss_real.png', trigger=plot_interval),
trigger=plot_interval)
trainer.extend(
extensions.PlotReport(['D/loss_fake'], 'iteration', file_name='loss_fake.png', trigger=plot_interval),
trigger=plot_interval)
# Eval
path = os.path.join(args.experiment, 'samples')
if not os.path.isdir(path):
os.makedirs(path)
print('Saving samples to {} ...\n'.format(path))
noisev = Variable(np.asarray(np.random.normal(size=(args.test_size, args.nz, 1, 1)), dtype=np.float32))
noisev.to_gpu(args.gpu)
trainer.extend(sampler(G, path, noisev, 'fake_samples_{}.png'), trigger=plot_interval)
if args.resume:
# Resume from a snapshot
print('Resume from {} ... \n'.format(args.resume))
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
print('Training start ...\n')
trainer.run()
if __name__ == '__main__':
main()
| [
"chainer.training.extensions.PrintReport",
"chainer.optimizers.RMSprop",
"chainer.training.extensions.snapshot_object",
"model.DCGAN_D",
"model.DCGAN_G",
"argparse.ArgumentParser",
"chainer.training.Trainer",
"os.path.isdir",
"dataset.H5pyDataset",
"chainer.training.extensions.ProgressBar",
"num... | [((397, 430), 'chainer.optimizers.RMSprop', 'chainer.optimizers.RMSprop', ([], {'lr': 'lr'}), '(lr=lr)\n', (423, 430), False, 'import chainer\n'), ((506, 576), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Unsupervised Blending GAN"""'}), "(description='Train Unsupervised Blending GAN')\n", (529, 576), False, 'import argparse\n'), ((2848, 2877), 'random.seed', 'random.seed', (['args.manual_seed'], {}), '(args.manual_seed)\n', (2859, 2877), False, 'import random\n'), ((3110, 3173), 'model.DCGAN_G', 'DCGAN_G', (['args.image_size', 'args.nc', 'args.ngf', 'init_conv', 'init_bn'], {}), '(args.image_size, args.nc, args.ngf, init_conv, init_bn)\n', (3117, 3173), False, 'from model import DCGAN_G, DCGAN_D, init_bn, init_conv\n'), ((3216, 3273), 'model.DCGAN_D', 'DCGAN_D', (['args.image_size', 'args.ndf', '(1)', 'init_conv', 'init_bn'], {}), '(args.image_size, args.ndf, 1, init_conv, init_bn)\n', (3223, 3273), False, 'from model import DCGAN_G, DCGAN_D, init_bn, init_conv\n'), ((3892, 3977), 'dataset.H5pyDataset', 'H5pyDataset', (['args.data_root'], {'load_size': 'args.load_size', 'crop_size': 'args.image_size'}), '(args.data_root, load_size=args.load_size, crop_size=args.image_size\n )\n', (3903, 3977), False, 'from dataset import H5pyDataset\n'), ((4074, 4194), 'chainer.iterators.MultiprocessIterator', 'chainer.iterators.MultiprocessIterator', (['trainset', 'args.batch_size'], {'n_processes': 'args.workers', 'n_prefetch': 'args.workers'}), '(trainset, args.batch_size,\n n_processes=args.workers, n_prefetch=args.workers)\n', (4112, 4194), False, 'import chainer\n'), ((4410, 4548), 'updater.WassersteinUpdater', 'WassersteinUpdater', ([], {'models': '(G, D)', 'args': 'args', 'iterator': 'train_iter', 'optimizer': "{'main': optimizer_g, 'D': optimizer_d}", 'device': 'args.gpu'}), "(models=(G, D), args=args, iterator=train_iter, optimizer\n ={'main': optimizer_g, 'D': optimizer_d}, device=args.gpu)\n", (4428, 4548), False, 'from updater import WassersteinUpdater\n'), ((4604, 4675), 'chainer.training.Trainer', 'training.Trainer', (['updater', "(args.n_epoch, 'epoch')"], {'out': 'args.experiment'}), "(updater, (args.n_epoch, 'epoch'), out=args.experiment)\n", (4620, 4675), False, 'from chainer import training, Variable\n'), ((6286, 6326), 'os.path.join', 'os.path.join', (['args.experiment', '"""samples"""'], {}), "(args.experiment, 'samples')\n", (6298, 6326), False, 'import os\n'), ((4778, 4845), 'chainer.training.extensions.snapshot', 'extensions.snapshot', ([], {'filename': '"""snapshot_epoch_{.updater.epoch}.npz"""'}), "(filename='snapshot_epoch_{.updater.epoch}.npz')\n", (4797, 4845), False, 'from chainer.training import extensions\n'), ((4901, 4962), 'chainer.training.extensions.snapshot_object', 'extensions.snapshot_object', (['G', '"""g_epoch_{.updater.epoch}.npz"""'], {}), "(G, 'g_epoch_{.updater.epoch}.npz')\n", (4927, 4962), False, 'from chainer.training import extensions\n'), ((5019, 5080), 'chainer.training.extensions.snapshot_object', 'extensions.snapshot_object', (['D', '"""d_epoch_{.updater.epoch}.npz"""'], {}), "(D, 'd_epoch_{.updater.epoch}.npz')\n", (5045, 5080), False, 'from chainer.training import extensions\n'), ((5208, 5252), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {'trigger': 'print_interval'}), '(trigger=print_interval)\n', (5228, 5252), False, 'from chainer.training import extensions\n'), ((5273, 5367), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['iteration', 'main/loss', 
'D/loss', 'D/loss_real', 'D/loss_fake']"], {}), "(['iteration', 'main/loss', 'D/loss', 'D/loss_real',\n 'D/loss_fake'])\n", (5295, 5367), False, 'from chainer.training import extensions\n'), ((5422, 5481), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {'update_interval': 'args.print_interval'}), '(update_interval=args.print_interval)\n', (5444, 5481), False, 'from chainer.training import extensions\n'), ((5503, 5561), 'chainer.training.extensions.dump_graph', 'extensions.dump_graph', (['"""D/loss"""'], {'out_name': '"""TrainGraph.dot"""'}), "('D/loss', out_name='TrainGraph.dot')\n", (5524, 5561), False, 'from chainer.training import extensions\n'), ((5658, 5756), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/loss']", '"""iteration"""'], {'file_name': '"""loss.png"""', 'trigger': 'plot_interval'}), "(['main/loss'], 'iteration', file_name='loss.png',\n trigger=plot_interval)\n", (5679, 5756), False, 'from chainer.training import extensions\n'), ((5813, 5910), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['D/loss']", '"""iteration"""'], {'file_name': '"""d_loss.png"""', 'trigger': 'plot_interval'}), "(['D/loss'], 'iteration', file_name='d_loss.png',\n trigger=plot_interval)\n", (5834, 5910), False, 'from chainer.training import extensions\n'), ((5967, 6073), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['D/loss_real']", '"""iteration"""'], {'file_name': '"""loss_real.png"""', 'trigger': 'plot_interval'}), "(['D/loss_real'], 'iteration', file_name=\n 'loss_real.png', trigger=plot_interval)\n", (5988, 6073), False, 'from chainer.training import extensions\n'), ((6129, 6235), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['D/loss_fake']", '"""iteration"""'], {'file_name': '"""loss_fake.png"""', 'trigger': 'plot_interval'}), "(['D/loss_fake'], 'iteration', file_name=\n 'loss_fake.png', trigger=plot_interval)\n", (6150, 6235), False, 'from chainer.training import extensions\n'), ((6338, 6357), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6351, 6357), False, 'import os\n'), ((6367, 6384), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6378, 6384), False, 'import os\n'), ((6594, 6641), 'sampler.sampler', 'sampler', (['G', 'path', 'noisev', '"""fake_samples_{}.png"""'], {}), "(G, path, noisev, 'fake_samples_{}.png')\n", (6601, 6641), False, 'from sampler import sampler\n'), ((6787, 6837), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.resume', 'trainer'], {}), '(args.resume, trainer)\n', (6815, 6837), False, 'import chainer\n'), ((6472, 6526), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(args.test_size, args.nz, 1, 1)'}), '(size=(args.test_size, args.nz, 1, 1))\n', (6488, 6526), True, 'import numpy as np\n'), ((3366, 3399), 'chainer.cuda.get_device', 'chainer.cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (3389, 3399), False, 'import chainer\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import locale
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tsfresh import select_features
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
def main():
if len(sys.argv) < 3:
print('Usage: ./predict.py features labels')
exit(1)
features = pd.read_csv(sys.argv[1], index_col=None, header=0)
labels = pd.read_csv(sys.argv[2], index_col=None, header=None, squeeze=True)
predict(features, labels, 0.9)
def predict(features, labels, cutoff):
cutoff = int(len(features) * cutoff)
labels = labels[:len(features)].astype(int)
features = select_features(features, labels)
features_train = features.loc[:cutoff]
labels_train = labels.loc[:cutoff]
features_test = features.loc[cutoff:].reset_index(drop=True)
labels_test = labels.loc[cutoff:].reset_index(drop=True)
# labels_test = labels.loc[1300:]
# labels_test = []
# for i in range(cutoff, len(labels)):
# labels_test.append(1 if labels.loc[i] > labels.loc[i - 1] else 0)
lr = LogisticRegression(C=1e5)
lr.fit(features_train, labels_train)
predictions = lr.predict(features_test)
score = lr.score(features_test, labels_test)
# actual_prices = labels[cutoff - 1:-1].tolist()
# for i, val in enumerate(predictions):
# predictions[i] = 1 if val > actual_prices[i] else 0
# Writing the predictions to an output file:
np.savetxt('predictions.csv', predictions, fmt='%i', delimiter=',')
print('Accuracy: %s' % accuracy_score(labels_test, predictions))
print('Precision: %s' % precision_score(labels_test, predictions))
print('Recall: %s' % recall_score(labels_test, predictions))
cnf_matrix = confusion_matrix(labels_test, predictions, [0, 1])
plt.figure()
    plot_confusion_matrix(cnf_matrix, normalize=True, classes=[0, 1], title='Normalized Confusion Matrix')
plt.show()
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
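# Hedged usage sketch (added): standalone call on a toy 2x2 matrix
#   cm = confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0])
#   plt.figure()
#   plot_confusion_matrix(cm, classes=[0, 1], normalize=True)
#   plt.show()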
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.imshow",
"pandas.read_csv",
"tsfresh.select_features",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.precision_score",
"skl... | [((600, 650), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {'index_col': 'None', 'header': '(0)'}), '(sys.argv[1], index_col=None, header=0)\n', (611, 650), True, 'import pandas as pd\n'), ((661, 728), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[2]'], {'index_col': 'None', 'header': 'None', 'squeeze': '(True)'}), '(sys.argv[2], index_col=None, header=None, squeeze=True)\n', (672, 728), True, 'import pandas as pd\n'), ((899, 932), 'tsfresh.select_features', 'select_features', (['features', 'labels'], {}), '(features, labels)\n', (914, 932), False, 'from tsfresh import select_features\n'), ((1303, 1333), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(100000.0)'}), '(C=100000.0)\n', (1321, 1333), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso\n'), ((1652, 1719), 'numpy.savetxt', 'np.savetxt', (['"""predictions.csv"""', 'predictions'], {'fmt': '"""%i"""', 'delimiter': '""","""'}), "('predictions.csv', predictions, fmt='%i', delimiter=',')\n", (1662, 1719), True, 'import numpy as np\n'), ((1932, 1982), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'predictions', '[0, 1]'], {}), '(labels_test, predictions, [0, 1])\n', (1948, 1982), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix\n'), ((1984, 1996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1994, 1996), True, 'import matplotlib.pyplot as plt\n'), ((2101, 2111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2109, 2111), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2596), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (2556, 2596), True, 'import matplotlib.pyplot as plt\n'), ((2598, 2614), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2607, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2630), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2628, 2630), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2714), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (2680, 2714), True, 'import matplotlib.pyplot as plt\n'), ((2716, 2747), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (2726, 2747), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3029), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3027, 3029), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (3041, 3055), True, 'import matplotlib.pyplot as plt\n'), ((3057, 3086), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (3067, 3086), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1785), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1759, 1785), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix\n'), ((1812, 1853), 'sklearn.metrics.precision_score', 'precision_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1827, 1853), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix\n'), ((1877, 1915), 'sklearn.metrics.recall_score', 
'recall_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1889, 1915), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix\n')] |
from hierarc.LensPosterior.ddt_kin_constraints import DdtKinConstraints
from lenstronomy.Analysis.kinematics_api import KinematicsAPI
from hierarc.Likelihood.hierarchy_likelihood import LensLikelihood
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
import numpy as np
import numpy.testing as npt
import pytest
class TestDdtKinGaussConstraints(object):
def setup(self):
pass
def test_likelihoodconfiguration_om(self):
anisotropy_model = 'OM'
kwargs_aperture = {'aperture_type': 'shell', 'r_in': 0, 'r_out': 3 / 2., 'center_ra': 0.0, 'center_dec': 0}
kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': 1.4}
        # numerical settings (not needed if power-law profiles with a Hernquist light distribution are computed)
        kwargs_numerics_galkin = {'interpol_grid_num': 1000,  # interpolation grid points; results converge as this -> infinity
'log_integration': True,
# log or linear interpolation of surface brightness and mass models
'max_integrate': 100,
'min_integrate': 0.001} # lower/upper bound of numerical integrals
# redshift
z_lens = 0.5
z_source = 1.5
# lens model
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo)
ddt_mean = lens_cosmo.ddt
ddt_sigma = ddt_mean/50
ddt_samples = np.random.normal(loc=ddt_mean, scale=ddt_sigma, size=50000)
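        # mock a Gaussian Ddt posterior: 50000 samples with a 2% relative width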
theta_E = 1.
r_eff = 1
gamma = 2.1
# kwargs_model
lens_light_model_list = ['HERNQUIST']
lens_model_list = ['SPP']
kwargs_model = {'lens_model_list': lens_model_list, 'lens_light_model_list': lens_light_model_list}
# settings for kinematics calculation with KinematicsAPI of lenstronomy
kwargs_kin_api_settings = {'multi_observations': False, 'kwargs_numerics_galkin': kwargs_numerics_galkin,
'MGE_light': False, 'kwargs_mge_light': None, 'sampling_number': 1000,
'num_kin_sampling': 1000, 'num_psf_sampling': 100}
kin_api = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing, anisotropy_model,
cosmo=cosmo, **kwargs_kin_api_settings)
# compute kinematics with fiducial cosmology
kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'center_x': 0, 'center_y': 0}]
kwargs_lens_light = [{'Rs': r_eff * 0.551, 'amp': 1.}]
kwargs_anisotropy = {'r_ani': r_eff}
sigma_v = kin_api.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff,
theta_E=theta_E, gamma=gamma, kappa_ext=0)
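        # this fiducial-cosmology dispersion serves as the mock measurement below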
# compute likelihood
kin_constraints = DdtKinConstraints(z_lens=z_lens, z_source=z_source, theta_E=theta_E, theta_E_error=0.01,
ddt_samples=ddt_samples, ddt_weights=None,
gamma=gamma, gamma_error=0.02, r_eff=r_eff, r_eff_error=0.05, sigma_v=[sigma_v],
sigma_v_error_independent=[10], sigma_v_error_covariant=0,
kwargs_aperture=kwargs_aperture, kwargs_seeing=kwargs_seeing,
kwargs_lens_light=kwargs_lens_light,
anisotropy_model=anisotropy_model, **kwargs_kin_api_settings)
kwargs_likelihood = kin_constraints.hierarchy_configuration(num_sample_model=5)
kwargs_likelihood['normalized'] = False
ln_class = LensLikelihood(**kwargs_likelihood)
kwargs_kin = {'a_ani': 1}
ln_likelihood = ln_class.lens_log_likelihood(cosmo, kwargs_lens={}, kwargs_kin=kwargs_kin)
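        # the mock kinematics were generated with the same cosmology that the
        # likelihood evaluates, so with 'normalized' = False the log-likelihood
        # should peak near zero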
npt.assert_almost_equal(ln_likelihood, 0, decimal=1)
if __name__ == '__main__':
pytest.main()
| [
"numpy.random.normal",
"lenstronomy.Cosmo.lens_cosmo.LensCosmo",
"hierarc.LensPosterior.ddt_kin_constraints.DdtKinConstraints",
"lenstronomy.Analysis.kinematics_api.KinematicsAPI",
"hierarc.Likelihood.hierarchy_likelihood.LensLikelihood",
"astropy.cosmology.FlatLambdaCDM",
"pytest.main",
"numpy.testin... | [((4061, 4074), 'pytest.main', 'pytest.main', ([], {}), '()\n', (4072, 4074), False, 'import pytest\n'), ((1351, 1380), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': '(70)', 'Om0': '(0.3)'}), '(H0=70, Om0=0.3)\n', (1364, 1380), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((1403, 1459), 'lenstronomy.Cosmo.lens_cosmo.LensCosmo', 'LensCosmo', ([], {'z_lens': 'z_lens', 'z_source': 'z_source', 'cosmo': 'cosmo'}), '(z_lens=z_lens, z_source=z_source, cosmo=cosmo)\n', (1412, 1459), False, 'from lenstronomy.Cosmo.lens_cosmo import LensCosmo\n'), ((1548, 1607), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'ddt_mean', 'scale': 'ddt_sigma', 'size': '(50000)'}), '(loc=ddt_mean, scale=ddt_sigma, size=50000)\n', (1564, 1607), True, 'import numpy as np\n'), ((2268, 2407), 'lenstronomy.Analysis.kinematics_api.KinematicsAPI', 'KinematicsAPI', (['z_lens', 'z_source', 'kwargs_model', 'kwargs_aperture', 'kwargs_seeing', 'anisotropy_model'], {'cosmo': 'cosmo'}), '(z_lens, z_source, kwargs_model, kwargs_aperture,\n kwargs_seeing, anisotropy_model, cosmo=cosmo, **kwargs_kin_api_settings)\n', (2281, 2407), False, 'from lenstronomy.Analysis.kinematics_api import KinematicsAPI\n'), ((2952, 3410), 'hierarc.LensPosterior.ddt_kin_constraints.DdtKinConstraints', 'DdtKinConstraints', ([], {'z_lens': 'z_lens', 'z_source': 'z_source', 'theta_E': 'theta_E', 'theta_E_error': '(0.01)', 'ddt_samples': 'ddt_samples', 'ddt_weights': 'None', 'gamma': 'gamma', 'gamma_error': '(0.02)', 'r_eff': 'r_eff', 'r_eff_error': '(0.05)', 'sigma_v': '[sigma_v]', 'sigma_v_error_independent': '[10]', 'sigma_v_error_covariant': '(0)', 'kwargs_aperture': 'kwargs_aperture', 'kwargs_seeing': 'kwargs_seeing', 'kwargs_lens_light': 'kwargs_lens_light', 'anisotropy_model': 'anisotropy_model'}), '(z_lens=z_lens, z_source=z_source, theta_E=theta_E,\n theta_E_error=0.01, ddt_samples=ddt_samples, ddt_weights=None, gamma=\n gamma, gamma_error=0.02, r_eff=r_eff, r_eff_error=0.05, sigma_v=[\n sigma_v], sigma_v_error_independent=[10], sigma_v_error_covariant=0,\n kwargs_aperture=kwargs_aperture, kwargs_seeing=kwargs_seeing,\n kwargs_lens_light=kwargs_lens_light, anisotropy_model=anisotropy_model,\n **kwargs_kin_api_settings)\n', (2969, 3410), False, 'from hierarc.LensPosterior.ddt_kin_constraints import DdtKinConstraints\n'), ((3798, 3833), 'hierarc.Likelihood.hierarchy_likelihood.LensLikelihood', 'LensLikelihood', ([], {}), '(**kwargs_likelihood)\n', (3812, 3833), False, 'from hierarc.Likelihood.hierarchy_likelihood import LensLikelihood\n'), ((3975, 4027), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ln_likelihood', '(0)'], {'decimal': '(1)'}), '(ln_likelihood, 0, decimal=1)\n', (3998, 4027), True, 'import numpy.testing as npt\n')] |
"""
Copyright 2021 Max-Planck-Gesellschaft
Code author: <NAME>, <EMAIL>
Embodied Vision Group, Max Planck Institute for Intelligent Systems, Tübingen
This source code is licensed under the MIT license found in the
LICENSE.md file in the root directory of this source tree or at
https://opensource.org/licenses/MIT.
"""
import gym
import numpy as np
from gym.spaces import Box
from gym.utils import seeding
from context_exploration.data.envs.envs import (
MaxDurationWrapper,
ParametrizedEnvWrapper,
RandomSampleExcitationController,
SampleActionMixin,
SizeProperties,
)
class ProfileMountainCarEnv(gym.Env):
def __init__(self, profile_fcn=None, grad_fcn=None):
        # the height profile should range from 0 to 1
        # over the domain x in [-1, 1]
if profile_fcn is None:
self.profile_fcn = lambda x: 0.5 + 0.45 * np.sin(np.pi * x)
self.grad_fcn = lambda x: 0.25 * np.pi * np.cos(np.pi * x)
else:
assert grad_fcn is not None
self.profile_fcn = profile_fcn
self.grad_fcn = grad_fcn
x = np.linspace(-1, 1, 500)
profile = self.profile_fcn(x)
self.max_height = np.max(profile)
self.dt = 0.05
self.g = -10
self.fric = 0.001
self.state = None
self.max_u = 3
self.action_space = Box(
low=-self.max_u, high=self.max_u, shape=(1,), dtype=np.float32
)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self, init_mode="random"):
if init_mode == "calibration":
self.state = np.array([0, 0])
elif init_mode == "random":
self.state = np.array(
[self.np_random.uniform(-0.8, 0.8), self.np_random.uniform(-2, 2)]
)
else:
raise ValueError(f"Unknown init_mode {init_mode}")
return self.state
def get_reward(self, state, action):
height = self.profile_fcn(state[..., 0])
costs = (height - self.max_height) ** 2
return -costs
def step(self, action: np.ndarray):
u = action[0]
u = np.clip(u, -self.max_u, self.max_u)
x, xdot = self.state
reward = self.get_reward(self.state, action)
grad_x = self.grad_fcn(x)
        # two-pass explicit Euler step: the second pass re-evaluates the slope
        # at the predicted position, giving a Heun-style predictor-corrector update
for step in range(2):
grad_angle = np.arctan(grad_x)
tang_accel = self.g * np.sin(grad_angle) + u
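            # Coulomb friction: opposes the motion, capped at |tang_accel|
            # so it cannot reverse the net force on its own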
max_fric_accel = np.abs(self.g * np.cos(grad_angle) * self.fric)
abs_fric_accel = min(max_fric_accel, np.abs(tang_accel))
accel_x = (tang_accel - np.sign(xdot) * abs_fric_accel) * np.cos(grad_angle)
x_new = x + self.dt * xdot + 0.5 * accel_x * self.dt ** 2
grad_x = 0.5 * grad_x + 0.5 * self.grad_fcn(x_new)
xdot_new = xdot + accel_x * self.dt
if x_new >= 1:
x_new = 1
xdot_new = 0
if x_new <= -1:
x_new = -1
xdot_new = 0
obs = np.array([x_new, xdot_new])
self.state = obs
done = False
info = {}
return obs, reward, done, info
def render(self, mode="human"):
pass
def close(self):
pass
class MountainCarProfileContextSpace(gym.Space):
def __init__(self):
super(MountainCarProfileContextSpace, self).__init__(
            shape=(0,), dtype=object  # the np.object alias was removed in NumPy 1.24
)
def sample(self):
n_points = self.np_random.choice(np.arange(2, 7 + 1))
locations = self.np_random.uniform(-1.5, 1.5, n_points)
heights = self.np_random.uniform(0.1, 0.3, n_points)
widths = self.np_random.uniform(0.1, 0.5, n_points)
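        # pin the terrain with fixed bumps at x = -1 and x = +1 so that every
        # sampled profile has walls of height 0.5 at the boundaries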
locations = np.concatenate((locations, np.array([-1, 1])))
heights = np.concatenate((heights, np.array([0.5, 0.5])))
widths = np.concatenate((widths, np.array([0.3, 0.3])))
return np.stack((locations, heights, widths))
class MountainCarEnvRandomProfile(
ParametrizedEnvWrapper, SizeProperties, SampleActionMixin
):
def __init__(self):
self.max_duration = 100
state_dim = 2
action_dim = 1
context_space = MountainCarProfileContextSpace()
self.excitation_controller = RandomSampleExcitationController(self)
ParametrizedEnvWrapper.__init__(self, context_space)
SizeProperties.__init__(self, state_dim, action_dim)
@property
def profile_fcn(self):
return self.env.profile_fcn
@property
def grad_fcn(self):
return self.env.grad_fcn
def get_domain(self):
return 0
def reset(self, init_mode="random"):
return super().reset(init_mode=init_mode)
@staticmethod
def profile_from_context(context_sample):
locations = context_sample[0, :]
heights = context_sample[1, :]
widths = context_sample[2, :]
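        # the height profile is a sum of Gaussian bumps parameterized by
        # (location, height, width); grad_fcn below is its analytic derivative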
profile_fcn = lambda x: sum(
h * np.exp(-0.5 * ((x - l) ** 2 / w ** 2))
for l, h, w in zip(locations, heights, widths)
)
grad_fcn = lambda x: sum(
h * np.exp(-0.5 * ((x - l) ** 2 / w ** 2)) * ((-0.5 / w ** 2) * 2 * (x - l))
for l, h, w in zip(locations, heights, widths)
)
return profile_fcn, grad_fcn
def _construct_env(self, context):
profile_fcn, grad_fcn = MountainCarEnvRandomProfile.profile_from_context(
context
)
env = ProfileMountainCarEnv(profile_fcn=profile_fcn, grad_fcn=grad_fcn)
env = MaxDurationWrapper(env, self.max_duration)
return env
def get_reward(self, state, action):
return self.env.get_reward(state, action)
def seed(self, seed=None):
super().seed(seed)
def is_transition_informative(self, x, u, x_next):
return np.ones(*x.shape[:-1])
def run_mountaincar():
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=1)
x = np.linspace(-1, 1, 200)
env = ProfileMountainCarEnv()
profile = env.profile_fcn(x)
ax.plot(x, profile)
env.reset()
env.state = np.array([-0.5, 0])
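    # classic swing-up: push left first to build momentum, then drive right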
for idx in range(50):
if idx < 15:
action = -3
else:
action = 3
obs, _, _, _ = env.step(np.array([action]))
print(obs[1])
ax.scatter(obs[0], env.profile_fcn(obs[0]))
plt.savefig(f"mountaincar/mountaincar_{idx}.png")
plt.show()
env.close()
def sample_profiles():
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=1)
context_space = MountainCarProfileContextSpace()
profile_fcn, grad_fcn = MountainCarEnvRandomProfile.profile_from_context(
context_space.sample()
)
x = np.linspace(-1, 1, 200)
profile = profile_fcn(x)
grad = grad_fcn(x)
ax.plot(x, profile)
ax.plot(x, grad)
# ax.set_ylim(0, 1)
plt.show()
def run_random_mountaincar():
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=1)
x = np.linspace(-1, 1, 200)
env = MountainCarEnvRandomProfile()
env.initialize_context(42)
profile = env.profile_fcn(x)
ax.plot(x, profile)
env.reset()
for idx in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
print(obs[1])
ax.scatter(obs[0], env.profile_fcn(obs[0]))
plt.savefig(f"mountaincar/random_mountaincar_{idx}.png")
plt.show()
env.close()
if __name__ == "__main__":
run_random_mountaincar()
| [
"numpy.clip",
"numpy.array",
"numpy.sin",
"numpy.arange",
"gym.utils.seeding.np_random",
"context_exploration.data.envs.envs.RandomSampleExcitationController",
"context_exploration.data.envs.envs.SizeProperties.__init__",
"numpy.max",
"numpy.exp",
"numpy.stack",
"numpy.linspace",
"numpy.arctan... | [((5952, 5982), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (5964, 5982), True, 'import matplotlib.pyplot as plt\n'), ((5991, 6014), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (6002, 6014), True, 'import numpy as np\n'), ((6139, 6158), 'numpy.array', 'np.array', (['[-0.5, 0]'], {}), '([-0.5, 0])\n', (6147, 6158), True, 'import numpy as np\n'), ((6455, 6465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6463, 6465), True, 'import matplotlib.pyplot as plt\n'), ((6558, 6588), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (6570, 6588), True, 'import matplotlib.pyplot as plt\n'), ((6765, 6788), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (6776, 6788), True, 'import numpy as np\n'), ((6914, 6924), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6922, 6924), True, 'import matplotlib.pyplot as plt\n'), ((7008, 7038), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (7020, 7038), True, 'import matplotlib.pyplot as plt\n'), ((7047, 7070), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (7058, 7070), True, 'import numpy as np\n'), ((7444, 7454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7452, 7454), True, 'import matplotlib.pyplot as plt\n'), ((1094, 1117), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(500)'], {}), '(-1, 1, 500)\n', (1105, 1117), True, 'import numpy as np\n'), ((1182, 1197), 'numpy.max', 'np.max', (['profile'], {}), '(profile)\n', (1188, 1197), True, 'import numpy as np\n'), ((1348, 1415), 'gym.spaces.Box', 'Box', ([], {'low': '(-self.max_u)', 'high': 'self.max_u', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=-self.max_u, high=self.max_u, shape=(1,), dtype=np.float32)\n', (1351, 1415), False, 'from gym.spaces import Box\n'), ((1522, 1545), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1539, 1545), False, 'from gym.utils import seeding\n'), ((2198, 2233), 'numpy.clip', 'np.clip', (['u', '(-self.max_u)', 'self.max_u'], {}), '(u, -self.max_u, self.max_u)\n', (2205, 2233), True, 'import numpy as np\n'), ((3082, 3109), 'numpy.array', 'np.array', (['[x_new, xdot_new]'], {}), '([x_new, xdot_new])\n', (3090, 3109), True, 'import numpy as np\n'), ((3967, 4005), 'numpy.stack', 'np.stack', (['(locations, heights, widths)'], {}), '((locations, heights, widths))\n', (3975, 4005), True, 'import numpy as np\n'), ((4303, 4341), 'context_exploration.data.envs.envs.RandomSampleExcitationController', 'RandomSampleExcitationController', (['self'], {}), '(self)\n', (4335, 4341), False, 'from context_exploration.data.envs.envs import MaxDurationWrapper, ParametrizedEnvWrapper, RandomSampleExcitationController, SampleActionMixin, SizeProperties\n'), ((4350, 4402), 'context_exploration.data.envs.envs.ParametrizedEnvWrapper.__init__', 'ParametrizedEnvWrapper.__init__', (['self', 'context_space'], {}), '(self, context_space)\n', (4381, 4402), False, 'from context_exploration.data.envs.envs import MaxDurationWrapper, ParametrizedEnvWrapper, RandomSampleExcitationController, SampleActionMixin, SizeProperties\n'), ((4411, 4463), 'context_exploration.data.envs.envs.SizeProperties.__init__', 'SizeProperties.__init__', (['self', 'state_dim', 'action_dim'], {}), '(self, state_dim, action_dim)\n', (4434, 4463), False, 
'from context_exploration.data.envs.envs import MaxDurationWrapper, ParametrizedEnvWrapper, RandomSampleExcitationController, SampleActionMixin, SizeProperties\n'), ((5569, 5611), 'context_exploration.data.envs.envs.MaxDurationWrapper', 'MaxDurationWrapper', (['env', 'self.max_duration'], {}), '(env, self.max_duration)\n', (5587, 5611), False, 'from context_exploration.data.envs.envs import MaxDurationWrapper, ParametrizedEnvWrapper, RandomSampleExcitationController, SampleActionMixin, SizeProperties\n'), ((5853, 5875), 'numpy.ones', 'np.ones', (['*x.shape[:-1]'], {}), '(*x.shape[:-1])\n', (5860, 5875), True, 'import numpy as np\n'), ((6401, 6450), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""mountaincar/mountaincar_{idx}.png"""'], {}), "(f'mountaincar/mountaincar_{idx}.png')\n", (6412, 6450), True, 'import matplotlib.pyplot as plt\n'), ((7383, 7439), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""mountaincar/random_mountaincar_{idx}.png"""'], {}), "(f'mountaincar/random_mountaincar_{idx}.png')\n", (7394, 7439), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1690), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1682, 1690), True, 'import numpy as np\n'), ((2436, 2453), 'numpy.arctan', 'np.arctan', (['grad_x'], {}), '(grad_x)\n', (2445, 2453), True, 'import numpy as np\n'), ((3549, 3568), 'numpy.arange', 'np.arange', (['(2)', '(7 + 1)'], {}), '(2, 7 + 1)\n', (3558, 3568), True, 'import numpy as np\n'), ((6299, 6317), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (6307, 6317), True, 'import numpy as np\n'), ((2637, 2655), 'numpy.abs', 'np.abs', (['tang_accel'], {}), '(tang_accel)\n', (2643, 2655), True, 'import numpy as np\n'), ((2727, 2745), 'numpy.cos', 'np.cos', (['grad_angle'], {}), '(grad_angle)\n', (2733, 2745), True, 'import numpy as np\n'), ((3802, 3819), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (3810, 3819), True, 'import numpy as np\n'), ((3865, 3885), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3873, 3885), True, 'import numpy as np\n'), ((3929, 3949), 'numpy.array', 'np.array', (['[0.3, 0.3]'], {}), '([0.3, 0.3])\n', (3937, 3949), True, 'import numpy as np\n'), ((929, 946), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (935, 946), True, 'import numpy as np\n'), ((2488, 2506), 'numpy.sin', 'np.sin', (['grad_angle'], {}), '(grad_angle)\n', (2494, 2506), True, 'import numpy as np\n'), ((858, 875), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (864, 875), True, 'import numpy as np\n'), ((2556, 2574), 'numpy.cos', 'np.cos', (['grad_angle'], {}), '(grad_angle)\n', (2562, 2574), True, 'import numpy as np\n'), ((2693, 2706), 'numpy.sign', 'np.sign', (['xdot'], {}), '(xdot)\n', (2700, 2706), True, 'import numpy as np\n'), ((4986, 5024), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - l) ** 2 / w ** 2))'], {}), '(-0.5 * ((x - l) ** 2 / w ** 2))\n', (4992, 5024), True, 'import numpy as np\n'), ((5144, 5182), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - l) ** 2 / w ** 2))'], {}), '(-0.5 * ((x - l) ** 2 / w ** 2))\n', (5150, 5182), True, 'import numpy as np\n')] |
# Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of trieste models against reference GPflow models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflow models are used as the underlying model in a trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflow models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
trieste model).
"""
from __future__ import annotations
import unittest.mock
from typing import Any, cast
import gpflow
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.models import SGPR, SVGP, VGP
from tests.util.misc import random_seed
from tests.util.models.gpflow.models import (
ModelFactoryType,
gpr_model,
mock_data,
sgpr_model,
svgp_model,
two_output_svgp_model,
vgp_matern_model,
vgp_model,
)
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.data import Dataset
from trieste.logging import step_number, tensorboard_writer
from trieste.models import TrainableProbabilisticModel
from trieste.models.config import create_model
from trieste.models.gpflow import (
GaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
)
from trieste.models.gpflow.models import NumDataPropertyMixin
from trieste.models.gpflow.sampler import RandomFourierFeatureTrajectorySampler
from trieste.models.optimizer import BatchOptimizer, DatasetTransformer, Optimizer
def _3x_plus_gaussian_noise(x: tf.Tensor) -> tf.Tensor:
return 3.0 * x + np.random.normal(scale=0.01, size=x.shape)
def test_gpflow_wrappers_loss(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
model, _reference_model = gpflow_interface_factory(x, y)
internal_model = model.model
reference_model = _reference_model(x, y)
if isinstance(internal_model, SVGP):
args = {"data": (x, y)}
else:
args = {}
npt.assert_allclose(
internal_model.training_loss(**args), reference_model.training_loss(**args), rtol=1e-6
)
def test_gpflow_wrappers_update(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
model, _reference_model = gpflow_interface_factory(x, y)
x_new = tf.concat([x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
new_data = Dataset(x_new, fnc_3x_plus_10(x_new))
# Would be nice if ModelFactoryType could return an intersection type of
# GPflowPredictor and TrainableProbabilisticModel but this isn't possible
cast(TrainableProbabilisticModel, model).update(new_data)
reference_model = _reference_model(x_new, fnc_3x_plus_10(x_new))
internal_model = model.model
if isinstance(internal_model, SVGP):
args = {"data": (new_data.query_points, new_data.observations)}
else:
args = {}
npt.assert_allclose(
internal_model.training_loss(**args), reference_model.training_loss(**args), rtol=1e-6
)
def test_gpflow_wrappers_ref_optimize(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_2sin_x_over_3(x)
data = Dataset(x, y)
model, _reference_model = gpflow_interface_factory(x, y)
reference_model = _reference_model(x, y)
model.optimize(data)
internal_model = model.model
if isinstance(internal_model, SVGP):
data_iter = iter(
tf.data.Dataset.from_tensor_slices(data.astuple())
.shuffle(len(data))
.batch(100)
.prefetch(tf.data.experimental.AUTOTUNE)
.repeat()
)
tf.optimizers.Adam().minimize(
reference_model.training_loss_closure(data=data_iter, compile=False),
reference_model.trainable_variables,
)
        # the two losses differ slightly even though the training code paths are
        # nearly identical; the cause is unclear, hence the loose tolerance below
npt.assert_allclose(
internal_model.training_loss(next(data_iter)),
reference_model.training_loss(next(data_iter)),
rtol=1e-1,
)
else:
reference_model.data = (
tf.Variable(
reference_model.data[0],
trainable=False,
shape=[None, *reference_model.data[0].shape[1:]],
),
tf.Variable(
reference_model.data[1],
trainable=False,
shape=[None, *reference_model.data[1].shape[1:]],
),
)
gpflow.optimizers.Scipy().minimize(
reference_model.training_loss_closure(compile=False),
reference_model.trainable_variables,
)
npt.assert_allclose(
internal_model.training_loss(), reference_model.training_loss(), rtol=1e-6
)
def test_gaussian_process_regression_raises_for_invalid_init() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
with pytest.raises(ValueError):
GaussianProcessRegression(gpr_model(x, y), num_kernel_samples=-1)
with pytest.raises(ValueError):
optimizer1 = BatchOptimizer(gpflow.optimizers.Scipy())
GaussianProcessRegression(gpr_model(x, y), optimizer=optimizer1)
with pytest.raises(ValueError):
optimizer2 = Optimizer(tf.optimizers.Adam())
GaussianProcessRegression(gpr_model(x, y), optimizer=optimizer2)
def test_gaussian_process_regression_raises_for_conditionals_with_sgpr() -> None:
data = mock_data()
model = GaussianProcessRegression(sgpr_model(*data))
with pytest.raises(NotImplementedError):
model.conditional_predict_f(data[0], additional_data=Dataset(data[0], data[1]))
with pytest.raises(NotImplementedError):
model.conditional_predict_joint(data[0], additional_data=Dataset(data[0], data[1]))
with pytest.raises(NotImplementedError):
model.conditional_predict_y(data[0], additional_data=Dataset(data[0], data[1]))
with pytest.raises(NotImplementedError):
model.conditional_predict_f_sample(
data[0], additional_data=Dataset(data[0], data[1]), num_samples=1
)
def test_gaussian_process_regression_correctly_returns_internal_data() -> None:
data = mock_data()
model = GaussianProcessRegression(gpr_model(*data))
returned_data = model.get_internal_data()
npt.assert_array_equal(returned_data.query_points, data[0])
npt.assert_array_equal(returned_data.observations, data[1])
@random_seed
@unittest.mock.patch(
"trieste.models.gpflow.models.GaussianProcessRegression.find_best_model_initialization"
)
@pytest.mark.parametrize("prior_for_lengthscale", [True, False])
def test_gaussian_process_regression_correctly_counts_params_that_can_be_sampled(
mocked_model_initializer: Any,
dim: int,
prior_for_lengthscale: bool,
) -> None:
x = tf.constant(np.arange(1, 5 * dim + 1).reshape(-1, dim), dtype=tf.float64) # shape: [5, d]
optimizer = Optimizer(
optimizer=gpflow.optimizers.Scipy(),
minimize_args={"options": dict(maxiter=10)},
)
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)), optimizer=optimizer)
model.model.kernel = gpflow.kernels.RBF(lengthscales=tf.ones([dim], dtype=tf.float64))
model.model.likelihood.variance.assign(1.0)
gpflow.set_trainable(model.model.likelihood, True)
if prior_for_lengthscale:
model.model.kernel.lengthscales.prior = tfp.distributions.LogNormal(
loc=tf.math.log(model.model.kernel.lengthscales), scale=1.0
)
else:
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
model.model.likelihood.variance.prior = tfp.distributions.LogNormal(
loc=tf.cast(-2.0, dtype=tf.float64), scale=tf.cast(5.0, dtype=tf.float64)
)
dataset = Dataset(x, tf.cast(fnc_3x_plus_10(x), dtype=tf.float64))
model.optimize(dataset)
mocked_model_initializer.assert_called_once()
num_samples = mocked_model_initializer.call_args[0][0]
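    # the dim lengthscales and the likelihood variance are the dim + 1 parameters
    # that can be sampled (they carry priors or constrained bijectors), at the
    # default of 10 kernel samples per parameter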
npt.assert_array_equal(num_samples, 10 * (dim + 1))
def test_gaussian_process_regression_best_initialization_changes_params_with_priors(
dim: int,
) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(lengthscales=[0.2] * dim)
model.model.kernel.lengthscales.prior = tfp.distributions.LogNormal(
loc=tf.math.log(model.model.kernel.lengthscales), scale=1.0
)
model.find_best_model_initialization(2)
npt.assert_allclose(1.0, model.model.kernel.variance)
npt.assert_array_equal(dim, model.model.kernel.lengthscales.shape)
npt.assert_raises(
AssertionError, npt.assert_allclose, [0.2, 0.2], model.model.kernel.lengthscales
)
def test_gaussian_process_regression_best_initialization_changes_params_with_sigmoid_bijectors(
dim: int,
) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(lengthscales=[0.2] * dim)
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
model.find_best_model_initialization(2)
npt.assert_allclose(1.0, model.model.kernel.variance)
npt.assert_array_equal(dim, model.model.kernel.lengthscales.shape)
npt.assert_raises(
AssertionError, npt.assert_allclose, [0.2, 0.2], model.model.kernel.lengthscales
)
@random_seed
def test_gaussian_process_regression_best_initialization_improves_training_loss(dim: int) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(variance=0.01, lengthscales=[0.011] * dim)
upper = tf.cast([100.0] * dim, dtype=tf.float64)
lower = upper / 10000
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
pre_init_likelihood = -model.model.training_loss()
model.find_best_model_initialization(10)
post_init_likelihood = -model.model.training_loss()
npt.assert_array_less(pre_init_likelihood, post_init_likelihood)
@random_seed
def test_gaussian_process_regression_best_initialization_improves_likelihood(dim: int) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
model.model.kernel.variance.prior = tfp.distributions.LogNormal(
loc=np.float64(-2.0), scale=np.float64(1.0)
)
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
pre_init_loss = model.model.training_loss()
model.find_best_model_initialization(100)
post_init_loss = model.model.training_loss()
npt.assert_array_less(post_init_loss, pre_init_loss)
def test_gaussian_process_regression_default_optimizer_is_correct() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
model = GaussianProcessRegression(gpr_model(x_observed[:10], y_observed[:10]))
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
def test_gpr_config_builds_and_default_optimizer_is_correct() -> None:
data = mock_data()
model_config = {"model": gpr_model(*data)}
model = create_model(model_config)
assert isinstance(model, GaussianProcessRegression)
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
def test_sgpr_config_builds_and_default_optimizer_is_correct() -> None:
data = mock_data()
model_config = {"model": sgpr_model(*data)}
model = create_model(model_config)
assert isinstance(model, GaussianProcessRegression)
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
@random_seed
def test_gaussian_process_regression_trajectory_sampler_returns_correct_trajectory_sampler(
dim: int,
) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
trajectory_sampler = model.trajectory_sampler()
assert isinstance(trajectory_sampler, RandomFourierFeatureTrajectorySampler)
@random_seed
def test_gaussian_process_regression_trajectory_sampler_has_correct_samples() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = GaussianProcessRegression(gpr_model(x, _3x_plus_gaussian_noise(x)))
x_predict = tf.constant([[50.5]], gpflow.default_float())
samples = []
num_samples = 10
trajectory_sampler = model.trajectory_sampler()
trajectory = trajectory_sampler.get_trajectory()
samples.append(-1.0 * trajectory(tf.expand_dims(x_predict, -2)))
for _ in range(num_samples - 1):
trajectory.resample() # type: ignore
samples.append(trajectory(tf.expand_dims(x_predict, -2)))
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
true_mean, true_variance = model.predict(x_predict)
linear_error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
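    # the Monte Carlo standard error decays as O(1 / sqrt(num_samples))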
npt.assert_allclose(sample_mean + 1.0, true_mean + 1.0, rtol=linear_error)
npt.assert_allclose(sample_variance, true_variance, rtol=2 * linear_error)
def test_gpflow_wrappers_predict_y(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model, _ = gpflow_interface_factory(x, _3x_plus_gaussian_noise(x))
x_predict = tf.constant([[50.5]], gpflow.default_float())
mean_f, variance_f = model.predict(x_predict)
mean_y, variance_y = model.predict_y(x_predict)
npt.assert_allclose(mean_f, mean_y)
npt.assert_array_less(variance_f, variance_y)
@unittest.mock.patch("trieste.models.gpflow.interface.tf.summary.scalar")
def test_gpflow_wrappers_log(
mocked_summary_scalar: unittest.mock.MagicMock, gpflow_interface_factory: ModelFactoryType
) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
model, _ = gpflow_interface_factory(x, fnc_3x_plus_10(x))
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
with step_number(42):
model.log()
assert len(mocked_summary_writer.method_calls) == 1
assert mocked_summary_writer.method_calls[0][0] == "as_default"
assert mocked_summary_writer.method_calls[0][-1]["step"] == 42
assert mocked_summary_scalar.call_count == 2
assert mocked_summary_scalar.call_args_list[0][0][0] == "kernel.variance"
assert mocked_summary_scalar.call_args_list[0][0][1].numpy() == 1
assert mocked_summary_scalar.call_args_list[1][0][0] == "kernel.lengthscale"
assert mocked_summary_scalar.call_args_list[1][0][1].numpy() == 1
def test_variational_gaussian_process_raises_for_invalid_init() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
with pytest.raises(ValueError):
VariationalGaussianProcess(vgp_model(x, y), natgrad_gamma=1)
with pytest.raises(ValueError):
optimizer = Optimizer(gpflow.optimizers.Scipy())
VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=True)
with pytest.raises(ValueError):
optimizer = BatchOptimizer(gpflow.optimizers.Scipy())
VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=True)
with pytest.raises(ValueError):
optimizer = Optimizer(tf.optimizers.Adam())
VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=False)
def test_variational_gaussian_process_correctly_returns_internal_data() -> None:
data = mock_data()
model = VariationalGaussianProcess(vgp_model(*data))
returned_data = model.get_internal_data()
npt.assert_array_equal(returned_data.query_points, data[0])
npt.assert_array_equal(returned_data.observations, data[1])
def test_variational_gaussian_process_update_updates_num_data() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
m = VariationalGaussianProcess(vgp_model(x, y))
num_data = m.model.num_data
x_new = tf.concat([x, [[10.0], [11.0]]], 0)
y_new = fnc_3x_plus_10(x_new)
m.update(Dataset(x_new, y_new))
new_num_data = m.model.num_data
assert new_num_data - num_data == 2
def test_variational_gaussian_process_update() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
data = Dataset(x, fnc_3x_plus_10(x))
m = VariationalGaussianProcess(vgp_model(data.query_points, data.observations))
reference_model = vgp_model(data.query_points, data.observations)
npt.assert_allclose(m.model.q_mu, reference_model.q_mu, atol=1e-5)
npt.assert_allclose(m.model.q_sqrt, reference_model.q_sqrt, atol=1e-5)
x_new = tf.concat([x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
new_data = Dataset(x_new, fnc_3x_plus_10(x_new))
m.update(new_data)
reference_model_new = vgp_model(new_data.query_points, new_data.observations)
npt.assert_allclose(m.model.q_mu, reference_model_new.q_mu, atol=1e-5)
npt.assert_allclose(m.model.q_sqrt, reference_model_new.q_sqrt, atol=1e-5)
@random_seed
def test_variational_gaussian_process_update_q_mu_sqrt_unchanged() -> None:
x_observed = tf.constant(np.arange(10).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = fnc_2sin_x_over_3(x_observed)
model = VariationalGaussianProcess(vgp_matern_model(x_observed, y_observed))
old_q_mu = model.model.q_mu.numpy()
old_q_sqrt = model.model.q_sqrt.numpy()
data = Dataset(x_observed, y_observed)
model.update(data)
new_q_mu = model.model.q_mu.numpy()
new_q_sqrt = model.model.q_sqrt.numpy()
npt.assert_allclose(old_q_mu, new_q_mu, atol=1e-5)
npt.assert_allclose(old_q_sqrt, new_q_sqrt, atol=1e-5)
@random_seed
def test_gpflow_wrappers_default_optimize(
gpflow_interface_factory: ModelFactoryType,
) -> None:
data = mock_data()
model, _ = gpflow_interface_factory(*data)
internal_model = model.model
if isinstance(internal_model, SVGP):
args = {"data": data}
else:
args = {}
loss = internal_model.training_loss(**args)
model.optimize(Dataset(*data))
assert internal_model.training_loss(**args) < loss
def test_gaussian_process_regression_optimize(compile: bool) -> None:
data = mock_data()
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=compile)
model = GaussianProcessRegression(gpr_model(*data), optimizer)
loss = model.model.training_loss()
model.optimize(Dataset(*data))
assert model.model.training_loss() < loss
@random_seed
def test_variational_gaussian_process_predict() -> None:
x_observed = tf.constant(np.arange(3).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = _3x_plus_gaussian_noise(x_observed)
model = VariationalGaussianProcess(vgp_model(x_observed, y_observed))
internal_model = model.model
gpflow.optimizers.Scipy().minimize(
internal_model.training_loss_closure(),
internal_model.trainable_variables,
)
x_predict = tf.constant([[1.5]], gpflow.default_float())
mean, variance = model.predict(x_predict)
mean_y, variance_y = model.predict_y(x_predict)
reference_model = vgp_model(x_observed, y_observed)
reference_model.data = (
tf.Variable(
reference_model.data[0],
trainable=False,
shape=[None, *reference_model.data[0].shape[1:]],
),
tf.Variable(
reference_model.data[1],
trainable=False,
shape=[None, *reference_model.data[1].shape[1:]],
),
)
gpflow.optimizers.Scipy().minimize(
reference_model.training_loss_closure(),
reference_model.trainable_variables,
)
reference_mean, reference_variance = reference_model.predict_f(x_predict)
npt.assert_allclose(mean, reference_mean)
npt.assert_allclose(variance, reference_variance, atol=1e-3)
npt.assert_allclose(variance_y - model.get_observation_noise(), variance, atol=5e-5)
def test_sparse_variational_model_attribute() -> None:
model = svgp_model(*mock_data())
sv = SparseVariational(model)
assert sv.model is model
assert isinstance(sv.model, SVGP)
assert isinstance(sv.model, NumDataPropertyMixin)
def test_sparse_variational_model_num_data_mixin_supports_subclasses() -> None:
class SVGPSubclass(SVGP): # type: ignore[misc]
@property
def mol(self) -> int:
return 42
x = mock_data()[0]
model = SVGPSubclass(
gpflow.kernels.Matern32(), gpflow.likelihoods.Gaussian(), x[:2], num_data=len(x)
)
sv = SparseVariational(model)
assert sv.model is model
assert isinstance(sv.model, NumDataPropertyMixin)
assert isinstance(sv.model, SVGPSubclass)
assert sv.model.mol == 42
def test_sparse_variational_update_updates_num_data() -> None:
model = SparseVariational(
svgp_model(tf.zeros([1, 4]), tf.zeros([1, 1])),
)
model.update(Dataset(tf.zeros([5, 4]), tf.zeros([5, 1])))
assert model.model.num_data == 5
@pytest.mark.parametrize(
"new_data",
[Dataset(tf.zeros([3, 5]), tf.zeros([3, 1])), Dataset(tf.zeros([3, 4]), tf.zeros([3, 2]))],
)
def test_sparse_variational_update_raises_for_invalid_shapes(new_data: Dataset) -> None:
model = SparseVariational(
svgp_model(tf.zeros([1, 4]), tf.zeros([1, 1])),
)
with pytest.raises(ValueError):
model.update(new_data)
def test_sparse_variational_optimize_with_defaults() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
optimizer = BatchOptimizer(tf.optimizers.Adam(), max_iter=20)
model = SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer)
loss = model.model.training_loss(data)
model.optimize(dataset)
assert model.model.training_loss(data) < loss
def test_sparse_variational_optimize(batcher: DatasetTransformer, compile: bool) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
optimizer = BatchOptimizer(
tf.optimizers.Adam(),
max_iter=10,
batch_size=10,
dataset_builder=batcher,
compile=compile,
)
model = SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer)
loss = model.model.training_loss(data)
model.optimize(dataset)
assert model.model.training_loss(data) < loss
def test_sparse_variational_default_optimizer_is_correct() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
model = SparseVariational(svgp_model(x_observed, y_observed))
assert isinstance(model.optimizer, BatchOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
def test_svgp_config_builds_and_default_optimizer_is_correct() -> None:
data = mock_data()
model_config = {"model": svgp_model(*data)}
model = create_model(model_config)
assert isinstance(model, SparseVariational)
assert isinstance(model.optimizer, BatchOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
def test_sparse_variational_raises_for_invalid_init() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
with pytest.raises(ValueError):
optimizer1 = BatchOptimizer(gpflow.optimizers.Scipy())
SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer1)
with pytest.raises(ValueError):
optimizer2 = Optimizer(tf.optimizers.Adam())
SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer2)
@pytest.mark.parametrize("use_natgrads", [True, False])
def test_variational_gaussian_process_optimize_with_and_without_natgrads(
batcher: DatasetTransformer, compile: bool, use_natgrads: bool
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
if use_natgrads:
optimizer = BatchOptimizer(
tf.optimizers.Adam(),
max_iter=10,
batch_size=10,
dataset_builder=batcher,
compile=compile,
)
else:
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=compile) # type:ignore
model = VariationalGaussianProcess(
vgp_model(x_observed[:10], y_observed[:10]), optimizer=optimizer, use_natgrads=use_natgrads
)
loss = model.model.training_loss()
model.optimize(dataset)
assert model.model.training_loss() < loss
def test_variational_gaussian_process_optimize_natgrads_only_updates_variational_params(
compile: bool,
) -> None:
x_observed = np.linspace(0, 100, 10).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
class DummyBatchOptimizer(BatchOptimizer):
def optimize(self, model: tf.Module, dataset: Dataset) -> None:
pass
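    # the stub optimizer skips the gradient-based step, so only the natural
    # gradient update on the variational parameters (q_mu, q_sqrt) runs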
optimizer = DummyBatchOptimizer(tf.optimizers.Adam(), compile=compile, max_iter=10)
model = VariationalGaussianProcess(
vgp_matern_model(x_observed[:10], y_observed[:10]), optimizer=optimizer, use_natgrads=True
)
old_num_trainable_params = len(model.model.trainable_variables)
old_kernel_params = model.get_kernel().parameters[0].numpy()
old_q_mu = model.model.q_mu.numpy()
old_q_sqrt = model.model.q_sqrt.numpy()
model.optimize(dataset)
new_num_trainable_params = len(model.model.trainable_variables)
new_kernel_params = model.get_kernel().parameters[0].numpy()
new_q_mu = model.model.q_mu.numpy()
new_q_sqrt = model.model.q_sqrt.numpy()
npt.assert_allclose(old_kernel_params, new_kernel_params, atol=1e-3)
npt.assert_equal(old_num_trainable_params, new_num_trainable_params)
npt.assert_raises(AssertionError, npt.assert_allclose, old_q_mu, new_q_mu)
npt.assert_raises(AssertionError, npt.assert_allclose, old_q_sqrt, new_q_sqrt)
@pytest.mark.parametrize("use_natgrads", [True, False])
def test_variational_gaussian_process_default_optimizer_is_correct(use_natgrads: bool) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
model = VariationalGaussianProcess(
vgp_model(x_observed[:10], y_observed[:10]), use_natgrads=use_natgrads
)
if use_natgrads:
assert isinstance(model.optimizer, BatchOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
else:
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
@pytest.mark.parametrize("use_natgrads", [True, False])
def test_vgp_config_builds_and_default_optimizer_is_correct(use_natgrads: bool) -> None:
data = mock_data()
model_config = {"model": vgp_model(*data), "model_args": {"use_natgrads": use_natgrads}}
model = create_model(model_config)
assert isinstance(model, VariationalGaussianProcess)
if use_natgrads:
assert isinstance(model.optimizer, BatchOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
else:
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
@random_seed
def test_gpflow_predictor_get_observation_noise_raises_for_likelihood_with_variance(
gpflow_interface_factory: ModelFactoryType,
) -> None:
data = mock_data()
model, _ = gpflow_interface_factory(*data)
model.model.likelihood = gpflow.likelihoods.Gaussian() # has variance attribute
model.get_observation_noise()
model.model.likelihood = gpflow.likelihoods.Bernoulli() # does not have variance attribute
with pytest.raises(NotImplementedError):
model.get_observation_noise()
def test_gaussian_process_regression_conditional_predict_equations() -> None:
x = gpflow.utilities.to_default_float(
tf.constant(np.arange(1, 8).reshape(-1, 1) / 8.0)
) # shape: [7, 1]
y = fnc_2sin_x_over_3(x)
model7 = GaussianProcessRegression(gpr_model(x, y))
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(x[5:, :], y[5:, :])
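    # conditioning the 5-point model on the 2 held-out points must reproduce
    # the predictions of the model trained on all 7 points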
query_points = tf.concat([0.5 * x, 2.0 * x], 0) # shape: [14, 1]
predj_mean7, predj_cov7 = model7.predict_joint(query_points)
predj_mean5, predj_cov5 = model5.conditional_predict_joint(query_points, additional_data)
pred_mean7, pred_var7 = model7.predict(query_points)
pred_mean5, pred_var5 = model5.conditional_predict_f(query_points, additional_data)
predy_mean7, predy_var7 = model7.predict_y(query_points)
predy_mean5, predy_var5 = model5.conditional_predict_y(query_points, additional_data)
np.testing.assert_allclose(tf.transpose(tf.linalg.diag_part(predj_cov5)), pred_var5, atol=1e-5)
np.testing.assert_allclose(predj_mean5, pred_mean5, atol=1e-5)
np.testing.assert_allclose(predj_mean5, predj_mean7, atol=1e-5)
np.testing.assert_allclose(pred_mean7, pred_mean5, atol=1e-5)
np.testing.assert_allclose(pred_var7, pred_var5, atol=1e-5)
np.testing.assert_allclose(predj_cov7, predj_cov5, atol=1e-5)
np.testing.assert_allclose(predy_mean7, predy_mean5, atol=1e-5)
np.testing.assert_allclose(predy_var7, predy_var5, atol=1e-5)
def test_gaussian_process_regression_conditional_predict_equations_broadcast() -> None:
x = gpflow.utilities.to_default_float(
tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)
) # shape: [23, 1]
y = fnc_2sin_x_over_3(x)
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y[5:, :], [3, 6, -1]))
query_points = tf.concat([0.5 * x, 2.0 * x], 0) # shape: [46, 1]
predj_mean5, predj_cov5 = model5.conditional_predict_joint(query_points, additional_data)
pred_mean5, pred_var5 = model5.conditional_predict_f(query_points, additional_data)
predy_mean5, predy_var5 = model5.conditional_predict_y(query_points, additional_data)
for i in range(3):
xi = tf.concat([x[:5, :], additional_data.query_points[i, ...]], axis=0)
yi = tf.concat([y[:5, :], additional_data.observations[i, ...]], axis=0)
modeli = GaussianProcessRegression(gpr_model(xi, yi))
predj_meani, predj_covi = modeli.predict_joint(query_points)
pred_meani, pred_vari = modeli.predict(query_points)
predy_meani, predy_vari = modeli.predict_y(query_points)
np.testing.assert_allclose(predj_mean5[i, ...], predj_meani, atol=1e-5)
np.testing.assert_allclose(pred_meani, pred_mean5[i, ...], atol=1e-5)
np.testing.assert_allclose(pred_vari, pred_var5[i, ...], atol=1e-5)
np.testing.assert_allclose(predj_covi, predj_cov5[i, ...], atol=1e-5)
np.testing.assert_allclose(predy_vari, predy_var5[i, ...], atol=1e-5)
np.testing.assert_allclose(predy_vari, predy_var5[i, ...], atol=1e-5)
def test_gaussian_process_regression_conditional_predict_f_sample() -> None:
x = gpflow.utilities.to_default_float(
tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)
) # shape: [23, 1]
y = fnc_2sin_x_over_3(x)
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y[5:, :], [3, 6, -1]))
query_points = tf.concat([0.5 * x, 2.0 * x], 0) # shape: [46, 1]
samples = model5.conditional_predict_f_sample(query_points, additional_data, num_samples=100000)
npt.assert_array_equal([3, 100000, 46, 1], samples.shape)
for i in range(3):
xi = tf.concat([x[:5, :], additional_data.query_points[i, ...]], axis=0)
yi = tf.concat([y[:5, :], additional_data.observations[i, ...]], axis=0)
modeli = GaussianProcessRegression(gpr_model(xi, yi))
predj_meani, predj_covi = modeli.predict_joint(query_points)
sample_mean = tf.reduce_mean(samples[i], axis=0)
sample_cov = tfp.stats.covariance(samples[i, :, :, 0], sample_axis=0)
np.testing.assert_allclose(sample_mean, predj_meani, atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(sample_cov, predj_covi[0], atol=1e-2, rtol=1e-2)
@pytest.mark.parametrize("num_outputs", [1, 2])
def test_gaussian_process_regression_pairwise_covariance(num_outputs: int) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
model = GaussianProcessRegression(gpr_model(x, tf.repeat(y, num_outputs, axis=1)))
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0) # shape: [8, 1]
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0) # shape: [12, 1]
all_query_points = tf.concat([query_points_1, query_points_2], 0)
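    # covariance_between_points should equal the off-diagonal block of the
    # joint predictive covariance over the concatenated query points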
_, predictive_covariance = model.predict_joint(all_query_points)
expected_covariance = predictive_covariance[:, :8, 8:]
actual_covariance = model.covariance_between_points(query_points_1, query_points_2)
np.testing.assert_allclose(expected_covariance, actual_covariance, atol=1e-5)
@random_seed
def test_gpflow_models_pairwise_covariance(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
model, _ = gpflow_interface_factory(x, y)
if isinstance(model.model, SGPR):
pytest.skip("Covariance between points is not supported for SGPR.")
if isinstance(model.model, (VGP, SVGP)): # for speed just update q_sqrt rather than optimize
num_inducing_points = tf.shape(model.model.q_sqrt)[1]
sampled_q_sqrt = tfp.distributions.WishartTriL(5, tf.eye(num_inducing_points)).sample(1)
model.model.q_sqrt.assign(sampled_q_sqrt)
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0) # shape: [8, 1]
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0) # shape: [12, 1]
all_query_points = tf.concat([query_points_1, query_points_2], 0)
_, predictive_covariance = model.predict_joint(all_query_points)
expected_covariance = predictive_covariance[0, :8, 8:]
actual_covariance = model.covariance_between_points( # type: ignore
query_points_1, query_points_2
)
np.testing.assert_allclose(expected_covariance, actual_covariance[0], atol=1e-4)
@random_seed
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize(
"mo_type", ["shared+shared", "separate+shared", "separate+separate", "auto"]
)
def test_sparse_variational_pairwise_covariance_for_non_whitened(
whiten: bool, mo_type: str
) -> None:
x = tf.constant(np.arange(1, 7).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [6, 1]
y1 = fnc_3x_plus_10(x)
y2 = y1 * 0.5
svgp = two_output_svgp_model(x, mo_type)
model = SparseVariational(svgp, BatchOptimizer(tf.optimizers.Adam(), max_iter=3, batch_size=10))
model.model.whiten = whiten
model.optimize(Dataset(x, tf.concat([y1, y2], axis=-1)))
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0) # shape: [12, 1]
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0) # shape: [18, 1]
all_query_points = tf.concat([query_points_1, query_points_2], 0)
_, predictive_covariance = model.predict_joint(all_query_points)
expected_covariance = predictive_covariance[:, :12, 12:]
actual_covariance = model.covariance_between_points(query_points_1, query_points_2)
np.testing.assert_allclose(expected_covariance, actual_covariance, atol=1e-4)
@random_seed
def test_sparse_variational_raises_for_pairwise_covariance_for_invalid_query_points(
gpflow_interface_factory: ModelFactoryType,
) -> None:
data = mock_data()
model, _ = gpflow_interface_factory(*data)
if isinstance(model.model, (SGPR)):
pytest.skip("Covariance between points is not supported for SGPR.")
with pytest.raises(ValueError):
model.covariance_between_points(data[0], tf.expand_dims(data[0], axis=0)) # type: ignore
def test_gaussian_process_regression_sgpr_raises_for_pairwise_covariance() -> None:
data = mock_data()
model = GaussianProcessRegression(sgpr_model(*data))
with pytest.raises(NotImplementedError):
model.covariance_between_points(data[0], data[0])
| [
"tensorflow.shape",
"numpy.testing.assert_equal",
"gpflow.optimizers.Scipy",
"gpflow.default_float",
"tests.util.models.models.fnc_2sin_x_over_3",
"tensorflow.math.log",
"numpy.testing.assert_raises",
"trieste.data.Dataset",
"trieste.models.config.create_model",
"trieste.models.gpflow.SparseVariat... | [((7619, 7682), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""prior_for_lengthscale"""', '[True, False]'], {}), "('prior_for_lengthscale', [True, False])\n", (7642, 7682), False, 'import pytest\n'), ((26584, 26638), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_natgrads"""', '[True, False]'], {}), "('use_natgrads', [True, False])\n", (26607, 26638), False, 'import pytest\n'), ((28989, 29043), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_natgrads"""', '[True, False]'], {}), "('use_natgrads', [True, False])\n", (29012, 29043), False, 'import pytest\n'), ((29682, 29736), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_natgrads"""', '[True, False]'], {}), "('use_natgrads', [True, False])\n", (29705, 29736), False, 'import pytest\n'), ((35305, 35351), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_outputs"""', '[1, 2]'], {}), "('num_outputs', [1, 2])\n", (35328, 35351), False, 'import pytest\n'), ((37438, 37486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""whiten"""', '[True, False]'], {}), "('whiten', [True, False])\n", (37461, 37486), False, 'import pytest\n'), ((37488, 37593), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mo_type"""', "['shared+shared', 'separate+shared', 'separate+separate', 'auto']"], {}), "('mo_type', ['shared+shared', 'separate+shared',\n 'separate+separate', 'auto'])\n", (37511, 37593), False, 'import pytest\n'), ((2532, 2549), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (2546, 2549), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((3092, 3109), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (3106, 3109), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((4086, 4106), 'tests.util.models.models.fnc_2sin_x_over_3', 'fnc_2sin_x_over_3', (['x'], {}), '(x)\n', (4103, 4106), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((4118, 4131), 'trieste.data.Dataset', 'Dataset', (['x', 'y'], {}), '(x, y)\n', (4125, 4131), False, 'from trieste.data import Dataset\n'), ((5892, 5930), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_np', 'x_np.dtype'], {}), '(x_np, x_np.dtype)\n', (5912, 5930), True, 'import tensorflow as tf\n'), ((5939, 5956), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (5953, 5956), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((6499, 6510), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (6508, 6510), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((7245, 7256), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (7254, 7256), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((7363, 7422), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['returned_data.query_points', 'data[0]'], {}), '(returned_data.query_points, data[0])\n', (7385, 7422), True, 'import numpy.testing as npt\n'), ((7427, 7486), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['returned_data.observations', 'data[1]'], {}), '(returned_data.observations, 
data[1])\n', (7449, 7486), True, 'import numpy.testing as npt\n'), ((8323, 8373), 'gpflow.set_trainable', 'gpflow.set_trainable', (['model.model.likelihood', '(True)'], {}), '(model.model.likelihood, True)\n', (8343, 8373), False, 'import gpflow\n'), ((9205, 9256), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['num_samples', '(10 * (dim + 1))'], {}), '(num_samples, 10 * (dim + 1))\n', (9227, 9256), True, 'import numpy.testing as npt\n'), ((9602, 9646), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'lengthscales': '([0.2] * dim)'}), '(lengthscales=[0.2] * dim)\n', (9620, 9646), False, 'import gpflow\n'), ((9845, 9898), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['(1.0)', 'model.model.kernel.variance'], {}), '(1.0, model.model.kernel.variance)\n', (9864, 9898), True, 'import numpy.testing as npt\n'), ((9903, 9969), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['dim', 'model.model.kernel.lengthscales.shape'], {}), '(dim, model.model.kernel.lengthscales.shape)\n', (9925, 9969), True, 'import numpy.testing as npt\n'), ((9974, 10078), 'numpy.testing.assert_raises', 'npt.assert_raises', (['AssertionError', 'npt.assert_allclose', '[0.2, 0.2]', 'model.model.kernel.lengthscales'], {}), '(AssertionError, npt.assert_allclose, [0.2, 0.2], model.\n model.kernel.lengthscales)\n', (9991, 10078), True, 'import numpy.testing as npt\n'), ((10444, 10488), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'lengthscales': '([0.2] * dim)'}), '(lengthscales=[0.2] * dim)\n', (10462, 10488), False, 'import gpflow\n'), ((10502, 10541), 'tensorflow.cast', 'tf.cast', (['([10.0] * dim)'], {'dtype': 'tf.float64'}), '([10.0] * dim, dtype=tf.float64)\n', (10509, 10541), True, 'import tensorflow as tf\n'), ((10774, 10827), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['(1.0)', 'model.model.kernel.variance'], {}), '(1.0, model.model.kernel.variance)\n', (10793, 10827), True, 'import numpy.testing as npt\n'), ((10832, 10898), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['dim', 'model.model.kernel.lengthscales.shape'], {}), '(dim, model.model.kernel.lengthscales.shape)\n', (10854, 10898), True, 'import numpy.testing as npt\n'), ((10903, 11007), 'numpy.testing.assert_raises', 'npt.assert_raises', (['AssertionError', 'npt.assert_allclose', '[0.2, 0.2]', 'model.model.kernel.lengthscales'], {}), '(AssertionError, npt.assert_allclose, [0.2, 0.2], model.\n model.kernel.lengthscales)\n', (10920, 11007), True, 'import numpy.testing as npt\n'), ((11364, 11425), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'variance': '(0.01)', 'lengthscales': '([0.011] * dim)'}), '(variance=0.01, lengthscales=[0.011] * dim)\n', (11382, 11425), False, 'import gpflow\n'), ((11439, 11479), 'tensorflow.cast', 'tf.cast', (['([100.0] * dim)'], {'dtype': 'tf.float64'}), '([100.0] * dim, dtype=tf.float64)\n', (11446, 11479), True, 'import tensorflow as tf\n'), ((11826, 11890), 'numpy.testing.assert_array_less', 'npt.assert_array_less', (['pre_init_likelihood', 'post_init_likelihood'], {}), '(pre_init_likelihood, post_init_likelihood)\n', (11847, 11890), True, 'import numpy.testing as npt\n'), ((12235, 12293), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'variance': '(1.0)', 'lengthscales': '([0.2] * dim)'}), '(variance=1.0, lengthscales=[0.2] * dim)\n', (12253, 12293), False, 'import gpflow\n'), ((12434, 12473), 'tensorflow.cast', 'tf.cast', (['([10.0] * dim)'], {'dtype': 'tf.float64'}), '([10.0] * dim, dtype=tf.float64)\n', (12441, 12473), True, 'import tensorflow 
as tf\n'), ((12805, 12857), 'numpy.testing.assert_array_less', 'npt.assert_array_less', (['post_init_loss', 'pre_init_loss'], {}), '(post_init_loss, pre_init_loss)\n', (12826, 12857), True, 'import numpy.testing as npt\n'), ((13342, 13353), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (13351, 13353), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((13414, 13440), 'trieste.models.config.create_model', 'create_model', (['model_config'], {}), '(model_config)\n', (13426, 13440), False, 'from trieste.models.config import create_model\n'), ((13707, 13718), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (13716, 13718), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((13780, 13806), 'trieste.models.config.create_model', 'create_model', (['model_config'], {}), '(model_config)\n', (13792, 13806), False, 'from trieste.models.config import create_model\n'), ((14354, 14412), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'variance': '(1.0)', 'lengthscales': '([0.2] * dim)'}), '(variance=1.0, lengthscales=[0.2] * dim)\n', (14372, 14412), False, 'import gpflow\n'), ((15252, 15283), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (15266, 15283), True, 'import tensorflow as tf\n'), ((15306, 15350), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((samples - sample_mean) ** 2)'], {}), '((samples - sample_mean) ** 2)\n', (15320, 15350), True, 'import tensorflow as tf\n'), ((15478, 15552), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['(sample_mean + 1.0)', '(true_mean + 1.0)'], {'rtol': 'linear_error'}), '(sample_mean + 1.0, true_mean + 1.0, rtol=linear_error)\n', (15497, 15552), True, 'import numpy.testing as npt\n'), ((15557, 15631), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['sample_variance', 'true_variance'], {'rtol': '(2 * linear_error)'}), '(sample_variance, true_variance, rtol=2 * linear_error)\n', (15576, 15631), True, 'import numpy.testing as npt\n'), ((16041, 16076), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['mean_f', 'mean_y'], {}), '(mean_f, mean_y)\n', (16060, 16076), True, 'import numpy.testing as npt\n'), ((16081, 16126), 'numpy.testing.assert_array_less', 'npt.assert_array_less', (['variance_f', 'variance_y'], {}), '(variance_f, variance_y)\n', (16102, 16126), True, 'import numpy.testing as npt\n'), ((17341, 17379), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_np', 'x_np.dtype'], {}), '(x_np, x_np.dtype)\n', (17361, 17379), True, 'import tensorflow as tf\n'), ((17388, 17405), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (17402, 17405), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((18165, 18176), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (18174, 18176), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((18284, 18343), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['returned_data.query_points', 'data[0]'], {}), '(returned_data.query_points, data[0])\n', (18306, 18343), True, 'import numpy.testing as npt\n'), ((18348, 18407), 
'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['returned_data.observations', 'data[1]'], {}), '(returned_data.observations, data[1])\n', (18370, 18407), True, 'import numpy.testing as npt\n'), ((18548, 18586), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_np', 'x_np.dtype'], {}), '(x_np, x_np.dtype)\n', (18568, 18586), True, 'import tensorflow as tf\n'), ((18595, 18612), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (18609, 18612), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((18710, 18745), 'tensorflow.concat', 'tf.concat', (['[x, [[10.0], [11.0]]]', '(0)'], {}), '([x, [[10.0], [11.0]]], 0)\n', (18719, 18745), True, 'import tensorflow as tf\n'), ((18758, 18779), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x_new'], {}), '(x_new)\n', (18772, 18779), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((19178, 19225), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['data.query_points', 'data.observations'], {}), '(data.query_points, data.observations)\n', (19187, 19225), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((19231, 19298), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['m.model.q_mu', 'reference_model.q_mu'], {'atol': '(1e-05)'}), '(m.model.q_mu, reference_model.q_mu, atol=1e-05)\n', (19250, 19298), True, 'import numpy.testing as npt\n'), ((19302, 19373), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['m.model.q_sqrt', 'reference_model.q_sqrt'], {'atol': '(1e-05)'}), '(m.model.q_sqrt, reference_model.q_sqrt, atol=1e-05)\n', (19321, 19373), True, 'import numpy.testing as npt\n'), ((19568, 19623), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['new_data.query_points', 'new_data.observations'], {}), '(new_data.query_points, new_data.observations)\n', (19577, 19623), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((19629, 19700), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['m.model.q_mu', 'reference_model_new.q_mu'], {'atol': '(1e-05)'}), '(m.model.q_mu, reference_model_new.q_mu, atol=1e-05)\n', (19648, 19700), True, 'import numpy.testing as npt\n'), ((19704, 19779), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['m.model.q_sqrt', 'reference_model_new.q_sqrt'], {'atol': '(1e-05)'}), '(m.model.q_sqrt, reference_model_new.q_sqrt, atol=1e-05)\n', (19723, 19779), True, 'import numpy.testing as npt\n'), ((19978, 20007), 'tests.util.models.models.fnc_2sin_x_over_3', 'fnc_2sin_x_over_3', (['x_observed'], {}), '(x_observed)\n', (19995, 20007), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((20185, 20216), 'trieste.data.Dataset', 'Dataset', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (20192, 20216), False, 'from trieste.data import Dataset\n'), ((20330, 20381), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['old_q_mu', 'new_q_mu'], {'atol': '(1e-05)'}), '(old_q_mu, new_q_mu, atol=1e-05)\n', (20349, 20381), True, 'import numpy.testing as npt\n'), ((20385, 20440), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['old_q_sqrt', 'new_q_sqrt'], {'atol': '(1e-05)'}), '(old_q_sqrt, new_q_sqrt, atol=1e-05)\n', (20404, 20440), True, 'import 
numpy.testing as npt\n'), ((20568, 20579), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (20577, 20579), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((20981, 20992), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (20990, 20992), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((21896, 21929), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (21905, 21929), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((22510, 22551), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['mean', 'reference_mean'], {}), '(mean, reference_mean)\n', (22529, 22551), True, 'import numpy.testing as npt\n'), ((22556, 22617), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['variance', 'reference_variance'], {'atol': '(0.001)'}), '(variance, reference_variance, atol=0.001)\n', (22575, 22617), True, 'import numpy.testing as npt\n'), ((22809, 22833), 'trieste.models.gpflow.SparseVariational', 'SparseVariational', (['model'], {}), '(model)\n', (22826, 22833), False, 'from trieste.models.gpflow import GaussianProcessRegression, SparseVariational, VariationalGaussianProcess\n'), ((23313, 23337), 'trieste.models.gpflow.SparseVariational', 'SparseVariational', (['model'], {}), '(model)\n', (23330, 23337), False, 'from trieste.models.gpflow import GaussianProcessRegression, SparseVariational, VariationalGaussianProcess\n'), ((24369, 24383), 'trieste.data.Dataset', 'Dataset', (['*data'], {}), '(*data)\n', (24376, 24383), False, 'from trieste.data import Dataset\n'), ((24910, 24924), 'trieste.data.Dataset', 'Dataset', (['*data'], {}), '(*data)\n', (24917, 24924), False, 'from trieste.data import Dataset\n'), ((25768, 25779), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (25777, 25779), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((25841, 25867), 'trieste.models.config.create_model', 'create_model', (['model_config'], {}), '(model_config)\n', (25853, 25867), False, 'from trieste.models.config import create_model\n'), ((26951, 26965), 'trieste.data.Dataset', 'Dataset', (['*data'], {}), '(*data)\n', (26958, 26965), False, 'from trieste.data import Dataset\n'), ((27825, 27839), 'trieste.data.Dataset', 'Dataset', (['*data'], {}), '(*data)\n', (27832, 27839), False, 'from trieste.data import Dataset\n'), ((28682, 28751), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['old_kernel_params', 'new_kernel_params'], {'atol': '(0.001)'}), '(old_kernel_params, new_kernel_params, atol=0.001)\n', (28701, 28751), True, 'import numpy.testing as npt\n'), ((28755, 28823), 'numpy.testing.assert_equal', 'npt.assert_equal', (['old_num_trainable_params', 'new_num_trainable_params'], {}), '(old_num_trainable_params, new_num_trainable_params)\n', (28771, 28823), True, 'import numpy.testing as npt\n'), ((28828, 28902), 'numpy.testing.assert_raises', 'npt.assert_raises', (['AssertionError', 'npt.assert_allclose', 'old_q_mu', 'new_q_mu'], {}), '(AssertionError, 
npt.assert_allclose, old_q_mu, new_q_mu)\n', (28845, 28902), True, 'import numpy.testing as npt\n'), ((28907, 28985), 'numpy.testing.assert_raises', 'npt.assert_raises', (['AssertionError', 'npt.assert_allclose', 'old_q_sqrt', 'new_q_sqrt'], {}), '(AssertionError, npt.assert_allclose, old_q_sqrt, new_q_sqrt)\n', (28924, 28985), True, 'import numpy.testing as npt\n'), ((29837, 29848), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (29846, 29848), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((29955, 29981), 'trieste.models.config.create_model', 'create_model', (['model_config'], {}), '(model_config)\n', (29967, 29981), False, 'from trieste.models.config import create_model\n'), ((30510, 30521), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (30519, 30521), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((30598, 30627), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (30625, 30627), False, 'import gpflow\n'), ((30718, 30748), 'gpflow.likelihoods.Bernoulli', 'gpflow.likelihoods.Bernoulli', ([], {}), '()\n', (30746, 30748), False, 'import gpflow\n'), ((31080, 31100), 'tests.util.models.models.fnc_2sin_x_over_3', 'fnc_2sin_x_over_3', (['x'], {}), '(x)\n', (31097, 31100), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((31251, 31278), 'trieste.data.Dataset', 'Dataset', (['x[5:, :]', 'y[5:, :]'], {}), '(x[5:, :], y[5:, :])\n', (31258, 31278), False, 'from trieste.data import Dataset\n'), ((31299, 31331), 'tensorflow.concat', 'tf.concat', (['[0.5 * x, 2.0 * x]', '(0)'], {}), '([0.5 * x, 2.0 * x], 0)\n', (31308, 31331), True, 'import tensorflow as tf\n'), ((31913, 31976), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predj_mean5', 'pred_mean5'], {'atol': '(1e-05)'}), '(predj_mean5, pred_mean5, atol=1e-05)\n', (31939, 31976), True, 'import numpy as np\n'), ((31980, 32044), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predj_mean5', 'predj_mean7'], {'atol': '(1e-05)'}), '(predj_mean5, predj_mean7, atol=1e-05)\n', (32006, 32044), True, 'import numpy as np\n'), ((32048, 32110), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pred_mean7', 'pred_mean5'], {'atol': '(1e-05)'}), '(pred_mean7, pred_mean5, atol=1e-05)\n', (32074, 32110), True, 'import numpy as np\n'), ((32114, 32174), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pred_var7', 'pred_var5'], {'atol': '(1e-05)'}), '(pred_var7, pred_var5, atol=1e-05)\n', (32140, 32174), True, 'import numpy as np\n'), ((32178, 32240), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predj_cov7', 'predj_cov5'], {'atol': '(1e-05)'}), '(predj_cov7, predj_cov5, atol=1e-05)\n', (32204, 32240), True, 'import numpy as np\n'), ((32244, 32308), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predy_mean7', 'predy_mean5'], {'atol': '(1e-05)'}), '(predy_mean7, predy_mean5, atol=1e-05)\n', (32270, 32308), True, 'import numpy as np\n'), ((32312, 32374), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predy_var7', 'predy_var5'], {'atol': '(1e-05)'}), '(predy_var7, predy_var5, atol=1e-05)\n', (32338, 32374), True, 'import numpy as np\n'), ((32598, 32618), 
'tests.util.models.models.fnc_2sin_x_over_3', 'fnc_2sin_x_over_3', (['x'], {}), '(x)\n', (32615, 32618), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((32809, 32841), 'tensorflow.concat', 'tf.concat', (['[0.5 * x, 2.0 * x]', '(0)'], {}), '([0.5 * x, 2.0 * x], 0)\n', (32818, 32841), True, 'import tensorflow as tf\n'), ((34259, 34279), 'tests.util.models.models.fnc_2sin_x_over_3', 'fnc_2sin_x_over_3', (['x'], {}), '(x)\n', (34276, 34279), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((34468, 34500), 'tensorflow.concat', 'tf.concat', (['[0.5 * x, 2.0 * x]', '(0)'], {}), '([0.5 * x, 2.0 * x], 0)\n', (34477, 34500), True, 'import tensorflow as tf\n'), ((34624, 34681), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['[3, 100000, 46, 1]', 'samples.shape'], {}), '([3, 100000, 46, 1], samples.shape)\n', (34646, 34681), True, 'import numpy.testing as npt\n'), ((35543, 35560), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (35557, 35560), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((35670, 35702), 'tensorflow.concat', 'tf.concat', (['[0.5 * x, 0.5 * x]', '(0)'], {}), '([0.5 * x, 0.5 * x], 0)\n', (35679, 35702), True, 'import tensorflow as tf\n'), ((35741, 35776), 'tensorflow.concat', 'tf.concat', (['[2 * x, 2 * x, 2 * x]', '(0)'], {}), '([2 * x, 2 * x, 2 * x], 0)\n', (35750, 35776), True, 'import tensorflow as tf\n'), ((35819, 35865), 'tensorflow.concat', 'tf.concat', (['[query_points_1, query_points_2]', '(0)'], {}), '([query_points_1, query_points_2], 0)\n', (35828, 35865), True, 'import tensorflow as tf\n'), ((36088, 36166), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_covariance', 'actual_covariance'], {'atol': '(1e-05)'}), '(expected_covariance, actual_covariance, atol=1e-05)\n', (36114, 36166), True, 'import numpy as np\n'), ((36384, 36401), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (36398, 36401), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((36893, 36925), 'tensorflow.concat', 'tf.concat', (['[0.5 * x, 0.5 * x]', '(0)'], {}), '([0.5 * x, 0.5 * x], 0)\n', (36902, 36925), True, 'import tensorflow as tf\n'), ((36964, 36999), 'tensorflow.concat', 'tf.concat', (['[2 * x, 2 * x, 2 * x]', '(0)'], {}), '([2 * x, 2 * x, 2 * x], 0)\n', (36973, 36999), True, 'import tensorflow as tf\n'), ((37042, 37088), 'tensorflow.concat', 'tf.concat', (['[query_points_1, query_points_2]', '(0)'], {}), '([query_points_1, query_points_2], 0)\n', (37051, 37088), True, 'import tensorflow as tf\n'), ((37341, 37428), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_covariance', 'actual_covariance[0]'], {'atol': '(0.0001)'}), '(expected_covariance, actual_covariance[0], atol=\n 0.0001)\n', (37367, 37428), True, 'import numpy as np\n'), ((37812, 37829), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (37826, 37829), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((37860, 37893), 'tests.util.models.gpflow.models.two_output_svgp_model', 'two_output_svgp_model', (['x', 'mo_type'], {}), '(x, mo_type)\n', (37881, 37893), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((38112, 38144), 'tensorflow.concat', 'tf.concat', (['[0.5 * x, 0.5 
* x]', '(0)'], {}), '([0.5 * x, 0.5 * x], 0)\n', (38121, 38144), True, 'import tensorflow as tf\n'), ((38184, 38219), 'tensorflow.concat', 'tf.concat', (['[2 * x, 2 * x, 2 * x]', '(0)'], {}), '([2 * x, 2 * x, 2 * x], 0)\n', (38193, 38219), True, 'import tensorflow as tf\n'), ((38262, 38308), 'tensorflow.concat', 'tf.concat', (['[query_points_1, query_points_2]', '(0)'], {}), '([query_points_1, query_points_2], 0)\n', (38271, 38308), True, 'import tensorflow as tf\n'), ((38533, 38612), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_covariance', 'actual_covariance'], {'atol': '(0.0001)'}), '(expected_covariance, actual_covariance, atol=0.0001)\n', (38559, 38612), True, 'import numpy as np\n'), ((38781, 38792), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (38790, 38792), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((39189, 39200), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (39198, 39200), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((2317, 2359), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.01)', 'size': 'x.shape'}), '(scale=0.01, size=x.shape)\n', (2333, 2359), True, 'import numpy as np\n'), ((3294, 3315), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x_new'], {}), '(x_new)\n', (3308, 3315), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((3581, 3602), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x_new'], {}), '(x_new)\n', (3595, 3602), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((5967, 5992), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5980, 5992), False, 'import pytest\n'), ((6078, 6103), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6091, 6103), False, 'import pytest\n'), ((6251, 6276), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6264, 6276), False, 'import pytest\n'), ((6549, 6566), 'tests.util.models.gpflow.models.sgpr_model', 'sgpr_model', (['*data'], {}), '(*data)\n', (6559, 6566), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((6578, 6612), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (6591, 6612), False, 'import pytest\n'), ((6712, 6746), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (6725, 6746), False, 'import pytest\n'), ((6850, 6884), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (6863, 6884), False, 'import pytest\n'), ((6984, 7018), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (6997, 7018), False, 'import pytest\n'), ((7295, 7311), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['*data'], {}), '(*data)\n', (7304, 7311), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((8591, 8630), 'tensorflow.cast', 'tf.cast', (['([10.0] * dim)'], {'dtype': 'tf.float64'}), '([10.0] * dim, 
dtype=tf.float64)\n', (8598, 8630), True, 'import tensorflow as tf\n'), ((13088, 13131), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x_observed[:10]', 'y_observed[:10]'], {}), '(x_observed[:10], y_observed[:10])\n', (13097, 13131), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((13384, 13400), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['*data'], {}), '(*data)\n', (13393, 13400), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((13749, 13766), 'tests.util.models.gpflow.models.sgpr_model', 'sgpr_model', (['*data'], {}), '(*data)\n', (13759, 13766), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((14847, 14869), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (14867, 14869), False, 'import gpflow\n'), ((15910, 15932), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (15930, 15932), False, 'import gpflow\n'), ((16481, 16498), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (16495, 16498), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((16563, 16604), 'trieste.logging.tensorboard_writer', 'tensorboard_writer', (['mocked_summary_writer'], {}), '(mocked_summary_writer)\n', (16581, 16604), False, 'from trieste.logging import step_number, tensorboard_writer\n'), ((17416, 17441), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17429, 17441), False, 'import pytest\n'), ((17522, 17547), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17535, 17547), False, 'import pytest\n'), ((17708, 17733), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17721, 17733), False, 'import pytest\n'), ((17899, 17924), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17912, 17924), False, 'import pytest\n'), ((18216, 18232), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['*data'], {}), '(*data)\n', (18225, 18232), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((18648, 18663), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x', 'y'], {}), '(x, y)\n', (18657, 18663), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((18793, 18814), 'trieste.data.Dataset', 'Dataset', (['x_new', 'y_new'], {}), '(x_new, y_new)\n', (18800, 18814), False, 'from trieste.data import Dataset\n'), ((19052, 19069), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (19066, 19069), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((19106, 19153), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['data.query_points', 'data.observations'], {}), '(data.query_points, data.observations)\n', (19115, 19153), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((19495, 19516), 
'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x_new'], {}), '(x_new)\n', (19509, 19516), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((20047, 20087), 'tests.util.models.gpflow.models.vgp_matern_model', 'vgp_matern_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (20063, 20087), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((20826, 20840), 'trieste.data.Dataset', 'Dataset', (['*data'], {}), '(*data)\n', (20833, 20840), False, 'from trieste.data import Dataset\n'), ((21020, 21045), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (21043, 21045), False, 'import gpflow\n'), ((21102, 21118), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['*data'], {}), '(*data)\n', (21111, 21118), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((21190, 21204), 'trieste.data.Dataset', 'Dataset', (['*data'], {}), '(*data)\n', (21197, 21204), False, 'from trieste.data import Dataset\n'), ((21507, 21540), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (21516, 21540), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((21751, 21773), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (21771, 21773), False, 'import gpflow\n'), ((21968, 22076), 'tensorflow.Variable', 'tf.Variable', (['reference_model.data[0]'], {'trainable': '(False)', 'shape': '[None, *reference_model.data[0].shape[1:]]'}), '(reference_model.data[0], trainable=False, shape=[None, *\n reference_model.data[0].shape[1:]])\n', (21979, 22076), True, 'import tensorflow as tf\n'), ((22128, 22236), 'tensorflow.Variable', 'tf.Variable', (['reference_model.data[1]'], {'trainable': '(False)', 'shape': '[None, *reference_model.data[1].shape[1:]]'}), '(reference_model.data[1], trainable=False, shape=[None, *\n reference_model.data[1].shape[1:]])\n', (22139, 22236), True, 'import tensorflow as tf\n'), ((23168, 23179), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (23177, 23179), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((23217, 23242), 'gpflow.kernels.Matern32', 'gpflow.kernels.Matern32', ([], {}), '()\n', (23240, 23242), False, 'import gpflow\n'), ((23244, 23273), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (23271, 23273), False, 'import gpflow\n'), ((24087, 24112), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (24100, 24112), False, 'import pytest\n'), ((24415, 24435), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (24433, 24435), True, 'import tensorflow as tf\n'), ((24480, 24514), 'tests.util.models.gpflow.models.svgp_model', 'svgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (24490, 24514), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((24966, 24986), 'tensorflow.optimizers.Adam', 
'tf.optimizers.Adam', ([], {}), '()\n', (24984, 24986), True, 'import tensorflow as tf\n'), ((25126, 25160), 'tests.util.models.gpflow.models.svgp_model', 'svgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (25136, 25160), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((25517, 25551), 'tests.util.models.gpflow.models.svgp_model', 'svgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (25527, 25551), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((25810, 25827), 'tests.util.models.gpflow.models.svgp_model', 'svgp_model', (['*data'], {}), '(*data)\n', (25820, 25827), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((26233, 26258), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (26246, 26258), False, 'import pytest\n'), ((26417, 26442), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (26430, 26442), False, 'import pytest\n'), ((27334, 27377), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x_observed[:10]', 'y_observed[:10]'], {}), '(x_observed[:10], y_observed[:10])\n', (27343, 27377), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((28014, 28034), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (28032, 28034), True, 'import tensorflow as tf\n'), ((28115, 28165), 'tests.util.models.gpflow.models.vgp_matern_model', 'vgp_matern_model', (['x_observed[:10]', 'y_observed[:10]'], {}), '(x_observed[:10], y_observed[:10])\n', (28131, 28165), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((29301, 29344), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x_observed[:10]', 'y_observed[:10]'], {}), '(x_observed[:10], y_observed[:10])\n', (29310, 29344), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((29879, 29895), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['*data'], {}), '(*data)\n', (29888, 29895), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((30794, 30828), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (30807, 30828), False, 'import pytest\n'), ((31141, 31156), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x', 'y'], {}), '(x, y)\n', (31150, 31156), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((31197, 31226), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x[:5, :]', 'y[:5, :]'], {}), '(x[:5, :], y[:5, :])\n', (31206, 31226), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, 
vgp_matern_model, vgp_model\n'), ((32659, 32688), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x[:5, :]', 'y[:5, :]'], {}), '(x[:5, :], y[:5, :])\n', (32668, 32688), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((32721, 32753), 'tensorflow.reshape', 'tf.reshape', (['x[5:, :]', '[3, 6, -1]'], {}), '(x[5:, :], [3, 6, -1])\n', (32731, 32753), True, 'import tensorflow as tf\n'), ((32755, 32787), 'tensorflow.reshape', 'tf.reshape', (['y[5:, :]', '[3, 6, -1]'], {}), '(y[5:, :], [3, 6, -1])\n', (32765, 32787), True, 'import tensorflow as tf\n'), ((33170, 33237), 'tensorflow.concat', 'tf.concat', (['[x[:5, :], additional_data.query_points[i, ...]]'], {'axis': '(0)'}), '([x[:5, :], additional_data.query_points[i, ...]], axis=0)\n', (33179, 33237), True, 'import tensorflow as tf\n'), ((33251, 33318), 'tensorflow.concat', 'tf.concat', (['[y[:5, :], additional_data.observations[i, ...]]'], {'axis': '(0)'}), '([y[:5, :], additional_data.observations[i, ...]], axis=0)\n', (33260, 33318), True, 'import tensorflow as tf\n'), ((33586, 33658), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predj_mean5[i, ...]', 'predj_meani'], {'atol': '(1e-05)'}), '(predj_mean5[i, ...], predj_meani, atol=1e-05)\n', (33612, 33658), True, 'import numpy as np\n'), ((33666, 33736), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pred_meani', 'pred_mean5[i, ...]'], {'atol': '(1e-05)'}), '(pred_meani, pred_mean5[i, ...], atol=1e-05)\n', (33692, 33736), True, 'import numpy as np\n'), ((33744, 33812), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pred_vari', 'pred_var5[i, ...]'], {'atol': '(1e-05)'}), '(pred_vari, pred_var5[i, ...], atol=1e-05)\n', (33770, 33812), True, 'import numpy as np\n'), ((33820, 33890), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predj_covi', 'predj_cov5[i, ...]'], {'atol': '(1e-05)'}), '(predj_covi, predj_cov5[i, ...], atol=1e-05)\n', (33846, 33890), True, 'import numpy as np\n'), ((33898, 33968), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predy_vari', 'predy_var5[i, ...]'], {'atol': '(1e-05)'}), '(predy_vari, predy_var5[i, ...], atol=1e-05)\n', (33924, 33968), True, 'import numpy as np\n'), ((33976, 34046), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['predy_vari', 'predy_var5[i, ...]'], {'atol': '(1e-05)'}), '(predy_vari, predy_var5[i, ...], atol=1e-05)\n', (34002, 34046), True, 'import numpy as np\n'), ((34320, 34349), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x[:5, :]', 'y[:5, :]'], {}), '(x[:5, :], y[:5, :])\n', (34329, 34349), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((34381, 34413), 'tensorflow.reshape', 'tf.reshape', (['x[5:, :]', '[3, 6, -1]'], {}), '(x[5:, :], [3, 6, -1])\n', (34391, 34413), True, 'import tensorflow as tf\n'), ((34415, 34447), 'tensorflow.reshape', 'tf.reshape', (['y[5:, :]', '[3, 6, -1]'], {}), '(y[5:, :], [3, 6, -1])\n', (34425, 34447), True, 'import tensorflow as tf\n'), ((34719, 34786), 'tensorflow.concat', 'tf.concat', (['[x[:5, :], additional_data.query_points[i, ...]]'], {'axis': '(0)'}), '([x[:5, :], additional_data.query_points[i, ...]], axis=0)\n', (34728, 34786), True, 'import tensorflow as tf\n'), ((34800, 34867), 'tensorflow.concat', 'tf.concat', (['[y[:5, 
:], additional_data.observations[i, ...]]'], {'axis': '(0)'}), '([y[:5, :], additional_data.observations[i, ...]], axis=0)\n', (34809, 34867), True, 'import tensorflow as tf\n'), ((35022, 35056), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['samples[i]'], {'axis': '(0)'}), '(samples[i], axis=0)\n', (35036, 35056), True, 'import tensorflow as tf\n'), ((35078, 35134), 'tensorflow_probability.stats.covariance', 'tfp.stats.covariance', (['samples[i, :, :, 0]'], {'sample_axis': '(0)'}), '(samples[i, :, :, 0], sample_axis=0)\n', (35098, 35134), True, 'import tensorflow_probability as tfp\n'), ((35143, 35217), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sample_mean', 'predj_meani'], {'atol': '(0.01)', 'rtol': '(0.01)'}), '(sample_mean, predj_meani, atol=0.01, rtol=0.01)\n', (35169, 35217), True, 'import numpy as np\n'), ((35226, 35301), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sample_cov', 'predj_covi[0]'], {'atol': '(0.01)', 'rtol': '(0.01)'}), '(sample_cov, predj_covi[0], atol=0.01, rtol=0.01)\n', (35252, 35301), True, 'import numpy as np\n'), ((36495, 36562), 'pytest.skip', 'pytest.skip', (['"""Covariance between points is not supported for SGPR."""'], {}), "('Covariance between points is not supported for SGPR.')\n", (36506, 36562), False, 'import pytest\n'), ((38889, 38956), 'pytest.skip', 'pytest.skip', (['"""Covariance between points is not supported for SGPR."""'], {}), "('Covariance between points is not supported for SGPR.')\n", (38900, 38956), False, 'import pytest\n'), ((38967, 38992), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (38980, 38992), False, 'import pytest\n'), ((39239, 39256), 'tests.util.models.gpflow.models.sgpr_model', 'sgpr_model', (['*data'], {}), '(*data)\n', (39249, 39256), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((39268, 39302), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (39281, 39302), False, 'import pytest\n'), ((2500, 2522), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (2520, 2522), False, 'import gpflow\n'), ((3060, 3082), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (3080, 3082), False, 'import gpflow\n'), ((3476, 3516), 'typing.cast', 'cast', (['TrainableProbabilisticModel', 'model'], {}), '(TrainableProbabilisticModel, model)\n', (3480, 3516), False, 'from typing import Any, cast\n'), ((4054, 4076), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (4074, 4076), False, 'import gpflow\n'), ((5099, 5207), 'tensorflow.Variable', 'tf.Variable', (['reference_model.data[0]'], {'trainable': '(False)', 'shape': '[None, *reference_model.data[0].shape[1:]]'}), '(reference_model.data[0], trainable=False, shape=[None, *\n reference_model.data[0].shape[1:]])\n', (5110, 5207), True, 'import tensorflow as tf\n'), ((5279, 5387), 'tensorflow.Variable', 'tf.Variable', (['reference_model.data[1]'], {'trainable': '(False)', 'shape': '[None, *reference_model.data[1].shape[1:]]'}), '(reference_model.data[1], trainable=False, shape=[None, *\n reference_model.data[1].shape[1:]])\n', (5290, 5387), True, 'import tensorflow as tf\n'), ((5838, 5868), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'np.float64'}), '(5, dtype=np.float64)\n', (5847, 5868), True, 'import numpy as np\n'), ((6028, 6043), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x', 'y'], {}), 
'(x, y)\n', (6037, 6043), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((6141, 6166), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (6164, 6166), False, 'import gpflow\n'), ((6202, 6217), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x', 'y'], {}), '(x, y)\n', (6211, 6217), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((6309, 6329), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (6327, 6329), True, 'import tensorflow as tf\n'), ((6365, 6380), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['x', 'y'], {}), '(x, y)\n', (6374, 6380), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((8002, 8027), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (8025, 8027), False, 'import gpflow\n'), ((8139, 8156), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (8153, 8156), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((8237, 8269), 'tensorflow.ones', 'tf.ones', (['[dim]'], {'dtype': 'tf.float64'}), '([dim], dtype=tf.float64)\n', (8244, 8269), True, 'import tensorflow as tf\n'), ((8915, 8946), 'tensorflow.cast', 'tf.cast', (['(-2.0)'], {'dtype': 'tf.float64'}), '(-2.0, dtype=tf.float64)\n', (8922, 8946), True, 'import tensorflow as tf\n'), ((8954, 8984), 'tensorflow.cast', 'tf.cast', (['(5.0)'], {'dtype': 'tf.float64'}), '(5.0, dtype=tf.float64)\n', (8961, 8984), True, 'import tensorflow as tf\n'), ((9025, 9042), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (9039, 9042), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((9449, 9471), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (9469, 9471), False, 'import gpflow\n'), ((9733, 9777), 'tensorflow.math.log', 'tf.math.log', (['model.model.kernel.lengthscales'], {}), '(model.model.kernel.lengthscales)\n', (9744, 9777), True, 'import tensorflow as tf\n'), ((10291, 10313), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (10311, 10313), False, 'import gpflow\n'), ((10673, 10717), 'tensorflow_probability.bijectors.Sigmoid', 'tfp.bijectors.Sigmoid', ([], {'low': 'lower', 'high': 'upper'}), '(low=lower, high=upper)\n', (10694, 10717), True, 'import tensorflow_probability as tfp\n'), ((11211, 11233), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (11231, 11233), False, 'import gpflow\n'), ((11613, 11657), 'tensorflow_probability.bijectors.Sigmoid', 'tfp.bijectors.Sigmoid', ([], {'low': 'lower', 'high': 'upper'}), '(low=lower, high=upper)\n', (11634, 11657), True, 'import tensorflow_probability as tfp\n'), ((12082, 12104), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (12102, 12104), False, 'import gpflow\n'), ((12376, 12392), 'numpy.float64', 'np.float64', (['(-2.0)'], {}), '(-2.0)\n', (12386, 12392), True, 'import numpy as np\n'), ((12400, 12415), 'numpy.float64', 'np.float64', (['(1.0)'], {}), '(1.0)\n', (12410, 12415), True, 'import numpy as np\n'), ((12605, 12649), 'tensorflow_probability.bijectors.Sigmoid', 'tfp.bijectors.Sigmoid', ([], {'low': 'lower', 'high': 'upper'}), 
'(low=lower, high=upper)\n', (12626, 12649), True, 'import tensorflow_probability as tfp\n'), ((12954, 12978), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (12965, 12978), True, 'import numpy as np\n'), ((14201, 14223), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (14221, 14223), False, 'import gpflow\n'), ((14705, 14727), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (14725, 14727), False, 'import gpflow\n'), ((15440, 15472), 'tensorflow.cast', 'tf.cast', (['num_samples', 'tf.float32'], {}), '(num_samples, tf.float32)\n', (15447, 15472), True, 'import tensorflow as tf\n'), ((15777, 15799), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (15797, 15799), False, 'import gpflow\n'), ((16397, 16419), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (16417, 16419), False, 'import gpflow\n'), ((16619, 16634), 'trieste.logging.step_number', 'step_number', (['(42)'], {}), '(42)\n', (16630, 16634), False, 'from trieste.logging import step_number, tensorboard_writer\n'), ((17287, 17317), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'np.float64'}), '(5, dtype=np.float64)\n', (17296, 17317), True, 'import numpy as np\n'), ((17478, 17493), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x', 'y'], {}), '(x, y)\n', (17487, 17493), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((17579, 17604), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (17602, 17604), False, 'import gpflow\n'), ((17641, 17656), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x', 'y'], {}), '(x, y)\n', (17650, 17656), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((17770, 17795), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (17793, 17795), False, 'import gpflow\n'), ((17832, 17847), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x', 'y'], {}), '(x, y)\n', (17841, 17847), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((17956, 17976), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (17974, 17976), True, 'import tensorflow as tf\n'), ((18013, 18028), 'tests.util.models.gpflow.models.vgp_model', 'vgp_model', (['x', 'y'], {}), '(x, y)\n', (18022, 18028), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((18494, 18524), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'np.float64'}), '(5, dtype=np.float64)\n', (18503, 18524), True, 'import numpy as np\n'), ((19005, 19027), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (19025, 19027), False, 'import gpflow\n'), ((19937, 19959), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (19957, 19959), False, 'import gpflow\n'), ((21391, 21413), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (21411, 21413), False, 'import gpflow\n'), ((21580, 21605), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (21603, 21605), False, 'import gpflow\n'), ((22291, 22316), 'gpflow.optimizers.Scipy', 
'gpflow.optimizers.Scipy', ([], {}), '()\n', (22314, 22316), False, 'import gpflow\n'), ((22787, 22798), 'tests.util.models.gpflow.models.mock_data', 'mock_data', ([], {}), '()\n', (22796, 22798), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((23612, 23628), 'tensorflow.zeros', 'tf.zeros', (['[1, 4]'], {}), '([1, 4])\n', (23620, 23628), True, 'import tensorflow as tf\n'), ((23630, 23646), 'tensorflow.zeros', 'tf.zeros', (['[1, 1]'], {}), '([1, 1])\n', (23638, 23646), True, 'import tensorflow as tf\n'), ((23680, 23696), 'tensorflow.zeros', 'tf.zeros', (['[5, 4]'], {}), '([5, 4])\n', (23688, 23696), True, 'import tensorflow as tf\n'), ((23698, 23714), 'tensorflow.zeros', 'tf.zeros', (['[5, 1]'], {}), '([5, 1])\n', (23706, 23714), True, 'import tensorflow as tf\n'), ((24035, 24051), 'tensorflow.zeros', 'tf.zeros', (['[1, 4]'], {}), '([1, 4])\n', (24043, 24051), True, 'import tensorflow as tf\n'), ((24053, 24069), 'tensorflow.zeros', 'tf.zeros', (['[1, 1]'], {}), '([1, 1])\n', (24061, 24069), True, 'import tensorflow as tf\n'), ((23811, 23827), 'tensorflow.zeros', 'tf.zeros', (['[3, 5]'], {}), '([3, 5])\n', (23819, 23827), True, 'import tensorflow as tf\n'), ((23829, 23845), 'tensorflow.zeros', 'tf.zeros', (['[3, 1]'], {}), '([3, 1])\n', (23837, 23845), True, 'import tensorflow as tf\n'), ((23856, 23872), 'tensorflow.zeros', 'tf.zeros', (['[3, 4]'], {}), '([3, 4])\n', (23864, 23872), True, 'import tensorflow as tf\n'), ((23874, 23890), 'tensorflow.zeros', 'tf.zeros', (['[3, 2]'], {}), '([3, 2])\n', (23882, 23890), True, 'import tensorflow as tf\n'), ((24226, 24250), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (24237, 24250), True, 'import numpy as np\n'), ((24767, 24791), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (24778, 24791), True, 'import numpy as np\n'), ((25391, 25415), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (25402, 25415), True, 'import numpy as np\n'), ((26128, 26152), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (26139, 26152), True, 'import numpy as np\n'), ((26296, 26321), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (26319, 26321), False, 'import gpflow\n'), ((26349, 26383), 'tests.util.models.gpflow.models.svgp_model', 'svgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (26359, 26383), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((26475, 26495), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (26493, 26495), True, 'import tensorflow as tf\n'), ((26523, 26557), 'tests.util.models.gpflow.models.svgp_model', 'svgp_model', (['x_observed', 'y_observed'], {}), '(x_observed, y_observed)\n', (26533, 26557), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((26808, 26832), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (26819, 26832), True, 'import numpy as np\n'), ((27036, 27056), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (27054, 27056), True, 'import tensorflow as tf\n'), ((27226, 27251), 
'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (27249, 27251), False, 'import gpflow\n'), ((27683, 27706), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (27694, 27706), True, 'import numpy as np\n'), ((29157, 29181), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (29168, 29181), True, 'import numpy as np\n'), ((31853, 31884), 'tensorflow.linalg.diag_part', 'tf.linalg.diag_part', (['predj_cov5'], {}), '(predj_cov5)\n', (31872, 31884), True, 'import tensorflow as tf\n'), ((33363, 33380), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['xi', 'yi'], {}), '(xi, yi)\n', (33372, 33380), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((34912, 34929), 'tests.util.models.gpflow.models.gpr_model', 'gpr_model', (['xi', 'yi'], {}), '(xi, yi)\n', (34921, 34929), False, 'from tests.util.models.gpflow.models import ModelFactoryType, gpr_model, mock_data, sgpr_model, svgp_model, two_output_svgp_model, vgp_matern_model, vgp_model\n'), ((35494, 35516), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (35514, 35516), False, 'import gpflow\n'), ((35612, 35645), 'tensorflow.repeat', 'tf.repeat', (['y', 'num_outputs'], {'axis': '(1)'}), '(y, num_outputs, axis=1)\n', (35621, 35645), True, 'import tensorflow as tf\n'), ((36335, 36357), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (36355, 36357), False, 'import gpflow\n'), ((36692, 36720), 'tensorflow.shape', 'tf.shape', (['model.model.q_sqrt'], {}), '(model.model.q_sqrt)\n', (36700, 36720), True, 'import tensorflow as tf\n'), ((37762, 37784), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (37782, 37784), False, 'import gpflow\n'), ((37946, 37966), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (37964, 37966), True, 'import tensorflow as tf\n'), ((38059, 38087), 'tensorflow.concat', 'tf.concat', (['[y1, y2]'], {'axis': '(-1)'}), '([y1, y2], axis=-1)\n', (38068, 38087), True, 'import tensorflow as tf\n'), ((39043, 39074), 'tensorflow.expand_dims', 'tf.expand_dims', (['data[0]'], {'axis': '(0)'}), '(data[0], axis=0)\n', (39057, 39074), True, 'import tensorflow as tf\n'), ((2465, 2477), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2474, 2477), True, 'import numpy as np\n'), ((3025, 3037), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3034, 3037), True, 'import numpy as np\n'), ((4019, 4031), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (4028, 4031), True, 'import numpy as np\n'), ((4578, 4598), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), '()\n', (4596, 4598), True, 'import tensorflow as tf\n'), ((5465, 5490), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (5488, 5490), False, 'import gpflow\n'), ((6675, 6700), 'trieste.data.Dataset', 'Dataset', (['data[0]', 'data[1]'], {}), '(data[0], data[1])\n', (6682, 6700), False, 'from trieste.data import Dataset\n'), ((6813, 6838), 'trieste.data.Dataset', 'Dataset', (['data[0]', 'data[1]'], {}), '(data[0], data[1])\n', (6820, 6838), False, 'from trieste.data import Dataset\n'), ((6947, 6972), 'trieste.data.Dataset', 'Dataset', (['data[0]', 'data[1]'], {}), '(data[0], data[1])\n', (6954, 6972), False, 'from trieste.data import Dataset\n'), ((7101, 7126), 'trieste.data.Dataset', 'Dataset', (['data[0]', 'data[1]'], {}), '(data[0], data[1])\n', (7108, 
7126), False, 'from trieste.data import Dataset\n'), ((7878, 7903), 'numpy.arange', 'np.arange', (['(1)', '(5 * dim + 1)'], {}), '(1, 5 * dim + 1)\n', (7887, 7903), True, 'import numpy as np\n'), ((8498, 8542), 'tensorflow.math.log', 'tf.math.log', (['model.model.kernel.lengthscales'], {}), '(model.model.kernel.lengthscales)\n', (8509, 8542), True, 'import tensorflow as tf\n'), ((8774, 8818), 'tensorflow_probability.bijectors.Sigmoid', 'tfp.bijectors.Sigmoid', ([], {'low': 'lower', 'high': 'upper'}), '(low=lower, high=upper)\n', (8795, 8818), True, 'import tensorflow_probability as tfp\n'), ((9398, 9424), 'numpy.arange', 'np.arange', (['(1)', '(1 + 10 * dim)'], {}), '(1, 1 + 10 * dim)\n', (9407, 9424), True, 'import numpy as np\n'), ((9549, 9566), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (9563, 9566), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((10240, 10266), 'numpy.arange', 'np.arange', (['(1)', '(1 + 10 * dim)'], {}), '(1, 1 + 10 * dim)\n', (10249, 10266), True, 'import numpy as np\n'), ((10391, 10408), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (10405, 10408), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((11160, 11186), 'numpy.arange', 'np.arange', (['(1)', '(1 + 10 * dim)'], {}), '(1, 1 + 10 * dim)\n', (11169, 11186), True, 'import numpy as np\n'), ((11311, 11328), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (11325, 11328), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((12031, 12057), 'numpy.arange', 'np.arange', (['(1)', '(1 + 10 * dim)'], {}), '(1, 1 + 10 * dim)\n', (12040, 12057), True, 'import numpy as np\n'), ((12182, 12199), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (12196, 12199), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((14150, 14176), 'numpy.arange', 'np.arange', (['(1)', '(1 + 10 * dim)'], {}), '(1, 1 + 10 * dim)\n', (14159, 14176), True, 'import numpy as np\n'), ((14301, 14318), 'tests.util.models.models.fnc_3x_plus_10', 'fnc_3x_plus_10', (['x'], {}), '(x)\n', (14315, 14318), False, 'from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10\n'), ((14670, 14682), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (14679, 14682), True, 'import numpy as np\n'), ((15052, 15081), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_predict', '(-2)'], {}), '(x_predict, -2)\n', (15066, 15081), True, 'import tensorflow as tf\n'), ((15201, 15230), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_predict', '(-2)'], {}), '(x_predict, -2)\n', (15215, 15230), True, 'import tensorflow as tf\n'), ((15742, 15754), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (15751, 15754), True, 'import numpy as np\n'), ((16359, 16374), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (16368, 16374), True, 'import numpy as np\n'), ((18970, 18982), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (18979, 18982), True, 'import numpy as np\n'), ((19899, 19912), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (19908, 19912), True, 'import numpy as np\n'), ((21354, 21366), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (21363, 21366), True, 'import numpy as np\n'), ((35456, 35471), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (35465, 35471), True, 'import numpy as np\n'), ((36297, 36312), 'numpy.arange', 'np.arange', 
(['(1)', '(5)'], {}), '(1, 5)\n', (36306, 36312), True, 'import numpy as np\n'), ((37724, 37739), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (37733, 37739), True, 'import numpy as np\n'), ((3235, 3257), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (3255, 3257), False, 'import gpflow\n'), ((19436, 19458), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (19456, 19458), False, 'import gpflow\n'), ((36782, 36809), 'tensorflow.eye', 'tf.eye', (['num_inducing_points'], {}), '(num_inducing_points)\n', (36788, 36809), True, 'import tensorflow as tf\n'), ((31011, 31026), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (31020, 31026), True, 'import numpy as np\n'), ((32527, 32543), 'numpy.arange', 'np.arange', (['(1)', '(24)'], {}), '(1, 24)\n', (32536, 32543), True, 'import numpy as np\n'), ((34188, 34204), 'numpy.arange', 'np.arange', (['(1)', '(24)'], {}), '(1, 24)\n', (34197, 34204), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import numpy as np
import cv2
import SimpleITK as sitk
from scipy.spatial import distance
from scipy import stats
import sys
import time
############### FUNCTIONS ##########################
def imcomplement(im):
if np.max(im)>1:
imout=255-im
else:
imout=1-im
return imout
def mat2gray(img):
max_img=np.max(img)
min_img=np.min(img)
imgout=(img-min_img)/(max_img-min_img)
return imgout
def im2double(img):
imgout=img.astype('float32')
imgout= mat2gray(imgout)
return imgout
def imreconstruct(marker,mask):
markeritk=sitk.GetImageFromArray(marker)
maskitk=sitk.GetImageFromArray(mask)
recfilt=sitk.ReconstructionByDilationImageFilter()
rectoutitk=recfilt.Execute(markeritk,maskitk)
rectout=sitk.GetArrayFromImage(rectoutitk)
return rectout
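# Hedged usage sketch (illustrative, not part of the original pipeline):
# reconstruction-by-dilation grows `marker` inside `mask`, which is what the
# main section below relies on for hysteresis-style thresholding (weak pixels
# are kept only when connected to a strong seed).
def _demo_imreconstruct():
    mask = np.zeros((5, 5), dtype='float32')
    mask[1:4, 1:4] = 1.0                 # candidate region
    marker = np.zeros_like(mask)
    marker[2, 2] = 1.0                   # single strong seed inside it
    return imreconstruct(marker, mask)  # recovers the full 3x3 block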
def eigen_cov(x,y):
mx=np.mean(x)
my=np.mean(y)
x=x-mx
y=y-my
cxx=np.var(x)
cxy=0
cyy=np.var(y);
nx=len(x)
for ct in range(nx):
cxy=cxy+x[ct]*y[ct];
cxy=cxy/nx;
C=np.zeros((2,2))
C[0,0]=cxx
C[0,1]=cxy
C[1,0]=cxy
C[1,1]=cyy
D,V=np.linalg.eig(C)
return V,D
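# Illustrative check (an assumed reading of the intended use): for points
# scattered along a line, the eigenvector with the smaller eigenvalue is the
# normal of that line; filter_result3 below samples a saturation profile
# along this normal direction.
def _demo_eigen_cov():
    t = np.linspace(0.0, 1.0, 50)
    V, D = eigen_cov(10.0 * t, 1.0 * t)    # points along direction (10, 1)
    vmin = V[:, 0] if D[0] < D[1] else V[:, 1]
    return vmin                           # perpendicular to (10, 1)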
def is_inside(img,x,y):
x=int(x)
y=int(y)
if(x>0 and y>0 and x<img.shape[1] and y<img.shape[0]):
return True
else:
return False
def improfile(img,x,y,n):
xm=x[0]
x0=x[1]
ym=y[0]
y0=y[1]
a = np.arctan((y0 - ym) / (x0 - xm))
i=range(0,100,int(100/n))
cx=np.squeeze(np.zeros((1,len(i))))
cy=np.squeeze(np.zeros((1,len(i))))
c=np.squeeze(np.zeros((1,len(i))))
ct=0
    for t in range(0,100,int(100/n)):  # step must match the array sizing above (was hardcoded to 30)
tf=t/100.0
cx[ct] = int(xm + (x0 - xm)*tf)
cy[ct] = int(ym + (y0 - ym)*tf)
if(is_inside(img,cx[ct],cy[ct])):
c[ct]=img[int(cy[ct]), int(cx[ct])]
else:
c[ct]=1;
ct=ct+1
return c,cx,cy
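# Small sanity example (illustrative): sample an n-point intensity profile
# between two image points; samples falling outside the image read as 1.
def _demo_improfile():
    img = np.tile(np.linspace(0.0, 1.0, 100), (100, 1))   # horizontal gradient
    c, cx, cy = improfile(img, np.array([10.0, 90.0]), np.array([50.0, 50.0]), 30)
    return c   # roughly increasing values sampled along row 50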
def filter_result3(img,bw_result,ths,thm):
    # Prune detections by sampling a saturation profile perpendicular to the
    # local crack direction; a neighborhood is kept only when the profile end
    # difference (s < ths) and end maximum (m < thm) are both small.
    bw_result_orig=np.copy(bw_result)
points=np.where(bw_result>0)
points=np.reshape(points,np.shape(points))
points=np.transpose(points)
npoints=np.shape(points)[0]
k=20
step=5
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
sat=hsv[:,:,1]/255
bw_result_filter=np.zeros(np.shape(bw_result))
xc=points[:,1]
yc=points[:,0]
for ct in range(0,npoints,step):
#print(ct/npoints)
ystart=max(0,yc[ct]-k);
xstart=max(0,xc[ct]-k);
yend=min(np.shape(img)[0],yc[ct]+k);
xend=min(np.shape(img)[1],xc[ct]+k);
p=points[ct,:]
p=np.reshape(p,(1,2))
Dpoints=distance.cdist(p,points)
Dpoints=np.squeeze(Dpoints)
ipoints=np.squeeze(np.where(Dpoints<40))
xneigh=points[ipoints,1];
yneigh=points[ipoints,0];
V,D=eigen_cov(xneigh,yneigh)
vmin=V[:,0];
if D[1]<D[0]:
vmin=V[:,1];
x1=xc[ct]-k*vmin[0];
y1=yc[ct]-k*vmin[1];
x2=xc[ct]+k*vmin[0];
y2=yc[ct]+k*vmin[1];
p,px,py=improfile(sat,np.array([x1,x2]),np.array([y1,y2]),30);
s=np.abs(np.mean(p[0:5])-np.mean(p[len(p)-5:len(p)]));
s=round(s*100);
m=np.max([p[0:5],p[len(p)-5:len(p)]]);
if(s<ths and m<thm):
bw_result_filter[ystart:yend,xstart:xend]=bw_result_orig[ystart:yend,xstart:xend];
return bw_result_filter
def min_openings(im,LEN,DEG_NUM):
imo=[];
for i in range(DEG_NUM):
#DEG=(i)*((360/DEG_NUM)/2)
filtername=str(i+1)+'se.txt'
se=np.loadtxt('filters/videos/filters/'+filtername)
if(i==0):
se=np.reshape(se,(1,len(se)))
if(i==6):
se=np.reshape(se,(len(se),1))
se=se.astype('uint8')
imoi=cv2.erode(im,se)
imoi=cv2.dilate(imoi,se)
imo.append(imoi)
imB=imo[0]
for i in range(DEG_NUM-1):
k=i+1
imB=np.minimum(imB,imo[k])
return imB
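# The oriented structuring elements are read from filters/videos/filters/*.txt.
# A hypothetical way to build an equivalent line SE of a given length and angle
# (an assumption about how those files were produced, not verified):
def _make_line_se(length, deg):
    se = np.zeros((length, length), dtype='uint8')
    c = (length - 1) / 2.0
    r = np.deg2rad(deg)
    dx, dy = c * np.cos(r), c * np.sin(r)
    cv2.line(se, (int(round(c - dx)), int(round(c - dy))),
             (int(round(c + dx)), int(round(c + dy))), 1)
    return se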
def smooth_cross_section(imV,LEN_diff,DEG_NUM):
imV_c=imcomplement(imV)
imd=[]
for i in range(12):
k=i+1
se1=np.loadtxt('filters/videos/filters/'+str(k)+'linekernel1.txt')
se2=np.loadtxt('filters/videos/filters/'+str(k)+'linekernel2.txt')
if(i==0):
se1=np.reshape(se1,(1,len(se1)))
se2=np.reshape(se2,(len(se2),1))
if(i==6):
se1=np.reshape(se1,(len(se1),1))
se2=np.reshape(se2,(1,len(se2)))
temp=cv2.filter2D(imV_c.astype('float32'),-1,se1)
imdi=cv2.filter2D(temp,-1,se2)
imdi[imdi<0]=0
imd.append(imdi)
imDiff=imd[0]
for i in range(11):
k=i+1
imDiff=np.maximum(imDiff,imd[k])
imDiff=mat2gray(imDiff)
return imDiff
def reconstruction_by_dilation(im,LEN,DEG_NUM):
imo=[];
for i in range(DEG_NUM):
#DEG=(i)*((360/DEG_NUM)/2)
filtername=str(i+1)+'se.txt'
se=np.loadtxt('filters/videos/filters/'+filtername)
if(i==0):
se=np.reshape(se,(1,len(se)))
if(i==6):
se=np.reshape(se,(len(se),1))
se=se.astype('uint8')
imoi=cv2.erode(im,se)
imoi=cv2.dilate(imoi,se)
imo.append(imoi)
imC=imo[0]
for i in range(DEG_NUM-1):
k=i+1
imC=np.maximum(imC,imo[k])
imC2=imreconstruct(imC,im)
imC2=mat2gray(imC2)
return imC2
def reconstruction_by_erosion(im,LEN,DEG_NUM):
im_close=[];
for i in range(DEG_NUM):
#DEG=(i)*((360/DEG_NUM)/2)
filtername=str(i+1)+'se.txt'
se=np.loadtxt('filters/videos/filters/'+filtername)
if(i==0):
se=np.reshape(se,(1,len(se)))
if(i==6):
se=np.reshape(se,(len(se),1))
se=se.astype('uint8')
im_closei=cv2.dilate(im,se)
im_closei=cv2.erode(im_closei,se)
im_close.append(im_closei);
imTemp39=im_close[0]
for i in range(DEG_NUM-1):
k=i+1
imTemp39=np.minimum(imTemp39,im_close[k])
marker=imcomplement(imTemp39)
mask=imcomplement(im)
imF=imreconstruct(marker,mask)
imF=mat2gray(imF)
imF=imcomplement(imF)
return imF
def find_th(x):
#mode= stats.mode(x)
(_, idx, counts) = np.unique(x, return_index=True, return_counts=True)
index = idx[np.argmax(counts)]
mx = x[index]
sx=np.std(x)
thl=mx+3*sx
thh=mx+4*sx
return thl,thh
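# Illustrative example: the low/high thresholds are the mode of the filtered
# response plus 3 and 4 standard deviations, so on a mostly-flat response only
# strong outliers pass TH_HIGH.
def _demo_find_th():
    x = np.concatenate([np.zeros(1000), np.array([0.5, 0.6, 0.9])])
    return find_th(x)   # both thresholds sit well above the background level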
############ MAIN ##############
if len(sys.argv)<3:
    print('example usage : python crack_detection2 images_milestone2/1.jpg 50 (optional: output.jpg output_bin.jpg)')
    sys.exit(1)
width_cm=int(sys.argv[2])
if len(sys.argv)==5:
img_file_out=sys.argv[3]
img_file_out_bin=sys.argv[4]
else:
img_file_out='/Applications/XAMPP/xamppfiles/htdocs/bd/uploads/raw/output.png'
img_file_out_bin='/Applications/XAMPP/xamppfiles/htdocs/bd/uploads/raw/output_bin.png'
img_file=sys.argv[1]
print('processing '+img_file)
imgorig=cv2.imread(img_file)
start_time = time.time()
size_orig=np.shape(imgorig)
print(size_orig)
## resize if the original size is different from dataset images
## so we can keep the same parameters for the filters
res=size_orig[1]/float(width_cm);
res_opt=65; # optimal resolution for bilateral filter
scale = float(res_opt)/float(res);
print(scale)
d=int(51/scale)
sigmaColor=int(201)
sigmaSpace =int(201/scale);
if scale < 5:
resize_scale=1.8
rows_dataset=int(2448/resize_scale)
cols_dataset=int(3264/resize_scale)
img_blur = cv2.bilateralFilter(cv2.resize(imgorig,(cols_dataset,rows_dataset)) ,int(d/resize_scale),sigmaColor,int(sigmaSpace/resize_scale))
img_blur=cv2.resize(img_blur,(size_orig[1],size_orig[0]))
#img_blur = cv2.bilateralFilter(imgorig ,d,sigmaColor,sigmaSpace)
else:
img_blur =imgorig
##
print("bilateral filter --- %s seconds ---" % (time.time() - start_time))
res_opt=16 # working resolution for the morphological filtering stage
scale = float(res_opt)/float(res)
img=cv2.resize(img_blur,(int(size_orig[1]*scale),int(size_orig[0]*scale)))
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
im=hsv[:,:,2]
bw_mask=np.zeros(np.shape(im))
bw_mask_offr=0;#round(np.shape(im)[0]/20)
bw_mask_offc=0;#round(np.shape(im)[1]/20)
bw_mask[bw_mask_offr:np.shape(im)[0]-bw_mask_offr, bw_mask_offc:np.shape(im)[1]-bw_mask_offc]=1;
im=mat2gray(im) #*mat2gray(bw_mask)
im=imcomplement(im)
im=im2double(im)
DEG_NUM=12;
LEN_c=11;
LEN_o=11;
LEN_diff=7;
ic1=reconstruction_by_dilation(im,LEN_c,DEG_NUM)
io1=min_openings(im,LEN_o,DEG_NUM)
iv=mat2gray(ic1-io1)
imDiff=smooth_cross_section(iv,LEN_diff,DEG_NUM)  # note: both length args are currently unused inside
imL=reconstruction_by_dilation(imDiff,LEN_c,DEG_NUM)
imF=reconstruction_by_erosion(imL,LEN_c,DEG_NUM)
F=np.squeeze(np.reshape(imF,(1,imF.size)))
TH_LOW,TH_HIGH=find_th(F)
#TH_LOW=0.12;
#TH_HIGH=0.2;
min_obj=5;
min_hole=10;
mask=np.zeros(np.shape(imF))
marker=np.zeros(np.shape(imF))
mask[imF>TH_LOW]=1
marker[imF>TH_HIGH]=1
bw_result=imreconstruct(marker,mask)
bw_result=filter_result3(img,bw_result,4,0.2)
bw_result=cv2.resize(bw_result,(size_orig[1],size_orig[0]))
imgr=imgorig[:,:,2];
imgr[bw_result>0]=255;
imgorig[:,:,2]=imgr;
print("completed --- %s seconds ---" % (time.time() - start_time))
print('saving output file: '+img_file_out)
cv2.imwrite(img_file_out,imgorig)
cv2.imwrite(img_file_out_bin,bw_result*255)
print('done ')
| [
"cv2.dilate",
"cv2.filter2D",
"numpy.array",
"numpy.mean",
"numpy.reshape",
"numpy.where",
"cv2.erode",
"SimpleITK.GetArrayFromImage",
"numpy.max",
"numpy.min",
"numpy.maximum",
"numpy.arctan",
"numpy.linalg.eig",
"numpy.argmax",
"numpy.squeeze",
"cv2.cvtColor",
"numpy.std",
"cv2.r... | [((7290, 7310), 'cv2.imread', 'cv2.imread', (['img_file'], {}), '(img_file)\n', (7300, 7310), False, 'import cv2\n'), ((7325, 7336), 'time.time', 'time.time', ([], {}), '()\n', (7334, 7336), False, 'import time\n'), ((7348, 7365), 'numpy.shape', 'np.shape', (['imgorig'], {}), '(imgorig)\n', (7356, 7365), True, 'import numpy as np\n'), ((8403, 8439), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (8415, 8439), False, 'import cv2\n'), ((9384, 9435), 'cv2.resize', 'cv2.resize', (['bw_result', '(size_orig[1], size_orig[0])'], {}), '(bw_result, (size_orig[1], size_orig[0]))\n', (9394, 9435), False, 'import cv2\n'), ((9617, 9651), 'cv2.imwrite', 'cv2.imwrite', (['img_file_out', 'imgorig'], {}), '(img_file_out, imgorig)\n', (9628, 9651), False, 'import cv2\n'), ((9652, 9698), 'cv2.imwrite', 'cv2.imwrite', (['img_file_out_bin', '(bw_result * 255)'], {}), '(img_file_out_bin, bw_result * 255)\n', (9663, 9698), False, 'import cv2\n'), ((484, 495), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (490, 495), True, 'import numpy as np\n'), ((509, 520), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (515, 520), True, 'import numpy as np\n'), ((740, 770), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['marker'], {}), '(marker)\n', (762, 770), True, 'import SimpleITK as sitk\n'), ((784, 812), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['mask'], {}), '(mask)\n', (806, 812), True, 'import SimpleITK as sitk\n'), ((826, 868), 'SimpleITK.ReconstructionByDilationImageFilter', 'sitk.ReconstructionByDilationImageFilter', ([], {}), '()\n', (866, 868), True, 'import SimpleITK as sitk\n'), ((933, 967), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['rectoutitk'], {}), '(rectoutitk)\n', (955, 967), True, 'import SimpleITK as sitk\n'), ((1019, 1029), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1026, 1029), True, 'import numpy as np\n'), ((1038, 1048), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1045, 1048), True, 'import numpy as np\n'), ((1082, 1091), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (1088, 1091), True, 'import numpy as np\n'), ((1112, 1121), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (1118, 1121), True, 'import numpy as np\n'), ((1218, 1234), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1226, 1234), True, 'import numpy as np\n'), ((1307, 1323), 'numpy.linalg.eig', 'np.linalg.eig', (['C'], {}), '(C)\n', (1320, 1323), True, 'import numpy as np\n'), ((1601, 1633), 'numpy.arctan', 'np.arctan', (['((y0 - ym) / (x0 - xm))'], {}), '((y0 - ym) / (x0 - xm))\n', (1610, 1633), True, 'import numpy as np\n'), ((2201, 2219), 'numpy.copy', 'np.copy', (['bw_result'], {}), '(bw_result)\n', (2208, 2219), True, 'import numpy as np\n'), ((2233, 2256), 'numpy.where', 'np.where', (['(bw_result > 0)'], {}), '(bw_result > 0)\n', (2241, 2256), True, 'import numpy as np\n'), ((2315, 2335), 'numpy.transpose', 'np.transpose', (['points'], {}), '(points)\n', (2327, 2335), True, 'import numpy as np\n'), ((2400, 2436), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (2412, 2436), False, 'import cv2\n'), ((6564, 6615), 'numpy.unique', 'np.unique', (['x'], {'return_index': '(True)', 'return_counts': '(True)'}), '(x, return_index=True, return_counts=True)\n', (6573, 6615), True, 'import numpy as np\n'), ((6679, 6688), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (6685, 6688), True, 'import numpy as np\n'), ((7996, 8046), 'cv2.resize', 'cv2.resize', (['img_blur', 
'(size_orig[1], size_orig[0])'], {}), '(img_blur, (size_orig[1], size_orig[0]))\n', (8006, 8046), False, 'import cv2\n'), ((8472, 8484), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (8480, 8484), True, 'import numpy as np\n'), ((9070, 9100), 'numpy.reshape', 'np.reshape', (['imF', '(1, imF.size)'], {}), '(imF, (1, imF.size))\n', (9080, 9100), True, 'import numpy as np\n'), ((9198, 9211), 'numpy.shape', 'np.shape', (['imF'], {}), '(imF)\n', (9206, 9211), True, 'import numpy as np\n'), ((9230, 9243), 'numpy.shape', 'np.shape', (['imF'], {}), '(imF)\n', (9238, 9243), True, 'import numpy as np\n'), ((364, 374), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (370, 374), True, 'import numpy as np\n'), ((2285, 2301), 'numpy.shape', 'np.shape', (['points'], {}), '(points)\n', (2293, 2301), True, 'import numpy as np\n'), ((2349, 2365), 'numpy.shape', 'np.shape', (['points'], {}), '(points)\n', (2357, 2365), True, 'import numpy as np\n'), ((2491, 2510), 'numpy.shape', 'np.shape', (['bw_result'], {}), '(bw_result)\n', (2499, 2510), True, 'import numpy as np\n'), ((2815, 2836), 'numpy.reshape', 'np.reshape', (['p', '(1, 2)'], {}), '(p, (1, 2))\n', (2825, 2836), True, 'import numpy as np\n'), ((2852, 2877), 'scipy.spatial.distance.cdist', 'distance.cdist', (['p', 'points'], {}), '(p, points)\n', (2866, 2877), False, 'from scipy.spatial import distance\n'), ((2894, 2913), 'numpy.squeeze', 'np.squeeze', (['Dpoints'], {}), '(Dpoints)\n', (2904, 2913), True, 'import numpy as np\n'), ((3798, 3848), 'numpy.loadtxt', 'np.loadtxt', (["('filters/videos/filters/' + filtername)"], {}), "('filters/videos/filters/' + filtername)\n", (3808, 3848), True, 'import numpy as np\n'), ((4022, 4039), 'cv2.erode', 'cv2.erode', (['im', 'se'], {}), '(im, se)\n', (4031, 4039), False, 'import cv2\n'), ((4054, 4074), 'cv2.dilate', 'cv2.dilate', (['imoi', 'se'], {}), '(imoi, se)\n', (4064, 4074), False, 'import cv2\n'), ((4177, 4200), 'numpy.minimum', 'np.minimum', (['imB', 'imo[k]'], {}), '(imB, imo[k])\n', (4187, 4200), True, 'import numpy as np\n'), ((4801, 4828), 'cv2.filter2D', 'cv2.filter2D', (['temp', '(-1)', 'se2'], {}), '(temp, -1, se2)\n', (4813, 4828), False, 'import cv2\n'), ((4952, 4978), 'numpy.maximum', 'np.maximum', (['imDiff', 'imd[k]'], {}), '(imDiff, imd[k])\n', (4962, 4978), True, 'import numpy as np\n'), ((5209, 5259), 'numpy.loadtxt', 'np.loadtxt', (["('filters/videos/filters/' + filtername)"], {}), "('filters/videos/filters/' + filtername)\n", (5219, 5259), True, 'import numpy as np\n'), ((5433, 5450), 'cv2.erode', 'cv2.erode', (['im', 'se'], {}), '(im, se)\n', (5442, 5450), False, 'import cv2\n'), ((5465, 5485), 'cv2.dilate', 'cv2.dilate', (['imoi', 'se'], {}), '(imoi, se)\n', (5475, 5485), False, 'import cv2\n'), ((5588, 5611), 'numpy.maximum', 'np.maximum', (['imC', 'imo[k]'], {}), '(imC, imo[k])\n', (5598, 5611), True, 'import numpy as np\n'), ((5874, 5924), 'numpy.loadtxt', 'np.loadtxt', (["('filters/videos/filters/' + filtername)"], {}), "('filters/videos/filters/' + filtername)\n", (5884, 5924), True, 'import numpy as np\n'), ((6103, 6121), 'cv2.dilate', 'cv2.dilate', (['im', 'se'], {}), '(im, se)\n', (6113, 6121), False, 'import cv2\n'), ((6141, 6165), 'cv2.erode', 'cv2.erode', (['im_closei', 'se'], {}), '(im_closei, se)\n', (6150, 6165), False, 'import cv2\n'), ((6294, 6327), 'numpy.minimum', 'np.minimum', (['imTemp39', 'im_close[k]'], {}), '(imTemp39, im_close[k])\n', (6304, 6327), True, 'import numpy as np\n'), ((6633, 6650), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', 
(6642, 6650), True, 'import numpy as np\n'), ((7872, 7921), 'cv2.resize', 'cv2.resize', (['imgorig', '(cols_dataset, rows_dataset)'], {}), '(imgorig, (cols_dataset, rows_dataset))\n', (7882, 7921), False, 'import cv2\n'), ((2942, 2964), 'numpy.where', 'np.where', (['(Dpoints < 40)'], {}), '(Dpoints < 40)\n', (2950, 2964), True, 'import numpy as np\n'), ((3298, 3316), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (3306, 3316), True, 'import numpy as np\n'), ((3316, 3334), 'numpy.array', 'np.array', (['[y1, y2]'], {}), '([y1, y2])\n', (3324, 3334), True, 'import numpy as np\n'), ((8204, 8215), 'time.time', 'time.time', ([], {}), '()\n', (8213, 8215), False, 'import time\n'), ((9545, 9556), 'time.time', 'time.time', ([], {}), '()\n', (9554, 9556), False, 'import time\n'), ((2702, 2715), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (2710, 2715), True, 'import numpy as np\n'), ((2748, 2761), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (2756, 2761), True, 'import numpy as np\n'), ((3357, 3372), 'numpy.mean', 'np.mean', (['p[0:5]'], {}), '(p[0:5])\n', (3364, 3372), True, 'import numpy as np\n'), ((8594, 8606), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (8602, 8606), True, 'import numpy as np\n'), ((8637, 8649), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (8645, 8649), True, 'import numpy as np\n')] |
from multiprocessing import Pool, cpu_count
import copy
import gzip
import math
import os
import sys
import orjson as json
# import json
from pathlib import Path
import pyconll
import pyconll.util
from pycountry import languages
try:
from utf8.utils import *
except:
# to solve issue with ipython executing this import
from utils import *
try:
from .preprocess_conllu import *
# from preprocessors.preprocess_conllu import *
except:
from preprocess_conllu import *
import pandas as pd
import numpy as np
from scipy import stats
import bokeh
from bokeh.plotting import figure, show
# from bokeh.palettes import Spectral4
# from bokeh.io import output_file
from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label
from bokeh.models.layouts import Column, Panel, Tabs
from bokeh.models.callbacks import CustomJS
from bokeh.layouts import gridplot, column, row, Spacer
from bokeh.resources import CDN
from bokeh.embed import components, file_html, json_item, autoload_static
# UD_VERSION = "2.6"
# BASEPATH = "/home/leo/projects/Datasets/text"
# CONLLU_BASEPATH = os.path.join(BASEPATH, 'UniversalDependencies/ud-treebanks-v{}'.format(UD_VERSION))
CONLLU_BASEPATH = "/home/leo/projects/AI/Datasets/text/UniversalDependencies/ud-treebanks-v2.6"
#
DISTRIBUTIONS = {"norm": stats.norm,
"skewnorm": stats.skewnorm,
"gennorm": stats.gennorm,
"beta": stats.beta,
"betaprime": stats.betaprime,
"lognorm": stats.lognorm,
}
rootdir = CONLLU_BASEPATH
blacklist = []  # BLACKLIST
# NB: this scan runs at import time; conllu_process_get_2list below repeats it
# with its own blacklist argument.
allconll = get_all_files_recurse(rootdir)
train, test, dev = filter_conllu_files(allconll, blacklist)
def conllu_get_fields(fname):
"""
Processes one conllu file
:param fname: absolute path to the conllu file
    :return: ((upos set, count), (xpos set, count), (sentences set, count), (forms set, count))
"""
conll = pyconll.load_from_file(fname)
upos = []
xpos = []
# deprel = []
sentences = []
forms = []
src_lang = path_leaf(fname).split('_')[0]
for sen in conll:
sentences.append((src_lang, sen.text))
try:
forms.extend([t.form for t in sen._tokens])
except:
pass
try:
sen_upos = [t.upos for t in sen._tokens]
upos.append((src_lang, sen.text, tuple(sen_upos)))
except:
pass
try:
sen_xpos = [t.xpos for t in sen._tokens]
xpos.append((src_lang, sen.text, tuple(sen_xpos)))
except:
pass
# try:
# sen_deprel = [t.deprel for t in sen._tokens]
# deprel.append((src_lang, sen.text, tuple(sen_deprel)))
# except:
# pass
# return (set(upos), len(upos)), (set(xpos), len(xpos)), (set(deprel), len(deprel)), (
# set(sentences), len(sentences)), (set(forms), len(forms))
return (set(upos), len(upos)), (set(xpos), len(xpos)), (set(sentences), len(sentences)), (set(forms), len(forms))
def _try_get_2list(fname):
try:
return conllu_get_fields(fname)
except Exception as e:
print("Error processing file: {} \nWith error: {}".format(fname, e))
def conllu_process_get_2list(rootdir=CONLLU_BASEPATH, blacklist=BLACKLIST):
allconll = get_all_files_recurse(rootdir)
train, test, dev = filter_conllu_files(allconll, blacklist)
all_files = train + test + dev
# print(all_files)
    with Pool(processes=cpu_count()) as pool:
        res = pool.map(_try_get_2list, all_files)
    # _try_get_2list returns None on parse errors; drop those so the downstream
    # unpacking in extract_data_from_fields does not fail
    return [r for r in res if r is not None]
def extract_data_from_fields(results):
upos_data = []
# deprel_data = []
sentences_data = []
forms_data = []
for r in results:
# upos_val, xpos_val, deprel_val, sentences_val, forms_val = r
upos_val, xpos_val, sentences_val, forms_val = r
# print("lala 1")
forms_data.extend(forms_val[0])
for val in upos_val[0]:
# print(val)
lang1, txt1, upos = val
upos_data.append((lang1, txt1, upos, len(upos)))
# for lang3, txt3, deprel in deprel_val[0]:
# deprel_data.append((lang3, txt3, deprel, len(deprel)))
for lang4, txt4 in sentences_val[0]:
sentences_data.append((lang4, txt4, len(txt4)))
return upos_data, sentences_data, forms_data
# return upos_data, deprel_data, sentences_data, forms_data
def get_best_distribution(data, distributions=DISTRIBUTIONS):
dist_results = []
params = {}
for dist_name, dist in distributions.items():
param = dist.fit(data)
params[dist_name] = param
# Applying the Kolmogorov-Smirnov test
D, p = stats.kstest(data, dist_name, args=param)
# print("p value for "+dist_name+" = "+str(p))
dist_results.append((dist_name, D, p))
# select the best fitted distribution
# store the name of the best fit and its p value
best_dist, D, best_p = max(dist_results, key=lambda item: item[2])
# print("Best fitting distribution: "+str(best_dist))
# print("Best p value: "+ str(best_p))
# print("Parameters for the best fit: "+ str(params[best_dist]))
return best_dist, best_p, params[best_dist]
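# Usage sketch (illustrative): every candidate in DISTRIBUTIONS is fitted and
# the one with the highest Kolmogorov-Smirnov p-value wins; `stats.kstest`
# accepts the scipy distribution name as a string plus the fitted parameters
# via `args`.
def _demo_get_best_distribution():
    data = stats.norm.rvs(loc=20, scale=4, size=500, random_state=0)
    name, p, params = get_best_distribution(data)
    return name, p   # typically a (near-)normal fit with a high p-value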
# def compute_distributions(upos_data, deprel_data, sentences_data, langs=None):
def compute_distributions(upos_data, sentences_data, langs=None):
df_upos = pd.DataFrame(upos_data, columns=["lang", "text", "upos", "upos_len"])
# df_deprel = pd.DataFrame(deprel_data, columns=["lang", "text", "deprel", "deprel_len"])
df_txt = pd.DataFrame(sentences_data, columns=["lang", "text", "text_len"])
if langs is None:
langs = sorted(df_upos['lang'].unique())
langs_data = {}
for lang in langs:
try:
# fig, ax = plt.subplots()
dest_lang = languages.get(alpha_2=lang) if len(lang) == 2 else languages.get(alpha_3=lang)
dest_lang = dest_lang.name
lng_upos_len = df_upos.loc[df_upos['lang'] == lang]['upos_len']
# lng_deprel_len = df_deprel.loc[df_deprel['lang'] == lang]['deprel_len']
lng_text_len = df_txt.loc[df_txt['lang'] == lang]['text_len']
langs_data[lang] = {
'lang': dest_lang,
'upos_len': lng_upos_len,
'upos_distrib': get_best_distribution(lng_upos_len),
# 'deprel_len': lng_deprel_len,
# 'deprel_distrib': get_best_distribution(lng_deprel_len),
'text_len': lng_text_len,
'text_distrib': get_best_distribution(lng_text_len),
}
except Exception as e:
print("Error processing lang {} with Exception {}".format(lang, e))
pass
return langs_data
def _try_compute_distributions(upos_data, sentence_data):
try:
return compute_distributions(upos_data=upos_data, sentences_data=sentence_data)
except Exception as e:
print("Error computing distributions With error: {}".format(e))
# TODO parallelization of compute_distributions with starmap
def _get_stats(distrib, distrib_params, data, n_bins=100, n_samples=100):
"""
:param distrib: distribution function (scipy.stats.[beta|norm|....])
:param distrib_params: parameters of the distribution
:param data:
:param n_bins: number of bins to compute for the histograms.
:param n_samples: number of samples for the CDF and PDF functions
:return: (stats, {cdf,pdf})
"""
try: # if data is a pandas dataframe (which it is) TODO cleanup these dirty things
data = data.to_numpy()
except:
pass
mskv = [None, None, None, None]
t_mskv = distrib.stats(*distrib_params)
for i in range(len(t_mskv)): # mean, variance, skew, kurtosis -> variable length
mskv[i] = t_mskv[i]
ret_stats = {
'mean': mskv[0], # mean, variance, skew, kurtosis -> variable length
'variance': mskv[1],
'skew': mskv[2],
'kurtosis': mskv[3],
'median': distrib.median(*distrib_params),
'std': distrib.std(*distrib_params),
'intervals': {'99': distrib.interval(0.99, *distrib_params),
'98': distrib.interval(0.98, *distrib_params),
'95': distrib.interval(0.95, *distrib_params),
'90': distrib.interval(0.90, *distrib_params),
'85': distrib.interval(0.85, *distrib_params),
'80': distrib.interval(0.8, *distrib_params),
}
}
max_len = max(data)
    x = np.linspace(0, max_len, n_samples)
    hist, bin_edges = np.histogram(data, bins=n_bins)  # (hist, bin_edges)
    # the CDF/PDF are precomputed on x to make drawing with bokeh easier;
    # for that, n_samples needs to be the same as n_bins so all columns align
ret_foo = {'x': x,
'hist': hist,
'bin_edges': bin_edges,
# 'bin_edges_left': bin_edges[:-1],
# 'bin_edges_right': bin_edges[1:],
'cdf': distrib.cdf(x, *distrib_params),
'pdf': distrib.pdf(x, *distrib_params)
}
return ret_stats, ret_foo
def _get_lang_stats(lang_data, distributions=DISTRIBUTIONS):
upos_distrib = distributions[lang_data['upos_distrib'][0]]
upos_distrib_params = lang_data['upos_distrib'][2]
# print('upos', upos_distrib, upos_distrib_params)
upos_data = lang_data['upos_len']
upos_stats, upos_functions = _get_stats(upos_distrib, upos_distrib_params, upos_data)
#
# deprel_distrib = distributions[lang_data['deprel_distrib'][0]]
# deprel_distrib_params = lang_data['deprel_distrib'][2]
# # print('deprel', deprel_distrib, deprel_distrib_params)
# deprel_data = lang_data['deprel_len']
# deprel_stats, deprel_functions = _get_stats(deprel_distrib, deprel_distrib_params, deprel_data)
#
text_distrib = distributions[lang_data['text_distrib'][0]]
text_distrib_params = lang_data['text_distrib'][2]
# print('text', text_distrib, text_distrib_params)
text_data = lang_data['text_len']
text_stats, text_functions = _get_stats(text_distrib, text_distrib_params, text_data)
lang_data['upos_stats'] = upos_stats
# lang_data['deprel_stats'] = deprel_stats
lang_data['text_stats'] = text_stats
lang_data['upos_functions'] = upos_functions
# lang_data['deprel_functions'] = deprel_functions
lang_data['text_functions'] = text_functions
return lang_data
def flatten_dict(lang, d, sep="_"):
import collections
obj = collections.OrderedDict()
obj['lang_code'] = lang
lang_name = languages.get(alpha_2=lang) if len(lang) == 2 else languages.get(alpha_3=lang)
obj['lang_name'] = lang_name.name
def recurse(t, parent_key=""):
if isinstance(t, list):
for i in range(len(t)):
recurse(t[i], parent_key + sep + str(i) if parent_key else str(i))
elif isinstance(t, dict):
for k, v in t.items():
recurse(v, parent_key + sep + k if parent_key else k)
else:
obj[parent_key] = t
recurse(d)
return obj
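# Example (illustrative): nested stats dicts become flat, prefix-keyed rows,
# which is the shape stats_dict2rows feeds into the summary DataFrames below.
def _demo_flatten_dict():
    d = {"mean": 21.4, "intervals": {"99": (2.0, 57.0)}}
    return flatten_dict("en", d)
    # -> {'lang_code': 'en', 'lang_name': 'English',
    #     'mean': 21.4, 'intervals_99': (2.0, 57.0)}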
def make_plot(title, data_source):
    # TODO: make this even better by letting the sizing mode be changed from a dropdown menu?
hover = HoverTool(
names=["CDF"],
tooltips=[
("length", '$x{int}'),
("Count", "@hist{int}"),
("pdf", "@pdf{1.111}"),
("cdf", "@cdf{1.111}"),
],
# display a tooltip whenever the cursor is vertically in line with a glyph
mode='vline',
)
p = figure(title=title, background_fill_color="#fafafa",
plot_height=500, sizing_mode="stretch_width",
tools="crosshair,pan,wheel_zoom,box_zoom,zoom_in,zoom_out,undo,redo,reset",
toolbar_location="left",
output_backend="webgl")
p.add_tools(hover)
# p = figure(title=title, tools='', background_fill_color="#fafafa")
p.xaxis.axis_label = 'Length'
p.yaxis.axis_label = 'Count'
# second axe, probability
p.extra_y_ranges = {"cdf(x)": Range1d(start=0., end=1.02),
"Pr(x)": Range1d(start=0., end=max(data_source.data['pdf']) * 1.02)
}
p.add_layout(LinearAxis(y_range_name="Pr(x)", axis_label='Pr(x)'), 'right')
p.quad(name='hist', top='hist', bottom=0, left='bin_edges_left', right='bin_edges_right',
fill_color="blue", line_color="white", alpha=0.5, legend_label="Freq.", source=data_source)
p.line(name='PDF', x='x', y='pdf', line_color="green", line_width=4, alpha=0.7, legend_label="PDF",
y_range_name="Pr(x)", source=data_source)
p.line(name='CDF', x='x', y='cdf', line_color="red", line_width=2, alpha=0.7, legend_label="CDF",
y_range_name="cdf(x)", source=data_source)
p.y_range.start = 0
p.title.align = 'center'
p.legend.location = "center_right"
# p.legend.location = "bottom_right"
p.legend.background_fill_color = "#fefefe"
p.grid.grid_line_color = "grey"
# p.legend.click_policy="mute"
p.legend.click_policy = "hide"
leo_label = Label(x=0, y=10, text='leomrocha.github.io')
p.add_layout(leo_label)
return p
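# Hypothetical usage (names here are illustrative): make_plot expects a
# ColumnDataSource with exactly the columns that _make_data_sources assembles
# below (hist, bin_edges_left, bin_edges_right, x, pdf, cdf).
def _demo_make_plot():
    x = np.linspace(0, 10, 100)
    hist, edges = np.histogram(np.random.normal(5, 1, 1000), bins=100)
    src = ColumnDataSource({'hist': hist,
                            'bin_edges_left': edges[:-1],
                            'bin_edges_right': edges[1:],
                            'x': x,
                            'pdf': stats.norm.pdf(x, 5, 1),
                            'cdf': stats.norm.cdf(x, 5, 1)})
    show(make_plot('demo', src))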
def _make_data_sources(lang_data):
lang_name = lang_data['lang']
##
upos_plot_name = lang_name + ' - Text Length by UPOS Count'
upos_stats = lang_data['upos_stats']
upos_functions = lang_data['upos_functions']
upos_data_source = ColumnDataSource({'hist': upos_functions['hist'],
'bin_edges_left': upos_functions['bin_edges'][:-1],
'bin_edges_right': upos_functions['bin_edges'][1:],
'x': upos_functions['x'],
'pdf': upos_functions['pdf'],
'cdf': upos_functions['cdf']
}
)
# TODO refactor this to make it better ...
text_plot_name = lang_name + ' - Text Length by Character Count'
text_stats = lang_data['text_stats']
text_functions = lang_data['text_functions']
text_data_source = ColumnDataSource({'hist': text_functions['hist'],
'bin_edges_left': text_functions['bin_edges'][:-1],
'bin_edges_right': text_functions['bin_edges'][1:],
'x': text_functions['x'],
'pdf': text_functions['pdf'],
'cdf': text_functions['cdf']
}
)
return (upos_plot_name, upos_data_source, upos_stats), (text_plot_name, text_data_source, text_stats)
def _make_stats_tables(stats):
    # NB: the `stats` parameter shadows the scipy.stats module inside this function
    name_col = [k for k in stats.keys() if k != 'intervals' and stats[k] is not None]
    value_col = [round(float(stats[k]), 3) for k in name_col]  # name_col already filters out None
interval_names = list(stats['intervals'].keys())
interval_values = [round(float(i[1]), 1) for i in stats['intervals'].values()]
data = dict(
names=name_col,
values=value_col,
)
source = ColumnDataSource(data)
columns = [
TableColumn(field="names", title="Name"),
TableColumn(field="values", title="Value"),
]
data_table = DataTable(source=source, columns=columns, width=150, fit_columns=True, index_position=None)
int_data = dict(
names=interval_names,
values=interval_values,
)
int_source = ColumnDataSource(int_data)
int_columns = [
TableColumn(field="names", title="interval"),
TableColumn(field="values", title="Max Value"),
]
interval_table = DataTable(source=int_source, columns=int_columns, width=120, fit_columns=True, index_position=None)
return data_table, interval_table
# def _make_grid_datasources(lang_data):
# upos_plt_info, txt_plt_info = _make_data_sources(lang_data)
# upos_plot = make_plot(*upos_plt_info[:2])
# text_plot = make_plot(*txt_plt_info[:2])
#
# upos_stats_table, upos_interval_table = _make_stats_tables(upos_plt_info[2])
# text_stats_table, text_interval_table = _make_stats_tables(txt_plt_info[2])
# pass
def _make_grid_plot(lang_data):
upos_plt_info, txt_plt_info = _make_data_sources(lang_data)
upos_plot = make_plot(*upos_plt_info[:2])
text_plot = make_plot(*txt_plt_info[:2])
upos_stats_table, upos_interval_table = _make_stats_tables(upos_plt_info[2])
text_stats_table, text_interval_table = _make_stats_tables(txt_plt_info[2])
upos_stats = Column(upos_stats_table, sizing_mode="fixed", height=350, width=150)
upos_interval = Column(upos_interval_table, sizing_mode="fixed", height=350, width=150, margin=(0, 0, 0, 10))
text_stats = Column(text_stats_table, sizing_mode="fixed", height=350, width=150)
text_interval = Column(text_interval_table, sizing_mode="fixed", height=350, width=150, margin=(0, 0, 0, 10))
gp = gridplot([upos_stats, upos_interval, upos_plot,
text_stats, text_interval, text_plot],
ncols=3, sizing_mode="stretch_width", plot_height=350)
return gp
def stats_dict2table(all_lang_stats):
upos_stats = []
# deprel_stats = []
text_stats = []
for lang, lang_data in all_lang_stats.items():
# upos_row, deprel_row, text_row = stats_dict2rows(lang, lang_data)
upos_row, text_row = stats_dict2rows(lang, lang_data)
upos_stats.append(upos_row)
# deprel_stats.append(deprel_row)
text_stats.append(text_row)
upos_df = pd.DataFrame(upos_stats)
# deprel_df = pd.DataFrame(deprel_stats)
text_df = pd.DataFrame(text_stats)
# return upos_df, deprel_df, text_df
return upos_df, text_df
def stats_dict2rows(lang, lang_data):
upos_data = flatten_dict(lang, lang_data['upos_stats'])
# deprel_data = flatten(lang, lang_data['deprel_stats'])
text_data = flatten_dict(lang, lang_data['text_stats'])
return upos_data, text_data
# return upos_data, deprel_data, text_data
# complete tables showing stats for all the available languages
def _make_complete_stats_tables(all_lang_stats):
upos_df, text_df = stats_dict2table(all_lang_stats)
df_tables = (upos_df.round(2), text_df.round(2))
intervals = ['intervals_99', 'intervals_98', 'intervals_95', 'intervals_90', 'intervals_85', 'intervals_80']
cols_to_drop = intervals + ['intervals_99_low', 'intervals_98_low',
'intervals_95_low', 'intervals_90_low',
'intervals_85_low', 'intervals_80_low',
'skew', 'kurtosis']
# separate and clean the data
df_clean_tables = []
for df in df_tables:
for interval in intervals:
df[[interval + '_low', interval + '_high']] = pd.DataFrame(df[interval].tolist(), index=df.index)
df = df.drop(columns=cols_to_drop).round(2)
df_clean_tables.append(df)
bk_tables = []
def _get_title(col_name):
if col_name == 'lang_code':
return 'Code'
elif col_name == 'lang_name':
return 'Language'
else:
return col_name.replace('intervals_', '').replace('_high', '').replace('_low', '')
def _get_width(col_name):
if col_name == 'lang_code':
return 60
elif col_name == 'lang_name':
return 140
else:
return 50
for table in df_clean_tables:
columns = [TableColumn(field=Ci, title=_get_title(Ci), width=_get_width(Ci)) for Ci in
table.columns] # bokeh columns
data_table = DataTable(columns=columns, source=ColumnDataSource(table), sizing_mode='stretch_width',
fit_columns=False) # bokeh table
bk_tables.append(data_table)
return bk_tables
# convert to json TODO this must be improved and all sent to the NumpyEncoder ...
# This solution is modified from:
# https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
# https://github.com/mpld3/mpld3/issues/434
# class NumpyEncoder(json.JSONEncoder):
# """ Special json encoder for numpy types """
#
# def default(self, obj):
# if isinstance(obj, (tuple, set)):
# return list(obj)
# elif isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
# np.int16, np.int32, np.int64, np.uint8,
# np.uint16, np.uint32, np.uint64)):
# return int(obj)
# elif isinstance(obj, (np.float_, np.float16, np.float32,
# np.float64)):
# return float(obj)
# elif isinstance(obj, np.ndarray):
# return obj.tolist()
# elif isinstance(obj, pd.Series):
# obj = obj.to_list()
# return json.JSONEncoder.default(self, obj)
def _recursive_jsonify(dict_data):
new_dict = {}
for k, v in dict_data.items():
k = str(k) # always convert,
if isinstance(v, (tuple, set)):
ov = []
for t in v:
if isinstance(t, str):
ov.append(t)
elif isinstance(t, tuple):
ov.append([float(i) for i in t])
else:
ov.append(float(t))
v = ov
if isinstance(v, pd.Series):
v = v.to_list()
if isinstance(v, np.ndarray):
v = v.tolist()
if np.issubdtype(type(v), np.number):
v = float(v)
if isinstance(v, dict):
new_dict[k] = _recursive_jsonify(v)
else:
new_dict[k] = v
return new_dict
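# Example (illustrative): numpy scalars (and arrays / pandas Series) are not
# natively JSON-serializable, so values are coerced to plain Python types.
def _demo_recursive_jsonify():
    d = {"upos": {"mean": np.float64(21.4), "n": 500}}
    return _recursive_jsonify(d)
    # -> {'upos': {'mean': 21.4, 'n': 500.0}}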
###
def _save_file(obj, fpath):
with open(fpath, 'w') as f:
f.write(obj)
f.flush()
def _generate_html_plots(all_stats, path='./conllu_blog/plots{}/', fname="{}_plot{}"):
all_grids = {}
all_grids_html = {}
all_grids_json = {}
for lang, data in all_stats.items():
grid = _make_grid_plot(data)
all_grids[lang] = grid
plt_name = lang + " stats plot"
# complete html page
html = file_html(grid, CDN, plt_name)
all_grids_html[lang] = html
# grid as json information
# jsn_grid = json.dumps(json_item(grid, plt_name))
# all_grids_json[lang] = jsn_grid
# save html
Path(path.format('_html')).mkdir(parents=True, exist_ok=True)
html_fname = os.path.join(path.format('_html'), fname.format(lang, '.html'))
_save_file(html, html_fname)
# save json
# Path(path.format('_json')).mkdir(parents=True, exist_ok=True)
# json_fname = os.path.join(path.format('_json'), fname.format(lang, '.json'))
# _save_file(jsn_grid, json_fname)
# all_components = (all_components_script, all_components_div) = components(all_grids)
# # save all grids as component
# Path(path.format('_components')).mkdir(parents=True, exist_ok=True)
# comp_fname_script = os.path.join(path.format('_components'), fname.format('all_components', '_script.html'))
# comp_fname_div = os.path.join(path.format('_components'), fname.format('all_components', '_div.html'))
# _save_file(all_components_div, comp_fname_div)
# _save_file(all_components_script, comp_fname_script)
return all_grids, all_grids_html
# return all_grids, all_grids_html, all_components
def _generate_html_table(table, name, path='./conllu_blog/tables_{}/', fname="{}_table{}"):
Path(path.format('html')).mkdir(parents=True, exist_ok=True)
html_fname = os.path.join(path.format('html'), fname.format(name, '.html'))
html = file_html(table, CDN, name)
_save_file(html, html_fname)
# Path(path.format('json')).mkdir(parents=True, exist_ok=True)
# jsn_fname = os.path.join(path.format('json'), fname.format(name, '.json'))
# jsn = json.dumps(json_item(table, name))
# _save_file(json, jsn_fname)
# Path(path.format('components')).mkdir(parents=True, exist_ok=True)
# cmp_script_fname = os.path.join(path.format('components'), fname.format(name, '_script.html'))
# cmp_div_fname = os.path.join(path.format('components'), fname.format(name, '_div.html'))
# cmp_script, cmp_div = components(table)
# # _save_file(cmp_script, cmp_script_fname)
# # _save_file(cmp_div, cmp_div_fname)
return html
# return html, (cmp_script, cmp_div)
# return html, jsn, (cmp_script, cmp_div)
def generate_files(blacklist=[], saveto='conllu_stats.json.gz'):
res = conllu_process_get_2list(blacklist=blacklist)
# upos_data, deprel_data, sentences_data, forms_data = extract_data_from_fields(res)
# langs_data = compute_distributions(upos_data, deprel_data, sentences_data)
upos_data, sentences_data, forms_data = extract_data_from_fields(res)
langs_data = compute_distributions(upos_data, sentences_data)
all_stats = {}
for lang, lang_data in langs_data.items():
print('processing {}'.format(lang))
all_stats[lang] = _get_lang_stats(lang_data)
all_stats_copy = copy.deepcopy(all_stats)
all_stats_copy = _recursive_jsonify(all_stats_copy)
jsn = json.dumps(all_stats_copy)
# this is with default json lib
# jsn = json.dumps(all_stats_copy, cls=NumpyEncoder)
# for non compressed file -> too big, not worth it
# with open('conllu_stats.json', 'w') as f:
# f.write(jsn)
# f.flush()
with gzip.open(saveto, 'wb') as f:
print("Saving to {}".format(saveto))
# f.write(jsn.encode('utf-8'))
f.write(jsn)
f.flush()
return all_stats
def generate_html(all_stats):
all_stats_copy = copy.deepcopy(all_stats)
    # _generate_html_plots returns two values (the components export is commented out above)
    all_grids, all_grids_html = _generate_html_plots(all_stats_copy)
all_stats_copy = copy.deepcopy(all_stats)
upos_table, text_table = _make_complete_stats_tables(all_stats_copy)
_generate_html_table(upos_table, 'upos_table')
_generate_html_table(text_table, 'text_table')
def main():
all_stats = generate_files()
generate_html(all_stats)
if __name__ == '__main__':
main()
| [
"bokeh.plotting.figure",
"gzip.open",
"bokeh.models.Range1d",
"multiprocessing.cpu_count",
"copy.deepcopy",
"numpy.histogram",
"bokeh.models.Label",
"bokeh.models.layouts.Column",
"bokeh.models.TableColumn",
"numpy.linspace",
"pycountry.languages.get",
"pandas.DataFrame",
"orjson.dumps",
"... | [((1906, 1935), 'pyconll.load_from_file', 'pyconll.load_from_file', (['fname'], {}), '(fname)\n', (1928, 1935), False, 'import pyconll\n'), ((5407, 5476), 'pandas.DataFrame', 'pd.DataFrame', (['upos_data'], {'columns': "['lang', 'text', 'upos', 'upos_len']"}), "(upos_data, columns=['lang', 'text', 'upos', 'upos_len'])\n", (5419, 5476), True, 'import pandas as pd\n'), ((5584, 5650), 'pandas.DataFrame', 'pd.DataFrame', (['sentences_data'], {'columns': "['lang', 'text', 'text_len']"}), "(sentences_data, columns=['lang', 'text', 'text_len'])\n", (5596, 5650), True, 'import pandas as pd\n'), ((8605, 8633), 'numpy.linspace', 'np.linspace', (['(0)', 'max_len', '(100)'], {}), '(0, max_len, 100)\n', (8616, 8633), True, 'import numpy as np\n'), ((8656, 8687), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'n_bins'}), '(data, bins=n_bins)\n', (8668, 8687), True, 'import numpy as np\n'), ((10629, 10654), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (10652, 10654), False, 'import collections\n'), ((11361, 11515), 'bokeh.models.HoverTool', 'HoverTool', ([], {'names': "['CDF']", 'tooltips': "[('length', '$x{int}'), ('Count', '@hist{int}'), ('pdf', '@pdf{1.111}'), (\n 'cdf', '@cdf{1.111}')]", 'mode': '"""vline"""'}), "(names=['CDF'], tooltips=[('length', '$x{int}'), ('Count',\n '@hist{int}'), ('pdf', '@pdf{1.111}'), ('cdf', '@cdf{1.111}')], mode=\n 'vline')\n", (11370, 11515), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((11689, 11925), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'background_fill_color': '"""#fafafa"""', 'plot_height': '(500)', 'sizing_mode': '"""stretch_width"""', 'tools': '"""crosshair,pan,wheel_zoom,box_zoom,zoom_in,zoom_out,undo,redo,reset"""', 'toolbar_location': '"""left"""', 'output_backend': '"""webgl"""'}), "(title=title, background_fill_color='#fafafa', plot_height=500,\n sizing_mode='stretch_width', tools=\n 'crosshair,pan,wheel_zoom,box_zoom,zoom_in,zoom_out,undo,redo,reset',\n toolbar_location='left', output_backend='webgl')\n", (11695, 11925), False, 'from bokeh.plotting import figure, show\n'), ((13250, 13294), 'bokeh.models.Label', 'Label', ([], {'x': '(0)', 'y': '(10)', 'text': '"""leomrocha.github.io"""'}), "(x=0, y=10, text='leomrocha.github.io')\n", (13255, 13294), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((13592, 13846), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'hist': upos_functions['hist'], 'bin_edges_left': upos_functions[\n 'bin_edges'][:-1], 'bin_edges_right': upos_functions['bin_edges'][1:],\n 'x': upos_functions['x'], 'pdf': upos_functions['pdf'], 'cdf':\n upos_functions['cdf']}"], {}), "({'hist': upos_functions['hist'], 'bin_edges_left':\n upos_functions['bin_edges'][:-1], 'bin_edges_right': upos_functions[\n 'bin_edges'][1:], 'x': upos_functions['x'], 'pdf': upos_functions['pdf'\n ], 'cdf': upos_functions['cdf']})\n", (13608, 13846), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((14351, 14605), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'hist': text_functions['hist'], 'bin_edges_left': text_functions[\n 'bin_edges'][:-1], 'bin_edges_right': text_functions['bin_edges'][1:],\n 'x': text_functions['x'], 'pdf': text_functions['pdf'], 'cdf':\n text_functions['cdf']}"], {}), "({'hist': text_functions['hist'], 'bin_edges_left':\n 
text_functions['bin_edges'][:-1], 'bin_edges_right': text_functions[\n 'bin_edges'][1:], 'x': text_functions['x'], 'pdf': text_functions['pdf'\n ], 'cdf': text_functions['cdf']})\n", (14367, 14605), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15415, 15437), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['data'], {}), '(data)\n', (15431, 15437), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15580, 15675), 'bokeh.models.DataTable', 'DataTable', ([], {'source': 'source', 'columns': 'columns', 'width': '(150)', 'fit_columns': '(True)', 'index_position': 'None'}), '(source=source, columns=columns, width=150, fit_columns=True,\n index_position=None)\n', (15589, 15675), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15779, 15805), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['int_data'], {}), '(int_data)\n', (15795, 15805), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15965, 16069), 'bokeh.models.DataTable', 'DataTable', ([], {'source': 'int_source', 'columns': 'int_columns', 'width': '(120)', 'fit_columns': '(True)', 'index_position': 'None'}), '(source=int_source, columns=int_columns, width=120, fit_columns=\n True, index_position=None)\n', (15974, 16069), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((16855, 16923), 'bokeh.models.layouts.Column', 'Column', (['upos_stats_table'], {'sizing_mode': '"""fixed"""', 'height': '(350)', 'width': '(150)'}), "(upos_stats_table, sizing_mode='fixed', height=350, width=150)\n", (16861, 16923), False, 'from bokeh.models.layouts import Column, Panel, Tabs\n'), ((16944, 17041), 'bokeh.models.layouts.Column', 'Column', (['upos_interval_table'], {'sizing_mode': '"""fixed"""', 'height': '(350)', 'width': '(150)', 'margin': '(0, 0, 0, 10)'}), "(upos_interval_table, sizing_mode='fixed', height=350, width=150,\n margin=(0, 0, 0, 10))\n", (16950, 17041), False, 'from bokeh.models.layouts import Column, Panel, Tabs\n'), ((17055, 17123), 'bokeh.models.layouts.Column', 'Column', (['text_stats_table'], {'sizing_mode': '"""fixed"""', 'height': '(350)', 'width': '(150)'}), "(text_stats_table, sizing_mode='fixed', height=350, width=150)\n", (17061, 17123), False, 'from bokeh.models.layouts import Column, Panel, Tabs\n'), ((17144, 17241), 'bokeh.models.layouts.Column', 'Column', (['text_interval_table'], {'sizing_mode': '"""fixed"""', 'height': '(350)', 'width': '(150)', 'margin': '(0, 0, 0, 10)'}), "(text_interval_table, sizing_mode='fixed', height=350, width=150,\n margin=(0, 0, 0, 10))\n", (17150, 17241), False, 'from bokeh.models.layouts import Column, Panel, Tabs\n'), ((17248, 17393), 'bokeh.layouts.gridplot', 'gridplot', (['[upos_stats, upos_interval, upos_plot, text_stats, text_interval, text_plot]'], {'ncols': '(3)', 'sizing_mode': '"""stretch_width"""', 'plot_height': '(350)'}), "([upos_stats, upos_interval, upos_plot, text_stats, text_interval,\n text_plot], ncols=3, sizing_mode='stretch_width', plot_height=350)\n", (17256, 17393), False, 'from bokeh.layouts import gridplot, column, row, Spacer\n'), ((17863, 17887), 'pandas.DataFrame', 'pd.DataFrame', (['upos_stats'], {}), '(upos_stats)\n', (17875, 17887), True, 'import pandas as pd\n'), ((17947, 17971), 'pandas.DataFrame', 
'pd.DataFrame', (['text_stats'], {}), '(text_stats)\n', (17959, 17971), True, 'import pandas as pd\n'), ((23969, 23996), 'bokeh.embed.file_html', 'file_html', (['table', 'CDN', 'name'], {}), '(table, CDN, name)\n', (23978, 23996), False, 'from bokeh.embed import components, file_html, json_item, autoload_static\n'), ((25392, 25416), 'copy.deepcopy', 'copy.deepcopy', (['all_stats'], {}), '(all_stats)\n', (25405, 25416), False, 'import copy\n'), ((25483, 25509), 'orjson.dumps', 'json.dumps', (['all_stats_copy'], {}), '(all_stats_copy)\n', (25493, 25509), True, 'import orjson as json\n'), ((25987, 26011), 'copy.deepcopy', 'copy.deepcopy', (['all_stats'], {}), '(all_stats)\n', (26000, 26011), False, 'import copy\n'), ((26124, 26148), 'copy.deepcopy', 'copy.deepcopy', (['all_stats'], {}), '(all_stats)\n', (26137, 26148), False, 'import copy\n'), ((4694, 4735), 'scipy.stats.kstest', 'stats.kstest', (['data', 'dist_name'], {'args': 'param'}), '(data, dist_name, args=param)\n', (4706, 4735), False, 'from scipy import stats\n'), ((10699, 10726), 'pycountry.languages.get', 'languages.get', ([], {'alpha_2': 'lang'}), '(alpha_2=lang)\n', (10712, 10726), False, 'from pycountry import languages\n'), ((10750, 10777), 'pycountry.languages.get', 'languages.get', ([], {'alpha_3': 'lang'}), '(alpha_3=lang)\n', (10763, 10777), False, 'from pycountry import languages\n'), ((12200, 12228), 'bokeh.models.Range1d', 'Range1d', ([], {'start': '(0.0)', 'end': '(1.02)'}), '(start=0.0, end=1.02)\n', (12207, 12228), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((12365, 12417), 'bokeh.models.LinearAxis', 'LinearAxis', ([], {'y_range_name': '"""Pr(x)"""', 'axis_label': '"""Pr(x)"""'}), "(y_range_name='Pr(x)', axis_label='Pr(x)')\n", (12375, 12417), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15463, 15503), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""names"""', 'title': '"""Name"""'}), "(field='names', title='Name')\n", (15474, 15503), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15513, 15555), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""values"""', 'title': '"""Value"""'}), "(field='values', title='Value')\n", (15524, 15555), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15835, 15879), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""names"""', 'title': '"""interval"""'}), "(field='names', title='interval')\n", (15846, 15879), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((15889, 15935), 'bokeh.models.TableColumn', 'TableColumn', ([], {'field': '"""values"""', 'title': '"""Max Value"""'}), "(field='values', title='Max Value')\n", (15900, 15935), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n'), ((22448, 22478), 'bokeh.embed.file_html', 'file_html', (['grid', 'CDN', 'plt_name'], {}), '(grid, CDN, plt_name)\n', (22457, 22478), False, 'from bokeh.embed import components, file_html, json_item, autoload_static\n'), ((25759, 25782), 'gzip.open', 'gzip.open', (['saveto', '"""wb"""'], {}), "(saveto, 'wb')\n", (25768, 25782), False, 'import gzip\n'), ((15046, 15058), 'scipy.stats.keys', 'stats.keys', ([], {}), '()\n', (15056, 15058), False, 'from 
scipy import stats\n'), ((3473, 3484), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (3482, 3484), False, 'from multiprocessing import Pool, cpu_count\n'), ((5852, 5879), 'pycountry.languages.get', 'languages.get', ([], {'alpha_2': 'lang'}), '(alpha_2=lang)\n', (5865, 5879), False, 'from pycountry import languages\n'), ((5903, 5930), 'pycountry.languages.get', 'languages.get', ([], {'alpha_3': 'lang'}), '(alpha_3=lang)\n', (5916, 5930), False, 'from pycountry import languages\n'), ((19975, 19998), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['table'], {}), '(table)\n', (19991, 19998), False, 'from bokeh.models import LinearAxis, Range1d, HoverTool, ColumnDataSource, DataTable, TableColumn, Label\n')] |
from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
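# Minimal behavior sketch (an illustration, not an API guarantee; this is a
# pandas-internal helper): with mixed numpy dtypes everything is first cast to
# the common dtype found by find_common_type, then concatenated.
def _demo_concat_arrays():
    a = np.array([1, 2], dtype="int64")
    b = np.array([3.5], dtype="float64")
    return concat_arrays([a, b])   # float64 ndarray [1.0, 2.0, 3.5]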
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, blk.ndim)
values = ensure_wrapped_if_datetimelike(values)
fastpath = blk.values.dtype == values.dtype
else:
values = _concatenate_join_units(join_units, concat_axis, copy=copy)
fastpath = False
if fastpath:
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block(values, placement=placement, ndim=len(axes))
blocks.append(b)
return BlockManager(tuple(blocks), axes)
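# Minimal sketch of how this entry point is exercised (assumes a recent pandas
# with the default BlockManager backend; `_mgr` is internal API):
# >>> import pandas as pd
# >>> df1 = pd.DataFrame({"a": [1, 2]})
# >>> df2 = pd.DataFrame({"b": [3.0, 4.0]})
# >>> pd.concat([df1, df2], axis=1)._mgr  # doctest: +SKIP
# BlockManager holding one int64 block and one float64 block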
def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, save for the item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
has_column_indexer = False
if 0 in indexers:
has_column_indexer = True
ax0_indexer = indexers.pop(0)
blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)
else:
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
if blkno == -1:
# only reachable in the `0 in indexers` case
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
not has_column_indexer
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
# Note: block is None implies indexers is None, but not vice-versa
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
blk = self.block
if blk is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return blk.dtype
return ensure_dtype_can_hold_na(blk.dtype)
def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
"""
Check that we are all-NA of a type/dtype that is compatible with this dtype.
Augments `self.is_na` with an additional check of the type of NA values.
"""
if not self.is_na:
return False
if self.block is None:
return True
if self.dtype == object:
values = self.block.values
return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
na_value = self.block.fill_value
if na_value is NaT and not is_dtype_equal(self.dtype, dtype):
# e.g. we are dt64 and other is td64
# fill_values match but we should not cast self.block.values to dtype
# TODO: this will need updating if we ever have non-nano dt64/td64
return False
if na_value is NA and needs_i8_conversion(dtype):
# FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
# e.g. self.dtype == "Int64" and dtype is td64, we don't want
# to consider these as matching
return False
# TODO: better to use can_hold_element?
return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
if self.block is None:
return True
if not self.block._can_hold_na:
return False
values = self.block.values
if isinstance(self.block.values.dtype, SparseDtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
values_flat = values
else:
values_flat = values.ravel(order="K")
return isna_all(values_flat)
def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self._is_valid_na_for(empty_dtype):
# note: always holds when self.block is None
blk_dtype = getattr(self.block, "dtype", None)
if blk_dtype == np.dtype("object"):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if is_datetime64tz_dtype(empty_dtype):
i8values = np.full(self.shape, fill_value.value)
return DatetimeArray(i8values, dtype=empty_dtype)
elif is_1d_only_ea_dtype(empty_dtype):
empty_dtype = cast(ExtensionDtype, empty_dtype)
cls = empty_dtype.construct_array_type()
missing_arr = cls._from_sequence([], dtype=empty_dtype)
ncols, nrows = self.shape
assert ncols == 1, ncols
empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
return missing_arr.take(
empty_arr, allow_fill=True, fill_value=fill_value
)
elif isinstance(empty_dtype, ExtensionDtype):
# TODO: no tests get here, a handful would if we disabled
# the dt64tz special-case above (which is faster)
cls = empty_dtype.construct_array_type()
missing_arr = cls._empty(shape=self.shape, dtype=empty_dtype)
missing_arr[:] = fill_value
return missing_arr
else:
# NB: we should never get here with empty_dtype integer or bool;
# if we did, the missing_arr.fill would cast to gibberish
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if (not self.indexers) and (not self.block._can_consolidate):
# preserve these for validation in concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.values
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax)
return values
def _concatenate_join_units(
join_units: list[JoinUnit], concat_axis: int, copy: bool
) -> ArrayLike:
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype = _get_empty_dtype(join_units)
has_none_blocks = any(unit.block is None for unit in join_units)
upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
to_concat = [
ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
for ju in join_units
]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
elif any(is_1d_only_ea_obj(t) for t in to_concat):
# TODO(EA2D): special case not needed if all EAs used HybridBlocks
# NB: we are still assuming here that Hybrid blocks have shape (1, N)
# concatting with at least one EA means we are concatting a single column
# the non-EA values are 2D arrays with shape (1, n)
# error: Invalid index type "Tuple[int, slice]" for
# "Union[ExtensionArray, ndarray]"; expected type "Union[int, slice, ndarray]"
to_concat = [
t if is_1d_only_ea_obj(t) else t[0, :] # type: ignore[index]
for t in to_concat
]
concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
concat_values = ensure_block_shape(concat_values, 2)
else:
concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
"""
Find the NA value to go with this dtype.
"""
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif dtype.kind in ["m", "M"]:
return dtype.type("NaT")
elif dtype.kind in ["f", "c"]:
return dtype.type("NaN")
elif dtype.kind == "b":
# different from missing.na_value_for_dtype
return None
elif dtype.kind in ["i", "u"]:
if not has_none_blocks:
# different from missing.na_value_for_dtype
return None
return np.nan
elif dtype.kind == "O":
return np.nan
raise NotImplementedError
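# Illustrative mapping (follows directly from the branches above):
# >>> import numpy as np
# >>> _dtype_to_na_value(np.dtype("float64"), False)
# nan
# >>> _dtype_to_na_value(np.dtype("M8[ns]"), False)
# numpy.datetime64('NaT')
# >>> _dtype_to_na_value(np.dtype("int64"), True)
# nan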
def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
"""
Return the dtype to use when concatenating the specified units.
(The matching N/A value is computed separately by _dtype_to_na_value.)
Returns
-------
dtype
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.dtype(np.float64)
if _is_uniform_reindex(join_units):
# FIXME: integrate property
empty_dtype = join_units[0].block.dtype
return empty_dtype
has_none_blocks = any(unit.block is None for unit in join_units)
dtypes = [
unit.dtype for unit in join_units if unit.block is not None and not unit.is_na
]
if not len(dtypes):
dtypes = [unit.dtype for unit in join_units if unit.block is not None]
dtype = find_common_type(dtypes)
if has_none_blocks:
dtype = ensure_dtype_can_hold_na(dtype)
return dtype
def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
_concatenate_join_units (which uses `concat_compat`).
"""
first = join_units[0].block
if first is None:
return False
return (
# exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
all(type(ju.block) is type(first) for ju in join_units)
and
# e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform
all(
is_dtype_equal(ju.block.dtype, first.dtype)
# GH#42092 we only want the dtype_equal check for non-numeric blocks
# (for now, may change but that would need a deprecation)
or ju.block.dtype.kind in ["b", "i", "u"]
for ju in join_units
)
and
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units)
and
# no blocks with indexers (as then the dimensions do not fit)
all(not ju.indexers for ju in join_units)
and
# only use this path when there is something to concatenate
len(join_units) > 1
)
def _is_uniform_reindex(join_units) -> bool:
return (
# TODO: should this be ju.block._can_hold_na?
all(ju.block and ju.block.is_extension for ju in join_units)
and len({ju.block.dtype.name for ju in join_units}) == 1
)
def _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
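# Sketch of the trimming contract: a JoinUnit covering items 0..4 (shape
# (5, N)) trimmed with length=2 is mutated to cover items 0..1 (shape (2, N)),
# and the returned JoinUnit covers the remaining items 2..4 (shape (3, N)).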
def _combine_concat_plans(plans, concat_axis: int):
"""
Combine multiple concatenation plans into one.
The input plans are consumed; join units may be trimmed in place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
# singleton list so we can modify it as a side-effect within _next_or_none
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# _trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
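# Sketch of the alignment loop above: the per-manager plans are consumed in
# lockstep; whenever the current placements disagree in length, each longer
# unit is trimmed to the shortest length via _trim_join_unit so that every
# plan advances by the same number of items per iteration.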
| [
"copy.copy",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.diff",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"itertools.chain.from_iterable",
"pandas.core.dtypes.common.is_1d_only_ea_dtype",
"numpy.empty",
"numpy.concatenate",
"numpy.dtype",
"pandas.core.dtypes.cast.find_commo... | [((2644, 2708), 'pandas.core.internals.array_manager.ArrayManager', 'ArrayManager', (['arrays', '[axes[1], axes[0]]'], {'verify_integrity': '(False)'}), '(arrays, [axes[1], axes[0]], verify_integrity=False)\n', (2656, 2708), False, 'from pandas.core.internals.array_manager import ArrayManager, NullArrayProxy\n'), ((4632, 4657), 'numpy.concatenate', 'np.concatenate', (['to_concat'], {}), '(to_concat)\n', (4646, 4657), True, 'import numpy as np\n'), ((8414, 8468), 'pandas._libs.internals.get_blkno_placements', 'libinternals.get_blkno_placements', (['blknos'], {'group': '(False)'}), '(blknos, group=False)\n', (8447, 8468), True, 'from pandas._libs import NaT, internals as libinternals\n'), ((19898, 19922), 'pandas.core.dtypes.cast.find_common_type', 'find_common_type', (['dtypes'], {}), '(dtypes)\n', (19914, 19922), False, 'from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type\n'), ((8037, 8090), 'pandas.core.algorithms.take_nd', 'algos.take_nd', (['mgr.blknos', 'ax0_indexer'], {'fill_value': '(-1)'}), '(mgr.blknos, ax0_indexer, fill_value=-1)\n', (8050, 8090), True, 'import pandas.core.algorithms as algos\n'), ((8109, 8163), 'pandas.core.algorithms.take_nd', 'algos.take_nd', (['mgr.blklocs', 'ax0_indexer'], {'fill_value': '(-1)'}), '(mgr.blklocs, ax0_indexer, fill_value=-1)\n', (8122, 8163), True, 'import pandas.core.algorithms as algos\n'), ((11023, 11058), 'pandas.core.dtypes.cast.ensure_dtype_can_hold_na', 'ensure_dtype_can_hold_na', (['blk.dtype'], {}), '(blk.dtype)\n', (11047, 11058), False, 'from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type\n'), ((12266, 12304), 'pandas.core.dtypes.missing.is_valid_na_for_dtype', 'is_valid_na_for_dtype', (['na_value', 'dtype'], {}), '(na_value, dtype)\n', (12287, 12304), False, 'from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna_all\n'), ((12812, 12833), 'pandas.core.dtypes.missing.isna_all', 'isna_all', (['values_flat'], {}), '(values_flat)\n', (12820, 12833), False, 'from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna_all\n'), ((19963, 19994), 'pandas.core.dtypes.cast.ensure_dtype_can_hold_na', 'ensure_dtype_can_hold_na', (['dtype'], {}), '(dtype)\n', (19987, 19994), False, 'from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type\n'), ((22239, 22268), 'copy.copy', 'copy.copy', (['join_unit.indexers'], {}), '(join_unit.indexers)\n', (22248, 22268), False, 'import copy\n'), ((2503, 2562), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[mgr.arrays for mgr in mgrs]'], {}), '([mgr.arrays for mgr in mgrs])\n', (2532, 2562), False, 'import itertools\n'), ((3755, 3814), 'pandas.core.dtypes.cast.find_common_type', 'find_common_type', (['[arr.dtype for arr in to_concat_no_proxy]'], {}), '([arr.dtype for arr in to_concat_no_proxy])\n', (3771, 3814), False, 'from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type\n'), ((4413, 4451), 'pandas.core.dtypes.concat.cast_to_common_type', 'cast_to_common_type', (['arr', 'target_dtype'], {}), '(arr, target_dtype)\n', (4432, 4451), False, 'from pandas.core.dtypes.concat import cast_to_common_type, concat_compat\n'), ((11954, 11980), 'pandas.core.dtypes.common.needs_i8_conversion', 'needs_i8_conversion', (['dtype'], {}), '(dtype)\n', (11973, 11980), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((18148, 18201), 
'pandas.core.dtypes.concat.concat_compat', 'concat_compat', (['to_concat'], {'axis': '(0)', 'ea_compat_axis': '(True)'}), '(to_concat, axis=0, ea_compat_axis=True)\n', (18161, 18201), False, 'from pandas.core.dtypes.concat import cast_to_common_type, concat_compat\n'), ((18226, 18262), 'pandas.core.internals.blocks.ensure_block_shape', 'ensure_block_shape', (['concat_values', '(2)'], {}), '(concat_values, 2)\n', (18244, 18262), False, 'from pandas.core.internals.blocks import ensure_block_shape, new_block\n'), ((18298, 18340), 'pandas.core.dtypes.concat.concat_compat', 'concat_compat', (['to_concat'], {'axis': 'concat_axis'}), '(to_concat, axis=concat_axis)\n', (18311, 18340), False, 'from pandas.core.dtypes.concat import cast_to_common_type, concat_compat\n'), ((19430, 19450), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (19438, 19450), True, 'import numpy as np\n'), ((6811, 6849), 'pandas.core.construction.ensure_wrapped_if_datetimelike', 'ensure_wrapped_if_datetimelike', (['values'], {}), '(values)\n', (6841, 6849), False, 'from pandas.core.construction import ensure_wrapped_if_datetimelike\n'), ((11653, 11686), 'pandas.core.dtypes.common.is_dtype_equal', 'is_dtype_equal', (['self.dtype', 'dtype'], {}), '(self.dtype, dtype)\n', (11667, 11686), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((13711, 13745), 'pandas.core.dtypes.common.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['empty_dtype'], {}), '(empty_dtype)\n', (13732, 13745), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((16225, 16264), 'pandas.core.algorithms.take_nd', 'algos.take_nd', (['values', 'indexer'], {'axis': 'ax'}), '(values, indexer, axis=ax)\n', (16238, 16264), True, 'import pandas.core.algorithms as algos\n'), ((17502, 17522), 'pandas.core.dtypes.common.is_1d_only_ea_obj', 'is_1d_only_ea_obj', (['t'], {}), '(t)\n', (17519, 17522), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((6548, 6587), 'numpy.concatenate', 'np.concatenate', (['vals'], {'axis': '(blk.ndim - 1)'}), '(vals, axis=blk.ndim - 1)\n', (6562, 6587), True, 'import numpy as np\n'), ((6699, 6726), 'pandas.core.dtypes.concat.concat_compat', 'concat_compat', (['vals'], {'axis': '(1)'}), '(vals, axis=1)\n', (6712, 6726), False, 'from pandas.core.dtypes.concat import cast_to_common_type, concat_compat\n'), ((6752, 6788), 'pandas.core.internals.blocks.ensure_block_shape', 'ensure_block_shape', (['values', 'blk.ndim'], {}), '(values, blk.ndim)\n', (6770, 6788), False, 'from pandas.core.internals.blocks import ensure_block_shape, new_block\n'), ((11510, 11541), 'pandas.core.dtypes.missing.is_valid_na_for_dtype', 'is_valid_na_for_dtype', (['x', 'dtype'], {}), '(x, dtype)\n', (11531, 11541), False, 'from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna_all\n'), ((13344, 13362), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (13352, 13362), True, 'import numpy as np\n'), ((13778, 13815), 'numpy.full', 'np.full', (['self.shape', 'fill_value.value'], {}), '(self.shape, fill_value.value)\n', (13785, 13815), True, 'import numpy as np\n'), ((13843, 13885), 'pandas.core.arrays.DatetimeArray', 'DatetimeArray', (['i8values'], {'dtype': 'empty_dtype'}), '(i8values, dtype=empty_dtype)\n', (13856, 13885), False, 
'from pandas.core.arrays import DatetimeArray, ExtensionArray\n'), ((13908, 13940), 'pandas.core.dtypes.common.is_1d_only_ea_dtype', 'is_1d_only_ea_dtype', (['empty_dtype'], {}), '(empty_dtype)\n', (13927, 13940), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((18026, 18046), 'pandas.core.dtypes.common.is_1d_only_ea_obj', 'is_1d_only_ea_obj', (['t'], {}), '(t)\n', (18043, 18046), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((20646, 20689), 'pandas.core.dtypes.common.is_dtype_equal', 'is_dtype_equal', (['ju.block.dtype', 'first.dtype'], {}), '(ju.block.dtype, first.dtype)\n', (20660, 20689), False, 'from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_1d_only_ea_obj, is_datetime64tz_dtype, is_dtype_equal, needs_i8_conversion\n'), ((13976, 14009), 'typing.cast', 'cast', (['ExtensionDtype', 'empty_dtype'], {}), '(ExtensionDtype, empty_dtype)\n', (13980, 14009), False, 'from typing import TYPE_CHECKING, Sequence, cast\n'), ((14276, 14308), 'numpy.ones', 'np.ones', (['(nrows,)'], {'dtype': 'np.intp'}), '((nrows,), dtype=np.intp)\n', (14283, 14308), True, 'import numpy as np\n'), ((15111, 15150), 'numpy.empty', 'np.empty', (['self.shape'], {'dtype': 'empty_dtype'}), '(self.shape, dtype=empty_dtype)\n', (15119, 15150), True, 'import numpy as np\n'), ((9646, 9670), 'numpy.diff', 'np.diff', (['ax0_blk_indexer'], {}), '(ax0_blk_indexer)\n', (9653, 9670), True, 'import numpy as np\n')] |
"""Classes to describe clusters and cluster catalogs.
This file contains the main cluster and cluster catalog classes, including
richness computation, etc.
"""
import fitsio
import esutil
import numpy as np
import itertools
import scipy.optimize
import scipy.integrate
import copy
from .solver_nfw import Solver
from .catalog import Catalog, Entry
from .utilities import chisq_pdf, calc_theta_i, MStar, schechter_pdf, nfw_pdf
from .mask import HPMask
from .redsequence import RedSequenceColorPar
from esutil.cosmology import Cosmo
from .galaxy import GalaxyCatalog
from .depth_fitting import calcErrorModel
cluster_dtype_base = [('MEM_MATCH_ID', 'i4'),
('RA', 'f8'),
('DEC', 'f8'),
('Z', 'f4'),
('REFMAG', 'f4'),
('REFMAG_ERR', 'f4'),
('LAMBDA', 'f4'),
('LAMBDA_E', 'f4'),
('Z_LAMBDA', 'f4'),
('Z_LAMBDA_E', 'f4'),
('CG_SPEC_Z', 'f4'),
('Z_SPEC_INIT', 'f4'),
('Z_INIT', 'f4'),
('R_LAMBDA', 'f4'),
('R_MASK', 'f4'),
('SCALEVAL', 'f4'),
('MASKFRAC', 'f4'),
('ZRED', 'f4'),
('ZRED_E', 'f4'),
('ZRED_CHISQ', 'f4'),
('CHISQ', 'f4'),
('Z_LAMBDA_NITER', 'i2'),
('EBV_MEAN', 'f4'),
('LNLAMLIKE', 'f4'),
('LNCGLIKE', 'f4'),
('LNLIKE', 'f4'),
('RA_ORIG', 'f8'),
('DEC_ORIG', 'f8'),
('W', 'f4'),
('DLAMBDA_DZ', 'f4'),
('DLAMBDA_DZ2', 'f4'),
('DLAMBDAVAR_DZ', 'f4'),
('DLAMBDAVAR_DZ2', 'f4'),
('Z_LAMBDA_RAW', 'f4'),
('Z_LAMBDA_E_RAW', 'f4'),
('BKG_LOCAL', 'f4'),
('LIM_EXPTIME', 'f4'),
('LIM_LIMMAG', 'f4'),
('LIM_LIMMAG_HARD', 'f4'),
('LAMBDA_C', 'f4'),
('LAMBDA_CE', 'f4'),
('NCENT_GOOD', 'i2'),
('MASKGAL_INDEX', 'i2')]
member_dtype_base = [('MEM_MATCH_ID', 'i4'),
('ID', 'i8'),
('Z', 'f4'),
('RA', 'f8'),
('DEC', 'f8'),
('R', 'f4'),
('P', 'f4'),
('PFREE', 'f4'),
('PCOL', 'f4'),
('THETA_I', 'f4'),
('THETA_R', 'f4'),
('REFMAG', 'f4'),
('REFMAG_ERR', 'f4'),
('ZRED', 'f4'),
('ZRED_E', 'f4'),
('ZRED_CHISQ','f4'),
('CHISQ', 'f4'),
('EBV', 'f4'),
('ZSPEC', 'f4')]
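# A minimal sketch (plain numpy, nothing redmapper-specific): these dtype
# lists can be used directly to allocate zeroed catalog records, e.g.
# >>> import numpy as np
# >>> rec = np.zeros(1, dtype=cluster_dtype_base)
# >>> rec['LAMBDA']
# array([0.], dtype=float32)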
class Cluster(Entry):
"""
Class for a single galaxy cluster.
This class includes methods to perform richness computations on individual clusters using the associated neighbor galaxy catalog.
"""
def __init__(self, cat_vals=None, r0=None, beta=None, config=None, zredstr=None, bkg=None, cbkg=None, neighbors=None, zredbkg=None, dtype=None):
"""
Instantiate a Cluster object.
Note that while all the associated config, zredstr, bkg, neighbors are
optional to instantiate the object, they are necessary in order to
perform any calculations (but not to be able to store a representation
of the cluster catalog.) This also allows these to be set "lazily"
after instantiation.
Parameters
----------
cat_vals: `np.ndarray`, optional
Existing catalog values to pre-fill. Default is None (zeros).
r0: `float`, optional
Richness/radius scale parameter. Default to 1.0 h^-1 Mpc.
beta: `float`, optional
Richness/radius slope parameter. Default to 0.2.
config: `redmapper.Configuration`, optional
Configuration information. Default is None.
zredstr: `redmapper.RedSequenceColorPar`, optional
Red sequence parameterization. Default is None.
bkg: `redmapper.Background`, optional
Galaxy background. Default is None.
cbkg: `redmapper.ColorBackground`, optional
Galaxy color-only background. Default is None.
neighbors: `redmapper.GalaxyCatalog`, optional
Neighbor galaxy catalog. Default is None.
zredbkg: `redmapper.ZredBackground`, optional
Zred background. Default is None.
"""
if cat_vals is None:
if dtype is not None:
cat_vals = np.zeros(1, dtype=dtype)
else:
if config is not None:
cat_vals = np.zeros(1, dtype=config.cluster_dtype)
else:
# This might lead to bugs down the line, but let's try
cat_vals = np.zeros(1, dtype=cluster_dtype_base)
# Start by taking catalog values and stuffing them into a nice Entry format
# we need to extend if necessary? Or just the catalog?
super(Cluster, self).__init__(cat_vals)
self.r0 = 1.0 if r0 is None else r0
self.beta = 0.2 if beta is None else beta
# FIXME: this should explicitly set our default cosmology
if config is None:
self.cosmo = Cosmo()
else:
self.cosmo = config.cosmo
self.config = config
self.zredstr = zredstr
self.bkg = bkg
self.cbkg = cbkg
self.zredbkg = zredbkg
self.set_neighbors(neighbors)
self._mstar = None
self._mpc_scale = None
if self.z > 0.0 and self.zredstr is not None:
self.redshift = self.z
else:
self._redshift = None
def reset(self):
"""
Reset the cluster richness and redshift to -1.0.
"""
# reset values to defaults
self.Lambda = -1.0
self.z_lambda = -1.0
def set_neighbors(self, neighbors):
"""
Set the neighbor galaxy catalog from a list of neighbors. The input
neighbor catalog is deep-copied.
Parameters
----------
neighbors: `redmapper.GalaxyCatalog`
"""
if (neighbors.__class__ is not GalaxyCatalog and neighbors is not None):
raise ValueError("Cluster neighbors must be a GalaxyCatalog")
self.neighbors = None
if (neighbors is not None):
# @jacobic: this avoids pycharm debugger detachment (vs. self.neighbors = copy.deepcopy(neighbors))
self.neighbors = GalaxyCatalog(neighbors._ndarray.copy())
# extra fields
neighbor_extra_dtype = [('R', 'f8'),
('DIST', 'f8'),
('CHISQ', 'f8'),
('ZRED_CHISQ', 'f8'),
('PFREE', 'f8'),
('THETA_I', 'f8'),
('THETA_R', 'f8'),
('P', 'f8'),
('PCOL', 'f8'),
('PMEM', 'f8'),
('INDEX', 'i8'),
('CENTERING_CAND', 'i2')]
dtype_augment = [dt for dt in neighbor_extra_dtype if dt[0].lower() not in self.neighbors.dtype.names]
if len(dtype_augment) > 0:
self.neighbors.add_fields(dtype_augment)
if 'PFREE' in [dt[0] for dt in dtype_augment]:
# The PFREE is new, so we must set it to 1s
self.neighbors.pfree[:] = 1.0
if ('ZRED_CHISQ', 'f8') in dtype_augment:
# If we've had to add this, we want to copy the chisq values
# since they were from the "zred" side
self.neighbors.zred_chisq = self.neighbors.chisq
def find_neighbors(self, radius, galcat, megaparsec=False, maxmag=None):
"""
Find neighbors from a full galaxy catalog.
Parameters
----------
radius: `float`
Radius in degrees or megaparsec to search for neighbors
galcat: `redmapper.GalaxyCatalog`
Full catalog of galaxies to look for neighbors
megaparsec: `bool`, optional
The radius has units of megaparsec? Default is False.
maxmag: `float`, optional
The maximum refmag to store the neighbors. Default is None (no cuts).
"""
if radius is None:
raise ValueError("A radius must be specified")
if galcat is None:
raise ValueError("A GalaxyCatalog object must be specified.")
if megaparsec:
radius_degrees = radius / self.mpc_scale
else:
radius_degrees = radius
indices, dists = galcat.match_one(self.ra, self.dec, radius_degrees)
if maxmag is not None:
use, = np.where(galcat.refmag[indices] <= maxmag)
indices = indices[use]
dists = dists[use]
self.set_neighbors(galcat[indices])
self.neighbors.dist = dists
self.neighbors.index = indices
# And we need to compute the r values here
self._compute_neighbor_r()
def update_neighbors_dist(self):
"""
Update the distance from the neighbors to the central galaxy (in degrees)
"""
self.neighbors.dist = esutil.coords.sphdist(self.ra, self.dec,
self.neighbors.ra, self.neighbors.dec)
self._compute_neighbor_r()
def clear_neighbors(self):
"""
Clear out all neighbors, to save memory.
"""
# Clear out the memory used by the neighbors.
self.neighbors = None
def _calc_radial_profile(self, idx=None, rscale=0.15):
"""
Internal method for computing radial profile weights.
Parameters
----------
idx: `np.array`, optional
Integer indices to compute. Default is None (all).
rscale: `float`, optional
r_s for nfw profile. Default is 0.15
Returns
-------
sigx: `np.array`
sigma(x) from radial profile
"""
if idx is None:
idx = np.arange(len(self.neighbors))
sigx = nfw_pdf(self.neighbors.r[idx], rscale=rscale)
return sigx
def _calc_luminosity(self, normmag, idx=None):
"""
Internal method to compute luminosity filter
Parameters
----------
normmag: `float`
Normalization magnitude
idx: `np.array`, optional
Integer indices to compute. Default is None (all).
Returns
-------
phi: `np.array`
phi(x) for the cluster
"""
if idx is None:
idx = np.arange(len(self.neighbors))
zind = self.zredstr.zindex(self._redshift)
refind = self.zredstr.lumrefmagindex(normmag)
normalization = self.zredstr.lumnorm[refind, zind]
mstar = self.zredstr.mstar(self._redshift)
phi = schechter_pdf(self.neighbors.refmag[idx], alpha=self.zredstr.alpha, mstar=mstar)
return phi / normalization
def calc_bkg_density(self, r, chisq, refmag):
"""
Internal method to compute background filter.
Parameters
----------
r: `np.array`
Radius (megaparsec)
chisq: `np.array`
Chi-squared values at redshift of the cluster
refmag: `np.array`
Reference magnitude of the galaxies
Returns
-------
bcounts: `np.array`
b(x) for the neighbors
"""
sigma_g = self.bkg.sigma_g_lookup(self._redshift, chisq, refmag)
return 2. * np.pi * r * (sigma_g/self.mpc_scale**2.)
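# Dimensional note (inferred from the expression above, not stated in the
# original): sigma_g is a surface density per square degree, so dividing by
# mpc_scale**2 (Mpc/deg, squared) converts it to per square Mpc; the factor
# 2*pi*r then weights by the circumference of the annulus at radius r.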
def calc_cbkg_density(self, r, col_index, col, refmag):
"""
Internal method to compute color background filter.
Parameters
----------
r: `np.array`
Radius (megaparsec)
col_index: `int`
Index of color used
col: `np.array`
Float array of colors
refmag: `np.array`
Reference magnitude of galaxies
Returns
-------
bcounts: `np.array`
b(x) for the neighbors
"""
sigma_g = self.cbkg.sigma_g_diagonal(col_index, col, refmag)
return 2. * np.pi * r * (sigma_g / self.mpc_scale**2.)
def calc_zred_bkg_density(self, r, zred, refmag):
"""
Internal method to compute zred background filter
Parameters
----------
r: `np.array`
Radius (megaparsec)
zred: `np.array`
Zred values
refmag: `np.array`
Reference magnitude of galaxies
Returns
-------
bcounts: `np.array`
b(x) for the neighbors
"""
if self.zredbkg is None:
raise AttributeError("zredbkg has not been set for this cluster")
sigma_g = self.zredbkg.sigma_g_lookup(zred, refmag)
return 2. * np.pi * r * (sigma_g / self.mpc_scale**2.)
def compute_bkg_local(self, mask, depth):
"""
Compute the local background relative to the global.
Parameters
----------
mask: `redmapper.Mask`
Footprint mask for survey
depth: `redmapper.Depthmap` or `redmapper.Depthlim`
Depth map for survey or depth fitting class
Returns
-------
bkg_local: `float`
Local background relative to global average for redshift
"""
ras = self.ra + (mask.maskgals.x_uniform/self.mpc_scale)/np.cos(np.deg2rad(self.dec))
decs = self.dec + mask.maskgals.y_uniform/self.mpc_scale
maxmag = self.mstar - 2.5*np.log10(self.config.lval_reference)
maskgals_mark = mask.compute_radmask(ras, decs)
maskgals_refmag = self.mstar + mask.maskgals.m
try:
maskgals_depth = depth.get_depth_values(ras, decs)[0]
except AttributeError:
# We have a "Depthlim" limit.
limpars, fail = calcErrorModel(self.neighbors.refmag, self.neighbors.refmag_err, calcErr=False)
if fail:
maskgals_depth = depth.initpars['LIMMAG']
else:
maskgals_depth = limpars['LIMMAG']
sigma_g_maskgals = self.bkg.sigma_g_lookup(self._redshift, mask.maskgals.chisq, mask.maskgals.refmag)
bright_enough, = np.where((mask.maskgals.refmag < maxmag) & (np.isfinite(sigma_g_maskgals))
&
(mask.maskgals.chisq_pdf > 0.0) & (mask.maskgals.lum_pdf > 0.0) &
(mask.maskgals.refmag < maskgals_depth))
# Predicted number density according to global bkg
prediction = np.sum(sigma_g_maskgals[bright_enough].astype(np.float64) / (mask.maskgals.chisq_pdf[bright_enough].astype(np.float64) * mask.maskgals.lum_pdf[bright_enough].astype(np.float64))) / float(bright_enough.size)
# What is the annulus area?
in_annulus, = np.where((mask.maskgals.r_uniform > self.config.bkg_local_annuli[0]) &
(mask.maskgals.r_uniform < self.config.bkg_local_annuli[1]))
in_annulus_gd, = np.where(maskgals_mark[in_annulus])
annulus_area = (np.pi*((self.config.bkg_local_annuli[1]/self.mpc_scale)**2. -
(self.config.bkg_local_annuli[0]/self.mpc_scale)**2.) *
(float(in_annulus_gd.size) / float(in_annulus.size)))
try:
neighbors_depth = depth.get_depth_values(self.neighbors.ra, self.neighbors.dec)[0]
except AttributeError:
neighbors_depth = maskgals_depth
neighbors_in_annulus, = np.where((self.neighbors.r > self.config.bkg_local_annuli[0]) &
(self.neighbors.r < self.config.bkg_local_annuli[1]) &
(self.neighbors.refmag < maxmag) &
(self.neighbors.chisq < mask.maskgals.chisq.max()) &
(self.neighbors.refmag < neighbors_depth))
bkg_density_in_annulus = float(neighbors_in_annulus.size) / annulus_area
bkg_local = bkg_density_in_annulus / prediction
return bkg_local
def calc_richness(self, mask, calc_err=True, index=None):
"""
Calculate the richness for the cluster.
Parameters
----------
mask: `redmapper.Mask`
Footprint mask for survey
calc_err: `bool`, optional
Calculate the richness error? Default is True.
index: `np.array`, optional
Integer array of neighbor indices. Default is None (all).
Returns
-------
lam: `float`
Cluster richness. Will be < 0 when no cluster found.
"""
#set index for slicing self.neighbors
if index is not None:
idx = index
else:
idx = np.arange(len(self.neighbors))
maxmag = self.mstar - 2.5 * np.log10(self.config.lval_reference)
self.neighbors.chisq[idx] = self.zredstr.calculate_chisq(self.neighbors[idx], self._redshift)
rho = chisq_pdf(self.neighbors.chisq[idx], self.zredstr.ncol)
nfw = self._calc_radial_profile(idx=idx)
phi = self._calc_luminosity(maxmag, idx=idx) #phi is lumwt in the IDL code
ucounts = (2*np.pi*self.neighbors.r[idx]) * nfw * phi * rho
bcounts = self.calc_bkg_density(self.neighbors.r[idx], self.neighbors.chisq[idx],
self.neighbors.refmag[idx])
theta_i = calc_theta_i(self.neighbors.refmag[idx], self.neighbors.refmag_err[idx],
maxmag, self.zredstr.limmag)
cpars = mask.calc_maskcorr(self.mstar, maxmag, self.zredstr.limmag)
try:
w = theta_i * self.neighbors.pfree[idx]
except AttributeError:
w = theta_i * np.ones_like(ucounts)
richness_obj = Solver(self.r0, self.beta, ucounts, bcounts,
self.neighbors.r[idx], w,
cpars=cpars, rsig=self.config.rsig)
# Call the solving routine
# this returns five items: lam_obj, p, pmem, rlam, theta_r
# Note that pmem used to be called "wvals" in IDL code
# pmem = p * pfree * theta_i * theta_r
lam, p, pmem, rlam, theta_r = richness_obj.solve_nfw()
# reset before setting subsets
self.neighbors.theta_i[:] = 0.0
self.neighbors.theta_r[:] = 0.0
self.neighbors.p[:] = 0.0
self.neighbors.pcol[:] = 0.0
self.neighbors.pmem[:] = 0.0
# This also checks for crazy invalid values
if lam < 0.0 or pmem.max() == 0.0:
lam = -1.0
lam_err = -1.0
self.scaleval = -1.0
else:
# Only do this computation if we have a valid measurement
bar_pmem = np.sum(pmem**2.0)/np.sum(pmem)
cval = np.clip(np.sum(cpars * rlam**np.arange(cpars.size, dtype=float)),
0.0, None)
self.scaleval = np.absolute(lam / np.sum(pmem))
lam_unscaled = lam / self.scaleval
if calc_err:
lam_cerr = self.calc_lambdacerr(mask.maskgals, self.mstar,
lam, rlam, pmem, cval, self.config.dldr_gamma)
lam_err = np.sqrt((1-bar_pmem) * lam_unscaled * self.scaleval**2. + lam_cerr**2.)
# calculate pcol -- color only. Don't need to worry about nfw norm!
ucounts = rho*phi
pcol = ucounts * lam/(ucounts * lam + bcounts)
bad, = np.where((self.neighbors.r[idx] > rlam) | (self.neighbors.refmag[idx] > maxmag) |
(self.neighbors.refmag[idx] > self.zredstr.limmag) | (~np.isfinite(pcol)))
pcol[bad] = 0.0
# and set the values
self.neighbors.theta_i[idx] = theta_i
self.neighbors.theta_r[idx] = theta_r
self.neighbors.p[idx] = p
self.neighbors.pcol[idx] = pcol
self.neighbors.pmem[idx] = pmem
# set values and return
self.Lambda = lam
self.r_lambda = rlam
if calc_err:
self.Lambda_e = lam_err
else:
self.Lambda_e = 0.0
return lam
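# Minimal usage sketch (hypothetical, pre-loaded objects; not from the
# original source):
# >>> cluster = Cluster(config=config, zredstr=zredstr, bkg=bkg)
# >>> cluster.redshift = 0.3
# >>> cluster.find_neighbors(1.5, galcat, megaparsec=True)
# >>> lam = cluster.calc_richness(mask)  # also fills neighbor p/pcol/pmem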
def calc_lambdacerr(self, maskgals, mstar, lam, rlam, pmem, cval, gamma):
"""
Calculate richness error from masking only.
Parameters
----------
maskgals: `redmapper.Catalog`
Mask galaxies for monte carlo (see `redmapper.Mask`)
mstar: `float`
mstar at redshift of the cluster
lam: `float`
Richness of cluster
rlam: `float`
Radius of cluster
pmem: `np.array`
Array of pmem membership probabilities
cval: `float`
Total mask value c
gamma: `float`
Slope of the dlambda/dradius relation (on average)
Returns
-------
lam_err: `float`
Error on richness due to masking.
"""
dof = self.zredstr.ncol
limmag = self.zredstr.limmag
use, = np.where(maskgals.r < rlam)
mark = maskgals.mark[use]
refmag = mstar + maskgals.m[use]
cwt = maskgals.cwt[use]
nfw = maskgals.nfw[use]
lumwt = maskgals.lumwt[use]
chisq = maskgals.chisq[use]
r = maskgals.r[use]
# normalizing nfw
logrc = np.log(rlam)
norm = np.exp(1.65169 - 0.547850*logrc + 0.138202*logrc**2. -
0.0719021*logrc**3. - 0.0158241*logrc**4.-0.000854985*logrc**5.)
nfw = norm*nfw
ucounts = cwt*nfw*lumwt
# Set too-faint galaxy magnitudes to just below the limiting magnitude
faint, = np.where(refmag >= limmag)
refmag_for_bcounts = np.copy(refmag)
refmag_for_bcounts[faint] = limmag-0.01
bcounts = self.calc_bkg_density(r, chisq, refmag_for_bcounts)
out, = np.where((refmag > limmag) | (mark == 0))
if out.size == 0 or cval < 0.01:
lam_err = 0.0
else:
p_out = lam*ucounts[out] / (lam*ucounts[out] + bcounts[out])
varc0 = (1./lam) * (1./use.size) * np.sum(p_out)
sigc = np.sqrt(varc0 - varc0**2.)
k = lam**2. / np.sum(pmem**2.)
lam_err = k*sigc/(1. - self.beta*gamma)
return lam_err
def calc_richness_fit(self, mask, col_index, centcolor_in=None, rcut=0.5, mingal=5, sigint=0.05, calc_err=False):
"""
Compute richness for a cluster by fitting the red sequence in a single color.
This is approximate and is used for the first iteration of training.
Parameters
----------
mask: `redmapper.Mask`
Footprint mask for survey
col_index: `int`
Index of color to use
centcolor_in: `float`, optional
Central color to use for reference. Default is None (use BCG color)
rcut: `float`, optional
Maximum radius (megaparsec). Default is 0.5.
mingal: `int`, optional
Minimum number of galaxies to try fit. Default is 5.
sigint: `float`, optional
Default intrinsic scatter. Default is 0.05.
calc_err: `bool`, optional
Compute richness error? Default is False.
Returns
-------
lam: `float`
cluster richness. Will be < 0 when no cluster found.
"""
badlam = -10.0
s2p = np.sqrt(2. * np.pi)
maxmag = self.mstar - 2.5 * np.log10(self.config.lval_reference)
minmag = self.mstar - 2.5 * np.log10(20.0)
col = self.neighbors.galcol[:, col_index]
col_err = self.neighbors.galcol_err[:, col_index]
colrange = self.cbkg.get_colrange(col_index)
guse, = np.where((self.neighbors.refmag > minmag) &
(self.neighbors.refmag < maxmag) &
(self.neighbors.r < rcut) &
(col > colrange[0]) &
(col < colrange[1]))
if guse.size < mingal:
self.scaleval = -1.0
return badlam
if centcolor_in is None:
# We were not passed a probable central color
# Use the BCG (central) color as the peak guess
ind = np.argmin(self.neighbors.r)
test, = np.where(guse == ind)
if test.size == 0:
# Nominal BCG not in color range. All bad!
return badlam
centcolor = col[ind]
else:
centcolor = centcolor_in
cerr = np.sqrt(col_err**2. + sigint**2.)
in2sig, = np.where((np.abs(col[guse] - centcolor) < 2. * cerr[guse]))
if in2sig.size < mingal:
self.scaleval = -1.0
return badlam
pivot = np.median(self.neighbors.refmag[guse])
fit = np.polyfit(self.neighbors.refmag[guse[in2sig]] - pivot,
col[guse[in2sig]],
1,
w=1. / col_err[guse[in2sig]])
mpivot = fit[0]
bpivot = fit[1]
d = col - (mpivot * (self.neighbors.refmag - pivot) + bpivot)
d_err_net = np.sqrt(col_err**2. + sigint**2.)
d_wt = (1. / (s2p * d_err_net)) * np.exp(-(d**2.) / (2. * d_err_net**2.))
# The nfw filter as normal
nfw = self._calc_radial_profile()
theta_i = calc_theta_i(self.neighbors.refmag, self.neighbors.refmag_err, maxmag, self.config.limmag_catalog)
phi = self._calc_luminosity(maxmag)
ucounts = (2. * np.pi * self.neighbors.r) * d_wt * nfw * phi
bcounts = self.calc_cbkg_density(self.neighbors.r, col_index, col, self.neighbors.refmag)
cpars = mask.calc_maskcorr(self.mstar, maxmag, self.config.limmag_catalog)
try:
w = theta_i * self.neighbors.pfree
except AttributeError:
w = theta_i * np.ones_like(ucounts)
richness_obj = Solver(self.r0, self.beta, ucounts, bcounts, self.neighbors.r, w, cpars=cpars)
lam, p, pmem, rlam, theta_r = richness_obj.solve_nfw()
bar_pmem = np.sum(pmem**2.) / np.sum(pmem)
# reset
self.neighbors.theta_i[:] = 0.0
self.neighbors.theta_r[:] = 0.0
self.neighbors.p[:] = 0.0
self.neighbors.pcol[:] = 0.0
self.neighbors.pmem[:] = 0.0
if lam < 0.0:
lam_err = -1.0
self.scaleval = -1.0
else:
self.scaleval = np.absolute(lam / np.sum(pmem))
lam_unscaled = lam / self.scaleval
if calc_err:
# cval mirrors the mask-correction normalization used in calc_richness;
# it was previously undefined here (a latent NameError, since calc_err
# defaults to False for this method)
cval = np.clip(np.sum(cpars * rlam**np.arange(cpars.size, dtype=float)),
0.0, None)
lam_cerr = self.calc_lambdacerr(mask.maskgals, self.mstar,
lam, rlam, pmem, cval, self.config.dldr_gamma)
lam_err = np.sqrt((1. - bar_pmem) * lam_unscaled + lam_cerr**2.)
# calculate pcol (color only)
ucounts = d_wt * phi
bcounts = (bcounts / (2. * np.pi * self.neighbors.r)) * np.pi * rlam**2.
pcol = ucounts * lam / (ucounts * lam + bcounts)
bad, = np.where((self.neighbors.r > rlam) | (self.neighbors.refmag > maxmag) |
(self.neighbors.refmag > self.config.limmag_catalog))
pcol[bad] = 0.0
# And set the values
self.neighbors.theta_i[:] = theta_i
self.neighbors.theta_r[:] = theta_r
self.neighbors.p[:] = p
self.neighbors.pcol[:] = pcol
self.neighbors.pmem[:] = pmem
# Set values and return
self.Lambda = lam
self.r_lambda = rlam
if calc_err:
self.Lambda_e = lam_err
else:
self.Lambda_e = 0.0
return lam
@property
def redshift(self):
"""Current cluster redshift."""
return self._redshift
@redshift.setter
def redshift(self, value):
"""Set the cluster redshift.
Parameters
----------
value: `float`
New redshift
"""
if (value < 0.0):
raise ValueError("Cannot set redshift to < 0.0")
# This forces things to not blow up...
self._redshift = np.clip(value, 0.01, None)
self._update_mstar()
self._update_mpc_scale()
self._compute_neighbor_r()
# want to change this and mpc_scale to properties,
# and internal update methods. When you update the redshift,
# all of these things should be kept in sync. That would be pretty cool.
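# Sketch of the setter contract (hedged; assumes zredstr and the cosmology
# are set): assigning the redshift refreshes every derived quantity at once.
# >>> cluster.redshift = 0.25
# >>> cluster.mstar      # recomputed from zredstr at z=0.25
# >>> cluster.mpc_scale  # recomputed from the cosmology (Mpc / degree)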
@property
def mstar(self):
"""Get the cluster mstar at self.redshift"""
return self._mstar
def _update_mstar(self):
"""Update the stored mstar based on self.redshift"""
self._mstar = self.zredstr.mstar(self._redshift)
@property
def mpc_scale(self):
"""
Angular scaling in units of Mpc / degree
"""
return self._mpc_scale
def _update_mpc_scale(self):
"""
Compute the scaling, in units of Mpc / degree
"""
self._mpc_scale = np.radians(1.) * self.cosmo.Da(0, self._redshift)
def _compute_neighbor_r(self):
"""
Compute the radius in Mpc for the neighbors, given the current redshift
and neighbor dist (in degrees).
"""
if self.neighbors is not None and self._redshift is not None:
# Clipping at 1e-6 to avoid singularities.
self.neighbors.r = np.clip(self.mpc_scale * self.neighbors.dist, 1e-6, None)
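# Worked example with hypothetical numbers: if Da(0, z) = 1700 Mpc, then
# mpc_scale = radians(1) * 1700 ~= 29.7 Mpc/deg, so a neighbor at
# dist = 0.1 deg gets r ~= 2.97 Mpc.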
def copy(self):
"""
Copy the current cluster, including deep copying the neighbors.
Returns
-------
cluster: `redmapper.Cluster`
Deep copy of the cluster
"""
cluster = self.__copy__()
cluster.redshift = self.redshift
return cluster
def __copy__(self):
# This returns a copy of the cluster, and note that the neighbors will
# be deepcopied which is what we want.
return Cluster(r0=self.r0,
beta=self.beta,
config=self.config,
zredstr=self.zredstr,
bkg=self.bkg,
cbkg=self.cbkg,
neighbors=self.neighbors)
class ClusterCatalog(Catalog):
"""
Class for a catalog of clusters.
This class comprises a set of clusters and their neighbors.
"""
entry_class = Cluster
def __init__(self, array, **kwargs):
"""
Instantiate a ClusterCatalog
Parameters
----------
array: `np.ndarray`
Array of initialization values for the cluster catalog
r0: `float`, optional
Richness/radius scale parameter. Default to 1.0 h^-1 Mpc.
beta: `float`, optional
Richness/radius slope parameter. Default to 0.2.
config: `redmapper.Configuration`, optional
Configuration information. Default is None.
zredstr: `redmapper.RedSequenceColorPar`, optional
Red sequence parameterization. Default is None.
bkg: `redmapper.Background`, optional
Galaxy background. Default is None.
cbkg: `redmapper.ColorBackground`, optional
Galaxy color-only background. Default is None.
zredbkg: `redmapper.ZredBackground`, optional
Zred background. Default is None.
"""
super(ClusterCatalog, self).__init__(array)
self.r0 = kwargs.pop('r0', None)
self.beta = kwargs.pop('beta', None)
self.zredstr = kwargs.pop('zredstr', None)
self.config = kwargs.pop('config', None)
self.bkg = kwargs.pop('bkg', None)
self.cbkg = kwargs.pop('cbkg', None)
self.zredbkg = kwargs.pop('zredbkg', None)
dtype = kwargs.pop('dtype', None)
if dtype is not None:
cluster_dtype = dtype
else:
if self.config is not None:
cluster_dtype = self.config.cluster_dtype
else:
cluster_dtype = cluster_dtype_base
# and if config is set then use that cluster_dtype because that
# will have all the other stuff filled as well.
dtype_augment = [dt for dt in cluster_dtype if dt[0].lower() not in self._ndarray.dtype.names]
if len(dtype_augment) > 0:
self.add_fields(dtype_augment)
@classmethod
def from_catfile(cls, filename, **kwargs):
"""
Instantiate a ClusterCatalog from a catalog file
Parameters
----------
filename: `str`
Filename of catalog file
r0: `float`, optional
Richness/radius scale parameter. Default to 1.0 h^-1 Mpc.
beta: `float`, optional
Richness/radius slope parameter. Default to 0.2.
config: `redmapper.Configuration`, optional
Configuration information. Default is None.
zredstr: `redmapper.RedSequenceColorPar`, optional
Red sequence parameterization. Default is None.
bkg: `redmapper.Background`, optional
Galaxy background. Default is None.
cbkg: `redmapper.ColorBackground`, optional
Galaxy color-only background. Default is None.
zredbkg: `redmapper.ZredBackground`, optional
Zred background. Default is None.
"""
cat = fitsio.read(filename, ext=1, upper=True)
return cls(cat, **kwargs)
@classmethod
def zeros(cls, size, **kwargs):
"""
Instantiate a ClusterCatalog of a given size, filled with zeros.
Parameters
----------
size: `int`
Length of cluster catalog
r0: `float`, optional
Richness/radius scale parameter. Default to 1.0 h^-1 Mpc.
beta: `float`, optional
Richness/radius slope parameter. Default to 0.2.
config: `redmapper.Configuration`, optional
Configuration information. Default is None.
zredstr: `redmapper.RedSequenceColorPar`, optional
Red sequence parameterization. Default is None.
bkg: `redmapper.Background`, optional
Galaxy background. Default is None.
cbkg: `redmapper.ColorBackground`, optional
Galaxy color-only background. Default is None.
zredbkg: `redmapper.ZredBackground`, optional
Zred background. Default is None.
"""
dtype = kwargs.get('dtype', None)
if dtype is not None:
cluster_dtype = dtype
else:
cluster_dtype = cluster_dtype_base
return cls(np.zeros(size, dtype=cluster_dtype), **kwargs)
def __getitem__(self, key):
if isinstance(key, int):
# Note that if we have members, we can associate them with the cluster
# here.
return Cluster(cat_vals=self._ndarray.__getitem__(key),
r0=self.r0,
beta=self.beta,
zredstr=self.zredstr,
config=self.config,
bkg=self.bkg,
cbkg=self.cbkg,
zredbkg=self.zredbkg)
else:
return ClusterCatalog(self._ndarray.__getitem__(key),
r0=self.r0,
beta=self.beta,
zredstr=self.zredstr,
config=self.config,
bkg=self.bkg,
cbkg=self.cbkg,
zredbkg=self.zredbkg)
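# Minimal usage sketch (filename and keyword objects are hypothetical):
# >>> cat = ClusterCatalog.from_catfile("run_catalog.fit", config=config)
# >>> cat[0]     # -> Cluster sharing the catalog's config and backgrounds
# >>> cat[0:10]  # -> ClusterCatalog slice with the same shared attributes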
| [
"numpy.clip",
"numpy.radians",
"numpy.log10",
"numpy.sqrt",
"numpy.polyfit",
"numpy.log",
"numpy.isfinite",
"numpy.arange",
"numpy.where",
"fitsio.read",
"numpy.exp",
"numpy.argmin",
"numpy.abs",
"esutil.coords.sphdist",
"numpy.deg2rad",
"numpy.copy",
"numpy.ones_like",
"numpy.medi... | [((9866, 9945), 'esutil.coords.sphdist', 'esutil.coords.sphdist', (['self.ra', 'self.dec', 'self.neighbors.ra', 'self.neighbors.dec'], {}), '(self.ra, self.dec, self.neighbors.ra, self.neighbors.dec)\n', (9887, 9945), False, 'import esutil\n'), ((15578, 15714), 'numpy.where', 'np.where', (['((mask.maskgals.r_uniform > self.config.bkg_local_annuli[0]) & (mask.\n maskgals.r_uniform < self.config.bkg_local_annuli[1]))'], {}), '((mask.maskgals.r_uniform > self.config.bkg_local_annuli[0]) & (\n mask.maskgals.r_uniform < self.config.bkg_local_annuli[1]))\n', (15586, 15714), True, 'import numpy as np\n'), ((15766, 15801), 'numpy.where', 'np.where', (['maskgals_mark[in_annulus]'], {}), '(maskgals_mark[in_annulus])\n', (15774, 15801), True, 'import numpy as np\n'), ((21832, 21859), 'numpy.where', 'np.where', (['(maskgals.r < rlam)'], {}), '(maskgals.r < rlam)\n', (21840, 21859), True, 'import numpy as np\n'), ((22167, 22179), 'numpy.log', 'np.log', (['rlam'], {}), '(rlam)\n', (22173, 22179), True, 'import numpy as np\n'), ((22198, 22345), 'numpy.exp', 'np.exp', (['(1.65169 - 0.54785 * logrc + 0.138202 * logrc ** 2.0 - 0.0719021 * logrc **\n 3.0 - 0.0158241 * logrc ** 4.0 - 0.000854985 * logrc ** 5.0)'], {}), '(1.65169 - 0.54785 * logrc + 0.138202 * logrc ** 2.0 - 0.0719021 * \n logrc ** 3.0 - 0.0158241 * logrc ** 4.0 - 0.000854985 * logrc ** 5.0)\n', (22204, 22345), True, 'import numpy as np\n'), ((22477, 22503), 'numpy.where', 'np.where', (['(refmag >= limmag)'], {}), '(refmag >= limmag)\n', (22485, 22503), True, 'import numpy as np\n'), ((22533, 22548), 'numpy.copy', 'np.copy', (['refmag'], {}), '(refmag)\n', (22540, 22548), True, 'import numpy as np\n'), ((22684, 22725), 'numpy.where', 'np.where', (['((refmag > limmag) | (mark == 0))'], {}), '((refmag > limmag) | (mark == 0))\n', (22692, 22725), True, 'import numpy as np\n'), ((24220, 24240), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (24227, 24240), True, 'import numpy as np\n'), ((24544, 24698), 'numpy.where', 'np.where', (['((self.neighbors.refmag > minmag) & (self.neighbors.refmag < maxmag) & (\n self.neighbors.r < rcut) & (col > colrange[0]) & (col < colrange[1]))'], {}), '((self.neighbors.refmag > minmag) & (self.neighbors.refmag < maxmag\n ) & (self.neighbors.r < rcut) & (col > colrange[0]) & (col < colrange[1]))\n', (24552, 24698), True, 'import numpy as np\n'), ((25347, 25386), 'numpy.sqrt', 'np.sqrt', (['(col_err ** 2.0 + sigint ** 2.0)'], {}), '(col_err ** 2.0 + sigint ** 2.0)\n', (25354, 25386), True, 'import numpy as np\n'), ((25568, 25606), 'numpy.median', 'np.median', (['self.neighbors.refmag[guse]'], {}), '(self.neighbors.refmag[guse])\n', (25577, 25606), True, 'import numpy as np\n'), ((25622, 25735), 'numpy.polyfit', 'np.polyfit', (['(self.neighbors.refmag[guse[in2sig]] - pivot)', 'col[guse[in2sig]]', '(1)'], {'w': '(1.0 / col_err[guse[in2sig]])'}), '(self.neighbors.refmag[guse[in2sig]] - pivot, col[guse[in2sig]], \n 1, w=1.0 / col_err[guse[in2sig]])\n', (25632, 25735), True, 'import numpy as np\n'), ((25944, 25983), 'numpy.sqrt', 'np.sqrt', (['(col_err ** 2.0 + sigint ** 2.0)'], {}), '(col_err ** 2.0 + sigint ** 2.0)\n', (25951, 25983), True, 'import numpy as np\n'), ((28942, 28968), 'numpy.clip', 'np.clip', (['value', '(0.01)', 'None'], {}), '(value, 0.01, None)\n', (28949, 28968), True, 'import numpy as np\n'), ((34115, 34155), 'fitsio.read', 'fitsio.read', (['filename'], {'ext': '(1)', 'upper': '(True)'}), '(filename, ext=1, upper=True)\n', (34126, 34155), False, 'import 
fitsio\n'), ((5734, 5741), 'esutil.cosmology.Cosmo', 'Cosmo', ([], {}), '()\n', (5739, 5741), False, 'from esutil.cosmology import Cosmo\n'), ((9375, 9417), 'numpy.where', 'np.where', (['(galcat.refmag[indices] <= maxmag)'], {}), '(galcat.refmag[indices] <= maxmag)\n', (9383, 9417), True, 'import numpy as np\n'), ((22961, 22990), 'numpy.sqrt', 'np.sqrt', (['(varc0 - varc0 ** 2.0)'], {}), '(varc0 - varc0 ** 2.0)\n', (22968, 22990), True, 'import numpy as np\n'), ((25055, 25082), 'numpy.argmin', 'np.argmin', (['self.neighbors.r'], {}), '(self.neighbors.r)\n', (25064, 25082), True, 'import numpy as np\n'), ((25103, 25124), 'numpy.where', 'np.where', (['(guse == ind)'], {}), '(guse == ind)\n', (25111, 25124), True, 'import numpy as np\n'), ((26020, 26064), 'numpy.exp', 'np.exp', (['(-d ** 2.0 / (2.0 * d_err_net ** 2.0))'], {}), '(-d ** 2.0 / (2.0 * d_err_net ** 2.0))\n', (26026, 26064), True, 'import numpy as np\n'), ((26880, 26899), 'numpy.sum', 'np.sum', (['(pmem ** 2.0)'], {}), '(pmem ** 2.0)\n', (26886, 26899), True, 'import numpy as np\n'), ((26899, 26911), 'numpy.sum', 'np.sum', (['pmem'], {}), '(pmem)\n', (26905, 26911), True, 'import numpy as np\n'), ((27842, 27972), 'numpy.where', 'np.where', (['((self.neighbors.r > rlam) | (self.neighbors.refmag > maxmag) | (self.\n neighbors.refmag > self.config.limmag_catalog))'], {}), '((self.neighbors.r > rlam) | (self.neighbors.refmag > maxmag) | (\n self.neighbors.refmag > self.config.limmag_catalog))\n', (27850, 27972), True, 'import numpy as np\n'), ((29813, 29828), 'numpy.radians', 'np.radians', (['(1.0)'], {}), '(1.0)\n', (29823, 29828), True, 'import numpy as np\n'), ((30199, 30257), 'numpy.clip', 'np.clip', (['(self.mpc_scale * self.neighbors.dist)', '(1e-06)', 'None'], {}), '(self.mpc_scale * self.neighbors.dist, 1e-06, None)\n', (30206, 30257), True, 'import numpy as np\n'), ((35350, 35385), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'cluster_dtype'}), '(size, dtype=cluster_dtype)\n', (35358, 35385), True, 'import numpy as np\n'), ((5004, 5028), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (5012, 5028), True, 'import numpy as np\n'), ((14286, 14322), 'numpy.log10', 'np.log10', (['self.config.lval_reference'], {}), '(self.config.lval_reference)\n', (14294, 14322), True, 'import numpy as np\n'), ((17610, 17646), 'numpy.log10', 'np.log10', (['self.config.lval_reference'], {}), '(self.config.lval_reference)\n', (17618, 17646), True, 'import numpy as np\n'), ((19533, 19552), 'numpy.sum', 'np.sum', (['(pmem ** 2.0)'], {}), '(pmem ** 2.0)\n', (19539, 19552), True, 'import numpy as np\n'), ((19551, 19563), 'numpy.sum', 'np.sum', (['pmem'], {}), '(pmem)\n', (19557, 19563), True, 'import numpy as np\n'), ((20018, 20097), 'numpy.sqrt', 'np.sqrt', (['((1 - bar_pmem) * lam_unscaled * self.scaleval ** 2.0 + lam_cerr ** 2.0)'], {}), '((1 - bar_pmem) * lam_unscaled * self.scaleval ** 2.0 + lam_cerr ** 2.0)\n', (20025, 20097), True, 'import numpy as np\n'), ((22928, 22941), 'numpy.sum', 'np.sum', (['p_out'], {}), '(p_out)\n', (22934, 22941), True, 'import numpy as np\n'), ((23014, 23033), 'numpy.sum', 'np.sum', (['(pmem ** 2.0)'], {}), '(pmem ** 2.0)\n', (23020, 23033), True, 'import numpy as np\n'), ((24277, 24313), 'numpy.log10', 'np.log10', (['self.config.lval_reference'], {}), '(self.config.lval_reference)\n', (24285, 24313), True, 'import numpy as np\n'), ((24350, 24364), 'numpy.log10', 'np.log10', (['(20.0)'], {}), '(20.0)\n', (24358, 24364), True, 'import numpy as np\n'), ((25409, 25438), 'numpy.abs', 
'np.abs', (['(col[guse] - centcolor)'], {}), '(col[guse] - centcolor)\n', (25415, 25438), True, 'import numpy as np\n'), ((27543, 27604), 'numpy.sqrt', 'np.sqrt', (['((1.0 - bar_pmem) * lam_unscaled + lambda_cerr ** 2.0)'], {}), '((1.0 - bar_pmem) * lam_unscaled + lambda_cerr ** 2.0)\n', (27550, 27604), True, 'import numpy as np\n'), ((5117, 5156), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'config.cluster_dtype'}), '(1, dtype=config.cluster_dtype)\n', (5125, 5156), True, 'import numpy as np\n'), ((5285, 5322), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'cluster_dtype_base'}), '(1, dtype=cluster_dtype_base)\n', (5293, 5322), True, 'import numpy as np\n'), ((14164, 14184), 'numpy.deg2rad', 'np.deg2rad', (['self.dec'], {}), '(self.dec)\n', (14174, 14184), True, 'import numpy as np\n'), ((18530, 18551), 'numpy.ones_like', 'np.ones_like', (['ucounts'], {}), '(ucounts)\n', (18542, 18551), True, 'import numpy as np\n'), ((19734, 19746), 'numpy.sum', 'np.sum', (['pmem'], {}), '(pmem)\n', (19740, 19746), True, 'import numpy as np\n'), ((26671, 26692), 'numpy.ones_like', 'np.ones_like', (['ucounts'], {}), '(ucounts)\n', (26683, 26692), True, 'import numpy as np\n'), ((27260, 27272), 'numpy.sum', 'np.sum', (['pmem'], {}), '(pmem)\n', (27266, 27272), True, 'import numpy as np\n'), ((20446, 20463), 'numpy.isfinite', 'np.isfinite', (['pcol'], {}), '(pcol)\n', (20457, 20463), True, 'import numpy as np\n'), ((15023, 15052), 'numpy.isfinite', 'np.isfinite', (['sigma_g_maskgals'], {}), '(sigma_g_maskgals)\n', (15034, 15052), True, 'import numpy as np\n'), ((19612, 19646), 'numpy.arange', 'np.arange', (['cpars.size'], {'dtype': 'float'}), '(cpars.size, dtype=float)\n', (19621, 19646), True, 'import numpy as np\n')] |
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
plt.rc('font', family='serif')
plt.rc('font', serif='Times New Roman')
plt.rcParams["mathtext.fontset"] = "stix"
def smooth(y):
return gaussian_filter1d(y, sigma=0.6)
base_path = os.path.dirname(".")
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
COLORS = {"mnist_balanced": colors[0],
"mnist_unbalanced": colors[1],
"synthetic_a0b0": colors[2],
"synthetic_a1b1": colors[3]}
LABELS = {'mnist_balanced': 'mnist balanced',
'mnist_unbalanced': 'mnist unbalanced',
'synthetic_a0b0': r'synthetic$(0,0)$',
"synthetic_a1b1": r'synthetic$(1,1)$'}
synthetic_a0b0_X = [5, 10, 20, 30, 50, 60, 80, 100, 125, 200]
synthetic_a0b0 = smooth([160, 101, 88, 87, 90, 92, 96, 106, 114, 139])
synthetic_a1b1_X = [5, 10, 20, 30, 50]
synthetic_a1b1 = smooth([189, 140, 143, 150, 194])
mnist_balanced_X = [10, 20, 30, 50, 60, 80, 100, 125, 150]
mnist_balanced = smooth([120, 50, 39, 28, 27, 22, 21, 20, 20])
mnist_unbalanced_X = [10, 20, 30, 50, 100, 200, 400]
mnist_unbalanced = smooth([400, 145, 114, 104, 119, 137, 205])
matplotlib.rcParams['font.family'] = 'Times New Roman'
stats_dict = {'mnist_unbalanced': (mnist_unbalanced_X, mnist_unbalanced),
'mnist_balanced': (mnist_balanced_X, mnist_balanced),
'synthetic_a0b0': (synthetic_a0b0_X, synthetic_a0b0),
'synthetic_a1b1': (synthetic_a1b1_X, synthetic_a1b1)}
plt.figure(figsize=(4, 3))
for data, stat in stats_dict.items():
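    # x-values appear to be logged in units of 10 local steps, so rescale by 10 to
    # match the 'Local steps ($E$)' axis label (an inference from this plot call,
    # not something stated elsewhere in the script)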
plt.plot(np.array(stat[0])*10, np.array(stat[1]), linewidth=1.0, color=COLORS[data], label=LABELS[data])
plt.grid(True)
plt.legend(loc=0, borderaxespad=0., prop={'size': 10})
plt.ylabel(r'Required rounds ($T_{\epsilon}/E$)', fontdict={'size': 10})
plt.xlabel('Local steps ($E$)', fontdict={'size': 10})
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xscale('log')
plt.tight_layout()
fig = plt.gcf()
fig.savefig('E.pdf')
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.gcf",
"os.path.dirname",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ytic... | [((28, 49), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (42, 49), False, 'import matplotlib\n'), ((153, 183), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (159, 183), True, 'import matplotlib.pyplot as plt\n'), ((184, 223), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (190, 223), True, 'import matplotlib.pyplot as plt\n'), ((340, 360), 'os.path.dirname', 'os.path.dirname', (['"""."""'], {}), "('.')\n", (355, 360), False, 'import os\n'), ((1594, 1620), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (1604, 1620), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1783), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1777, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1839), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'borderaxespad': '(0.0)', 'prop': "{'size': 10}"}), "(loc=0, borderaxespad=0.0, prop={'size': 10})\n", (1794, 1839), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1911), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Required rounds ($T_{\\\\epsilon}/E$)"""'], {'fontdict': "{'size': 10}"}), "('Required rounds ($T_{\\\\epsilon}/E$)', fontdict={'size': 10})\n", (1849, 1911), True, 'import matplotlib.pyplot as plt\n'), ((1912, 1966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Local steps ($E$)"""'], {'fontdict': "{'size': 10}"}), "('Local steps ($E$)', fontdict={'size': 10})\n", (1922, 1966), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1990), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (1977, 1990), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2014), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (2001, 2014), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2032), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2025, 2032), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2051), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2049, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2067), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2065, 2067), True, 'import matplotlib.pyplot as plt\n'), ((294, 325), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['y'], {'sigma': '(0.6)'}), '(y, sigma=0.6)\n', (311, 325), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((1694, 1711), 'numpy.array', 'np.array', (['stat[1]'], {}), '(stat[1])\n', (1702, 1711), True, 'import numpy as np\n'), ((1672, 1689), 'numpy.array', 'np.array', (['stat[0]'], {}), '(stat[0])\n', (1680, 1689), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import colors  # needed below for colors.PowerNorm when powernorm=True
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
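# Illustrative note (added): the closure's __name__ is set above so that pandas can
# label aggregated columns readably, e.g. (hypothetical call)
#     df.groupby('bin')['c'].agg(percentile(75))   # -> column named 'percentile_75'
# boxbin below uses the same factory via .apply for its 'quantile' method.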
#unconditional mean: sum c (R) in each bin, then divide by the master counts
def boxbin(x,y,xedge,yedge,c=None,figsize=(5,5),cmap='viridis',mincnt=10,vmin=None,vmax=None,edgecolor=None,powernorm=False,
ax=None,normed=False,method='mean',quantile=None,alpha=1.0,cbar=True,unconditional=False,master_count=np.array([])):
""" This function will grid data for you and provide the counts if no variable c is given, or the median if
a variable c is given. In the future I will add functionallity to do the median, and possibly quantiles.
x: 1-D array
y: 1-D array
xedge: 1-D array for xbins
yedge: 1-D array for ybins
c: 1-D array, same len as x and y
returns
axis handle
cbar handle
C matrix (counts or median values in bin)
"""
midpoints = np.empty(xedge.shape[0]-1)
for i in np.arange(1,xedge.shape[0]):
midpoints[i-1] = xedge[i-1] + (np.abs(xedge[i] - xedge[i-1]))/2.
    #note on digitize: index 0 means outside to the left of the bins; index len(bins) means outside to the right
ind1 = np.digitize(x,bins = xedge) #inds of x in each bin
ind2 = np.digitize(y,bins = yedge) #inds of y in each bin
#drop points outside range
outsideleft = np.where(ind1 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind1 != len(xedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
outsideleft = np.where(ind2 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind2 != len(yedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
if c is None:
c = np.zeros(len(ind1))
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
df2 = df.groupby(["x","y"]).count()
df = df2.where(df2.values >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if normed:
n_samples = np.ma.sum(C)
C = C/n_samples
C = C*100
print('n_samples= {}'.format(n_samples))
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,edgecolor=edgecolor,norm=colors.PowerNorm(gamma=0.5),vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=edgecolor,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
elif unconditional:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].sum()
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = C/master_count.values
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].mean()
elif method=='std':
df2 = df.groupby(["x","y"])['c'].std()
elif method=='median':
df2 = df.groupby(["x","y"])['c'].median()
        elif method=='quantile':
if quantile is None:
print('No quantile given, defaulting to median')
quantile = 0.5
else:
pass
df2 = df.groupby(["x","y"])['c'].apply(percentile(quantile*100))
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
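
# Minimal usage sketch (added for illustration; the data below are hypothetical and
# everything uses only names defined in this module plus numpy/matplotlib):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    x = rng.normal(size=10_000)
    y = rng.normal(size=10_000)
    c = x + y + rng.normal(scale=0.1, size=10_000)
    xedge = np.linspace(-3, 3, 31)
    yedge = np.linspace(-3, 3, 31)
    # median of c in each (x, y) box, keeping only boxes with at least 5 samples
    ax, cb, C = boxbin(x, y, xedge, yedge, c=c, method='median', mincnt=5)
    plt.savefig('boxbin_demo.png')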
| [
"numpy.abs",
"numpy.ones",
"numpy.ma.sum",
"numpy.digitize",
"numpy.where",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.colorbar",
"numpy.ma.masked_where",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.empty",
"pandas.DataFrame",
"numpy.percentile",
"numpy.arange"
] | [((582, 594), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (590, 594), True, 'import numpy as np\n'), ((1109, 1137), 'numpy.empty', 'np.empty', (['(xedge.shape[0] - 1)'], {}), '(xedge.shape[0] - 1)\n', (1117, 1137), True, 'import numpy as np\n'), ((1149, 1177), 'numpy.arange', 'np.arange', (['(1)', 'xedge.shape[0]'], {}), '(1, xedge.shape[0])\n', (1158, 1177), True, 'import numpy as np\n'), ((1363, 1389), 'numpy.digitize', 'np.digitize', (['x'], {'bins': 'xedge'}), '(x, bins=xedge)\n', (1374, 1389), True, 'import numpy as np\n'), ((1425, 1451), 'numpy.digitize', 'np.digitize', (['y'], {'bins': 'yedge'}), '(y, bins=yedge)\n', (1436, 1451), True, 'import numpy as np\n'), ((1536, 1555), 'numpy.where', 'np.where', (['(ind1 != 0)'], {}), '(ind1 != 0)\n', (1544, 1555), True, 'import numpy as np\n'), ((1895, 1914), 'numpy.where', 'np.where', (['(ind2 != 0)'], {}), '(ind2 != 0)\n', (1903, 1914), True, 'import numpy as np\n'), ((170, 189), 'numpy.percentile', 'np.percentile', (['x', 'n'], {}), '(x, n)\n', (183, 189), True, 'import numpy as np\n'), ((2287, 2339), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ind1 - 1, 'y': ind2 - 1, 'c': c}"], {}), "({'x': ind1 - 1, 'y': ind2 - 1, 'c': c})\n", (2299, 2339), True, 'import pandas as pd\n'), ((2596, 2629), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(C == -9999)', 'C'], {}), '(C == -9999, C)\n', (2614, 2629), True, 'import numpy as np\n'), ((2441, 2490), 'numpy.ones', 'np.ones', (['[xedge.shape[0] - 1, yedge.shape[0] - 1]'], {}), '([xedge.shape[0] - 1, yedge.shape[0] - 1])\n', (2448, 2490), True, 'import numpy as np\n'), ((2681, 2693), 'numpy.ma.sum', 'np.ma.sum', (['C'], {}), '(C)\n', (2690, 2693), True, 'import numpy as np\n'), ((2847, 2873), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2857, 2873), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2899), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2897, 2899), True, 'import matplotlib.pyplot as plt\n'), ((3575, 3627), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ind1 - 1, 'y': ind2 - 1, 'c': c}"], {}), "({'x': ind1 - 1, 'y': ind2 - 1, 'c': c})\n", (3587, 3627), True, 'import pandas as pd\n'), ((3889, 3938), 'numpy.ones', 'np.ones', (['[xedge.shape[0] - 1, yedge.shape[0] - 1]'], {}), '([xedge.shape[0] - 1, yedge.shape[0] - 1])\n', (3896, 3938), True, 'import numpy as np\n'), ((4662, 4714), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ind1 - 1, 'y': ind2 - 1, 'c': c}"], {}), "({'x': ind1 - 1, 'y': ind2 - 1, 'c': c})\n", (4674, 4714), True, 'import pandas as pd\n'), ((5588, 5621), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(C == -9999)', 'C'], {}), '(C == -9999, C)\n', (5606, 5621), True, 'import numpy as np\n'), ((1217, 1248), 'numpy.abs', 'np.abs', (['(xedge[i] - xedge[i - 1])'], {}), '(xedge[i] - xedge[i - 1])\n', (1223, 1248), True, 'import numpy as np\n'), ((3176, 3199), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pm'], {'ax': 'ax'}), '(pm, ax=ax)\n', (3188, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3445), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pm'], {'ax': 'ax'}), '(pm, ax=ax)\n', (3434, 3445), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (4129, 4145), True, 'import matplotlib.pyplot as plt\n'), ((4162, 4171), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4169, 4171), True, 'import matplotlib.pyplot as plt\n'), ((5432, 5481), 'numpy.ones', 'np.ones', (['[xedge.shape[0] 
- 1, yedge.shape[0] - 1]'], {}), '([xedge.shape[0] - 1, yedge.shape[0] - 1])\n', (5439, 5481), True, 'import numpy as np\n'), ((5663, 5689), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5673, 5689), True, 'import matplotlib.pyplot as plt\n'), ((5706, 5715), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5713, 5715), True, 'import matplotlib.pyplot as plt\n'), ((4407, 4430), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pm'], {'ax': 'ax'}), '(pm, ax=ax)\n', (4419, 4430), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4621), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pm'], {'ax': 'ax'}), '(pm, ax=ax)\n', (4610, 4621), True, 'import matplotlib.pyplot as plt\n'), ((5951, 5974), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pm'], {'ax': 'ax'}), '(pm, ax=ax)\n', (5963, 5974), True, 'import matplotlib.pyplot as plt\n'), ((6186, 6209), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pm'], {'ax': 'ax'}), '(pm, ax=ax)\n', (6198, 6209), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import logging
import os
import random
import string
import sys
import time
import numpy as np
import pytest
import graphscope
graphscope.set_option(show_log=True)
from graphscope import property_sssp
from graphscope import sssp
from graphscope.framework.app import AppAssets
from graphscope.framework.app import AppDAGNode
from graphscope.framework.errors import AnalyticalEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.loader import Loader
test_repo_dir = os.path.expandvars("${GS_TEST_DIR}")
prefix = os.path.join(test_repo_dir, "ogbn_mag_small")
new_property_dir = os.path.join(test_repo_dir, "new_property", "v2_e2")
@pytest.fixture(scope="module")
def sess():
session = graphscope.session(cluster_type="hosts", num_workers=2, mode="lazy")
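    # lazy mode only builds a dataflow DAG; graphs, apps and contexts materialize
    # when they are passed to session.run(...), as the tests below rely on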
session.as_default()
yield session
session.close()
@pytest.fixture(scope="function")
def student_v(data_dir=os.path.expandvars("${GS_TEST_DIR}/property_graph")):
return Loader("%s/student.v" % data_dir, header_row=True, delimiter=",")
@pytest.fixture(scope="function")
def teacher_v(data_dir=os.path.expandvars("${GS_TEST_DIR}/property_graph")):
return Loader("%s/teacher.v" % data_dir, header_row=True, delimiter=",")
@pytest.fixture(scope="function")
def student_group_e(data_dir=os.path.expandvars("${GS_TEST_DIR}/property_graph")):
return Loader("%s/group.e" % data_dir, header_row=True, delimiter=",")
@pytest.fixture(scope="function")
def teacher_group_e(data_dir=os.path.expandvars("${GS_TEST_DIR}/property_graph")):
return Loader("%s/teacher_group.e" % data_dir, header_row=True, delimiter=",")
def arrow_property_graph(graphscope_session):
g = graphscope_session.g(generate_eid=False)
g = g.add_vertices(f"{new_property_dir}/twitter_v_0", "v0")
g = g.add_vertices(f"{new_property_dir}/twitter_v_1", "v1")
g = g.add_edges(f"{new_property_dir}/twitter_e_0_0_0", "e0", ["weight"], "v0", "v0")
g = g.add_edges(f"{new_property_dir}/twitter_e_0_1_0", "e0", ["weight"], "v0", "v1")
g = g.add_edges(f"{new_property_dir}/twitter_e_1_0_0", "e0", ["weight"], "v1", "v0")
g = g.add_edges(f"{new_property_dir}/twitter_e_1_1_0", "e0", ["weight"], "v1", "v1")
g = g.add_edges(f"{new_property_dir}/twitter_e_0_0_1", "e1", ["weight"], "v0", "v0")
g = g.add_edges(f"{new_property_dir}/twitter_e_0_1_1", "e1", ["weight"], "v0", "v1")
g = g.add_edges(f"{new_property_dir}/twitter_e_1_0_1", "e1", ["weight"], "v1", "v0")
g = g.add_edges(f"{new_property_dir}/twitter_e_1_1_1", "e1", ["weight"], "v1", "v1")
return g
def test_vertices_omitted_form_loader(sess, student_group_e):
g = sess.g()
g1 = g.add_edges(student_group_e)
g2 = sess.run(g1) # g2 is a Graph instance
assert g2.loaded()
def test_construct_graph_step_by_step(sess):
_g = sess.g(generate_eid=False)
g = sess.run(_g)
_g1 = g.add_vertices(f"{new_property_dir}/twitter_v_0", "v0")
g1 = sess.run(_g1)
_g2 = g1.add_vertices(f"{new_property_dir}/twitter_v_1", "v1")
g2 = sess.run(_g2)
ug = g.unload()
ug1 = g1.unload()
ug2 = g2.unload()
sess.run([ug, ug1, ug2])
def test_unload_graph(sess, student_v, teacher_v, student_group_e):
# case 1
# 1. load empty g
# 2. unload g
g = sess.g()
ug = g.unload()
assert sess.run(ug) is None
# case 2
g = sess.g()
g1 = g.add_vertices(student_v, "student")
g2 = g.add_vertices(teacher_v, "teacher")
ug1 = g1.unload()
ug2 = g2.unload()
assert sess.run(ug1) is None
assert sess.run(ug2) is None
# case 3
g = sess.g()
g1 = g.add_vertices(student_v, "student")
g2 = g1.add_vertices(teacher_v, "teacher")
g3 = g2.add_edges(
student_group_e, "group", src_label="student", dst_label="student"
)
ug = g.unload()
ug1 = g1.unload()
ug2 = g2.unload()
ug3 = g3.unload()
sess.run([ug, ug1, ug2, ug3])
# case 4
# test unload twice
g = sess.g()
ug = g.unload()
assert sess.run(ug) is None
assert sess.run(ug) is None
def test_error_using_unload_graph(sess, student_v):
with pytest.raises(AnalyticalEngineInternalError):
g = sess.g()
ug = g.unload()
g1 = g.add_vertices(student_v, "student")
sess.run([ug, g1])
def test_unload_app(sess):
g = arrow_property_graph(sess)
# case 1
a1 = AppDAGNode(g, AppAssets(algo="property_sssp", context="labeled_vertex_data"))
ua1 = a1.unload()
assert sess.run(ua1) is None
# case 2
# unload app twice
a1 = AppDAGNode(g, AppAssets(algo="property_sssp", context="labeled_vertex_data"))
ua1 = a1.unload()
assert sess.run(ua1) is None
assert sess.run(ua1) is None
# case 3
# load app after unload
a1 = AppDAGNode(g, AppAssets(algo="property_sssp", context="labeled_vertex_data"))
ua1 = a1.unload()
assert sess.run(ua1) is None
c1 = a1(src=20)
r1 = c1.to_numpy("r:v0.dist_0")
r = sess.run(r1)
assert r.shape == (40521,)
def test_graph_to_numpy(sess):
g = arrow_property_graph(sess)
c = property_sssp(g, 20)
ctx_out_np = c.to_numpy("r:v0.dist_0")
g2 = g.add_column(c, {"result_0": "r:v0.dist_0"})
graph_out_np = g2.to_numpy("v:v0.result_0")
r = sess.run([ctx_out_np, graph_out_np])
assert np.all(r[0] == r[1])
# unload graph
ug = g.unload()
ug2 = g2.unload()
sess.run([ug, ug2])
def test_graph_to_dataframe(sess):
g = arrow_property_graph(sess)
c = property_sssp(g, 20)
ctx_out_df = c.to_dataframe({"result": "r:v0.dist_0"})
g2 = g.add_column(c, {"result_0": "r:v0.dist_0"})
graph_out_df = g2.to_dataframe({"result": "v:v0.result_0"})
r = sess.run([ctx_out_df, graph_out_df])
assert r[0].equals(r[1])
# unload graph
ug = g.unload()
ug2 = g2.unload()
sess.run([ug, ug2])
def test_context(sess):
g = arrow_property_graph(sess)
c = property_sssp(g, 20)
r1 = c.to_numpy("r:v0.dist_0")
r2 = c.to_dataframe({"id": "v:v0.id", "result": "r:v0.dist_0"})
r3 = c.to_vineyard_tensor("v:v0.id")
r4 = c.to_vineyard_dataframe(
{"id": "v:v0.id", "data": "v:v0.dist", "result": "r:v0.dist_0"}
)
r = sess.run([r1, r2, r3, r4])
assert r[0].shape == (40521,)
assert r[1].shape == (40521, 2)
assert r[2] is not None
assert r[3] is not None
def test_error_selector_context(sess):
# case 1
# labeled vertex data context
g = arrow_property_graph(sess)
c = property_sssp(g, 20)
with pytest.raises(
InvalidArgumentError,
match="Selector in labeled vertex data context cannot be None",
):
r = c.to_numpy(selector=None)
with pytest.raises(ValueError, match="not enough values to unpack"):
# missing ":" in selectot
r = c.to_numpy("r.v0.dist_0")
with pytest.raises(SyntaxError, match="Invalid selector"):
# must be "v/e/r:xxx"
r = c.to_numpy("c:v0.dist_0")
with pytest.raises(SyntaxError, match="Invalid selector"):
# format error
c.to_numpy("r:v0.dist_0.dist_1")
# case 2
# vertex data context
pg = g.project(vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
c = sssp(pg, 20)
with pytest.raises(SyntaxError, match="Selector of v must be 'v.id' or 'v.data'"):
r = c.to_dataframe({"id": "v.ID"})
with pytest.raises(ValueError, match="selector of to_dataframe must be a dict"):
r = c.to_dataframe("id")
def test_query_after_project(sess):
g = arrow_property_graph(sess)
pg = g.project(vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
    # project the property graph to a simple graph first, then run plain sssp on it
c = sssp(pg, 20)
r1 = c.to_dataframe({"node": "v.id", "r": "r"})
r = sess.run(r1)
assert r.shape == (40521, 2)
def test_add_column(sess):
g = arrow_property_graph(sess)
pg = g.project(vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
c = sssp(pg, 20)
g1 = g.add_column(c, {"id_col": "v.id", "data_col": "v.data", "result_col": "r"})
sess.run(g1)
def test_multi_src_dst_edge_loader(
sess, student_group_e, teacher_group_e, student_v, teacher_v
):
graph = sess.g()
graph = graph.add_vertices(
student_v, "student", ["name", "lesson_nums", "avg_score"], "student_id"
)
graph = graph.add_vertices(
teacher_v, "teacher", ["student_num", "score", "email", "tel"], "teacher_id"
)
graph = graph.add_edges(
student_group_e,
"group",
["group_id", "member_size"],
src_label="student",
dst_label="student",
src_field="leader_student_id",
dst_field="member_student_id",
)
graph = graph.add_edges(
teacher_group_e,
"group",
["group_id", "member_size"],
src_label="teacher",
dst_label="teacher",
src_field="leader_teacher_id",
dst_field="member_teacher_id",
)
g = sess.run(graph)
def test_simulate_eager(sess):
g1_node = arrow_property_graph(sess)
g1 = sess.run(g1_node)
c_node = property_sssp(g1, 20)
c = sess.run(c_node)
r_node = c.to_numpy("r:v0.dist_0")
r = sess.run(r_node)
assert r.shape == (40521,)
pg_node = g1.project(vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
pg = sess.run(pg_node)
c_node = sssp(pg, 20)
c = sess.run(c_node)
g2_node = g1.add_column(
c, {"id_col": "v.id", "data_col": "v.data", "result_col": "r"}
)
g2 = sess.run(g2_node)
| [
"os.path.expandvars",
"os.path.join",
"graphscope.framework.app.AppAssets",
"graphscope.session",
"graphscope.framework.loader.Loader",
"graphscope.set_option",
"graphscope.property_sssp",
"graphscope.sssp",
"pytest.raises",
"pytest.fixture",
"numpy.all"
] | [((814, 850), 'graphscope.set_option', 'graphscope.set_option', ([], {'show_log': '(True)'}), '(show_log=True)\n', (835, 850), False, 'import graphscope\n'), ((1207, 1243), 'os.path.expandvars', 'os.path.expandvars', (['"""${GS_TEST_DIR}"""'], {}), "('${GS_TEST_DIR}')\n", (1225, 1243), False, 'import os\n'), ((1253, 1298), 'os.path.join', 'os.path.join', (['test_repo_dir', '"""ogbn_mag_small"""'], {}), "(test_repo_dir, 'ogbn_mag_small')\n", (1265, 1298), False, 'import os\n'), ((1319, 1371), 'os.path.join', 'os.path.join', (['test_repo_dir', '"""new_property"""', '"""v2_e2"""'], {}), "(test_repo_dir, 'new_property', 'v2_e2')\n", (1331, 1371), False, 'import os\n'), ((1375, 1405), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1389, 1405), False, 'import pytest\n'), ((1567, 1599), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1581, 1599), False, 'import pytest\n'), ((1757, 1789), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1771, 1789), False, 'import pytest\n'), ((1947, 1979), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1961, 1979), False, 'import pytest\n'), ((2141, 2173), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2155, 2173), False, 'import pytest\n'), ((1432, 1500), 'graphscope.session', 'graphscope.session', ([], {'cluster_type': '"""hosts"""', 'num_workers': '(2)', 'mode': '"""lazy"""'}), "(cluster_type='hosts', num_workers=2, mode='lazy')\n", (1450, 1500), False, 'import graphscope\n'), ((1623, 1674), 'os.path.expandvars', 'os.path.expandvars', (['"""${GS_TEST_DIR}/property_graph"""'], {}), "('${GS_TEST_DIR}/property_graph')\n", (1641, 1674), False, 'import os\n'), ((1688, 1753), 'graphscope.framework.loader.Loader', 'Loader', (["('%s/student.v' % data_dir)"], {'header_row': '(True)', 'delimiter': '""","""'}), "('%s/student.v' % data_dir, header_row=True, delimiter=',')\n", (1694, 1753), False, 'from graphscope.framework.loader import Loader\n'), ((1813, 1864), 'os.path.expandvars', 'os.path.expandvars', (['"""${GS_TEST_DIR}/property_graph"""'], {}), "('${GS_TEST_DIR}/property_graph')\n", (1831, 1864), False, 'import os\n'), ((1878, 1943), 'graphscope.framework.loader.Loader', 'Loader', (["('%s/teacher.v' % data_dir)"], {'header_row': '(True)', 'delimiter': '""","""'}), "('%s/teacher.v' % data_dir, header_row=True, delimiter=',')\n", (1884, 1943), False, 'from graphscope.framework.loader import Loader\n'), ((2009, 2060), 'os.path.expandvars', 'os.path.expandvars', (['"""${GS_TEST_DIR}/property_graph"""'], {}), "('${GS_TEST_DIR}/property_graph')\n", (2027, 2060), False, 'import os\n'), ((2074, 2137), 'graphscope.framework.loader.Loader', 'Loader', (["('%s/group.e' % data_dir)"], {'header_row': '(True)', 'delimiter': '""","""'}), "('%s/group.e' % data_dir, header_row=True, delimiter=',')\n", (2080, 2137), False, 'from graphscope.framework.loader import Loader\n'), ((2203, 2254), 'os.path.expandvars', 'os.path.expandvars', (['"""${GS_TEST_DIR}/property_graph"""'], {}), "('${GS_TEST_DIR}/property_graph')\n", (2221, 2254), False, 'import os\n'), ((2268, 2339), 'graphscope.framework.loader.Loader', 'Loader', (["('%s/teacher_group.e' % data_dir)"], {'header_row': '(True)', 'delimiter': '""","""'}), "('%s/teacher_group.e' % data_dir, header_row=True, delimiter=',')\n", (2274, 2339), False, 'from graphscope.framework.loader import 
Loader\n'), ((5799, 5819), 'graphscope.property_sssp', 'property_sssp', (['g', '(20)'], {}), '(g, 20)\n', (5812, 5819), False, 'from graphscope import property_sssp\n'), ((6021, 6041), 'numpy.all', 'np.all', (['(r[0] == r[1])'], {}), '(r[0] == r[1])\n', (6027, 6041), True, 'import numpy as np\n'), ((6207, 6227), 'graphscope.property_sssp', 'property_sssp', (['g', '(20)'], {}), '(g, 20)\n', (6220, 6227), False, 'from graphscope import property_sssp\n'), ((6633, 6653), 'graphscope.property_sssp', 'property_sssp', (['g', '(20)'], {}), '(g, 20)\n', (6646, 6653), False, 'from graphscope import property_sssp\n'), ((7202, 7222), 'graphscope.property_sssp', 'property_sssp', (['g', '(20)'], {}), '(g, 20)\n', (7215, 7222), False, 'from graphscope import property_sssp\n'), ((7915, 7927), 'graphscope.sssp', 'sssp', (['pg', '(20)'], {}), '(pg, 20)\n', (7919, 7927), False, 'from graphscope import sssp\n'), ((8392, 8404), 'graphscope.sssp', 'sssp', (['pg', '(20)'], {}), '(pg, 20)\n', (8396, 8404), False, 'from graphscope import sssp\n'), ((8653, 8665), 'graphscope.sssp', 'sssp', (['pg', '(20)'], {}), '(pg, 20)\n', (8657, 8665), False, 'from graphscope import sssp\n'), ((9776, 9797), 'graphscope.property_sssp', 'property_sssp', (['g1', '(20)'], {}), '(g1, 20)\n', (9789, 9797), False, 'from graphscope import property_sssp\n'), ((10034, 10046), 'graphscope.sssp', 'sssp', (['pg', '(20)'], {}), '(pg, 20)\n', (10038, 10046), False, 'from graphscope import sssp\n'), ((4831, 4875), 'pytest.raises', 'pytest.raises', (['AnalyticalEngineInternalError'], {}), '(AnalyticalEngineInternalError)\n', (4844, 4875), False, 'import pytest\n'), ((5100, 5162), 'graphscope.framework.app.AppAssets', 'AppAssets', ([], {'algo': '"""property_sssp"""', 'context': '"""labeled_vertex_data"""'}), "(algo='property_sssp', context='labeled_vertex_data')\n", (5109, 5162), False, 'from graphscope.framework.app import AppAssets\n'), ((5279, 5341), 'graphscope.framework.app.AppAssets', 'AppAssets', ([], {'algo': '"""property_sssp"""', 'context': '"""labeled_vertex_data"""'}), "(algo='property_sssp', context='labeled_vertex_data')\n", (5288, 5341), False, 'from graphscope.framework.app import AppAssets\n'), ((5496, 5558), 'graphscope.framework.app.AppAssets', 'AppAssets', ([], {'algo': '"""property_sssp"""', 'context': '"""labeled_vertex_data"""'}), "(algo='property_sssp', context='labeled_vertex_data')\n", (5505, 5558), False, 'from graphscope.framework.app import AppAssets\n'), ((7232, 7336), 'pytest.raises', 'pytest.raises', (['InvalidArgumentError'], {'match': '"""Selector in labeled vertex data context cannot be None"""'}), "(InvalidArgumentError, match=\n 'Selector in labeled vertex data context cannot be None')\n", (7245, 7336), False, 'import pytest\n'), ((7403, 7465), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""not enough values to unpack"""'}), "(ValueError, match='not enough values to unpack')\n", (7416, 7465), False, 'import pytest\n'), ((7548, 7600), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""Invalid selector"""'}), "(SyntaxError, match='Invalid selector')\n", (7561, 7600), False, 'import pytest\n'), ((7679, 7731), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""Invalid selector"""'}), "(SyntaxError, match='Invalid selector')\n", (7692, 7731), False, 'import pytest\n'), ((7937, 8013), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""Selector of v must be \'v.id\' or \'v.data\'"""'}), '(SyntaxError, match="Selector of v must be \'v.id\' or 
\'v.data\'")\n', (7950, 8013), False, 'import pytest\n'), ((8067, 8141), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""selector of to_dataframe must be a dict"""'}), "(ValueError, match='selector of to_dataframe must be a dict')\n", (8080, 8141), False, 'import pytest\n')] |
from pathlib import Path
from unittest import TestCase
import numpy as np
from skimage import io
from detect_face import load_model
from src.emotion_predictor.face_detection.detect import extract_box
class TestDetection(TestCase):
def setUp(self) -> None:
image_path = Path("demo_images/shamil-smile.jpg")
weights_path = Path("weights/yolov5n-face.pt")
reference_crop_path = Path("demo_images/shamil-cropped.png")
self.image = io.imread(str(image_path))
self.model = load_model(weights=str(weights_path), device="cpu")
self.reference_crop = io.imread(str(reference_crop_path))
def test_face_detection(self):
coords = extract_box(self.image, self.model)
x1, y1, x2, y2 = coords[0]
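        # numpy images are indexed (row, column) = (y, x), hence the y-slice comes first below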
cropped_face = self.image[y1:y2, x1:x2]
self.assertTrue(np.all(self.reference_crop == cropped_face), "Face detection boxes mismatch.")
| [
"numpy.all",
"src.emotion_predictor.face_detection.detect.extract_box",
"pathlib.Path"
] | [((286, 322), 'pathlib.Path', 'Path', (['"""demo_images/shamil-smile.jpg"""'], {}), "('demo_images/shamil-smile.jpg')\n", (290, 322), False, 'from pathlib import Path\n'), ((346, 377), 'pathlib.Path', 'Path', (['"""weights/yolov5n-face.pt"""'], {}), "('weights/yolov5n-face.pt')\n", (350, 377), False, 'from pathlib import Path\n'), ((408, 446), 'pathlib.Path', 'Path', (['"""demo_images/shamil-cropped.png"""'], {}), "('demo_images/shamil-cropped.png')\n", (412, 446), False, 'from pathlib import Path\n'), ((688, 723), 'src.emotion_predictor.face_detection.detect.extract_box', 'extract_box', (['self.image', 'self.model'], {}), '(self.image, self.model)\n', (699, 723), False, 'from src.emotion_predictor.face_detection.detect import extract_box\n'), ((833, 876), 'numpy.all', 'np.all', (['(self.reference_crop == cropped_face)'], {}), '(self.reference_crop == cropped_face)\n', (839, 876), True, 'import numpy as np\n')] |
import numpy as np
from math import sqrt
import time
import multiprocessing
import copy
from functools import partial
from datetime import date
import matplotlib.pyplot as plt
import matplotlib
from ROAI_class import ROAI, RANDOM, RR, WRR
# single-run helpers: each executes one algorithm on the given instance and logs errors every update_interval pulls
def single_run_RR(instance, mean, std, k, sigma, delta, tol, pull_max, update_interval):
list_error = []
list_error_spec_tol = []
list_error_spec = []
alg_obj = RR(instance, mean, std, k, sigma, delta, tol)
alg_obj.initialization()
while alg_obj.t <= pull_max:
alg_obj.update()
if alg_obj.t % update_interval == 0:
error_at, error_spec_tol_at, error_spec_at = alg_obj.compute_error()
list_error.append(error_at)
list_error_spec_tol.append(error_spec_tol_at)
list_error_spec.append(error_spec_at)
return list_error, list_error_spec_tol, list_error_spec
def single_run_WRR(instance, mean, std, k, sigma, delta, tol, pull_max, update_interval):
list_error = []
list_error_spec_tol = []
list_error_spec = []
alg_obj = WRR(instance, mean, std, k, sigma, delta, tol)
alg_obj.initialization()
while alg_obj.t <= pull_max:
regular_arm = alg_obj.index_pull
alg_obj.update_regular()
if alg_obj.t % update_interval == 0:
error_at, error_spec_tol_at, error_spec_at = alg_obj.compute_error()
list_error.append(error_at)
list_error_spec_tol.append(error_spec_tol_at)
list_error_spec.append(error_spec_at)
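        # WRR detail (mirroring the loop condition below): after its regular round-robin
        # pull, the same arm keeps receiving additional pulls until it either leaves the
        # active set or reaches its per-arm pull threshold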
while alg_obj.t <= pull_max and regular_arm in alg_obj.active_set and alg_obj.pulls[regular_arm] < alg_obj.threshold_pulls[regular_arm]:
alg_obj.update_additional(regular_arm)
if alg_obj.t % update_interval == 0:
error_at, error_spec_tol_at, error_spec_at = alg_obj.compute_error()
list_error.append(error_at)
list_error_spec_tol.append(error_spec_tol_at)
list_error_spec.append(error_spec_at)
return list_error, list_error_spec_tol, list_error_spec
def single_run_random(instance, n_select, mean, std, k, instance_type, sigma, delta, tol, pull_max, update_interval):
list_error = []
list_error_spec_tol = []
list_error_spec = []
    # select all arms in the random sampling strategy
alg_obj = RANDOM(instance, n_select, mean, std, k, instance_type, sigma, delta, tol)
alg_obj.initialization()
while alg_obj.t <= pull_max:
alg_obj.update()
if alg_obj.t % update_interval == 0:
error_at, error_spec_tol, error_spec_at = alg_obj.compute_error()
list_error.append(error_at)
list_error_spec_tol.append(error_spec_tol)
list_error_spec.append(error_spec_at)
return list_error, list_error_spec_tol, list_error_spec
def single_run_ROAI_elimi(instance, n_select, mean, std, k, instance_type, sigma, delta, tol, pull_max, update_interval):
list_error = []
list_error_spec_tol = []
list_error_spec = []
alg_obj = ROAI(instance, n_select, mean, std, k, instance_type, sigma, delta, tol)
alg_obj.initialization_elimi()
while alg_obj.t <= pull_max:
alg_obj.update_elimi()
if alg_obj.t % update_interval == 0:
error_at, error_spec_tol, error_spec_at = alg_obj.compute_error()
list_error.append(error_at)
list_error_spec_tol.append(error_spec_tol)
list_error_spec.append(error_spec_at)
return list_error, list_error_spec_tol, list_error_spec
def single_run_ROAI_lucb(instance, n_select, mean, std, k, instance_type, sigma, delta, tol, pull_max, update_interval):
list_error = []
list_error_spec_tol = []
list_error_spec = []
alg_obj = ROAI(instance, n_select, mean, std, k, instance_type, sigma, delta, tol)
alg_obj.initialization_lucb()
while alg_obj.t <= pull_max:
alg_obj.update_lucb()
if alg_obj.t % update_interval == 0:
error_at, error_spec_tol, error_spec_at = alg_obj.compute_error()
list_error.append(error_at)
list_error_spec_tol.append(error_spec_tol)
list_error_spec.append(error_spec_at)
return list_error, list_error_spec_tol, list_error_spec
def find_threshold(instance, mean, std, k):
n = len(instance)
if n % 2 == 1:
start = int((n - 1) / 2)
end = int((n + 1) / 2)
else:
start = int((n - 2) / 2)
end = int((n + 2) / 2)
ranking = np.argsort(instance)
s_median = ranking[start:end]
median = sum(instance[i] for i in s_median) / len(s_median)
AD = np.zeros(n)
for i in range(n):
AD[i] = abs(instance[i] - median)
ranking_AD = np.argsort(AD)
s_median_AD = ranking_AD[start:end]
median_AD = sum(AD[i] for i in s_median_AD) / len(s_median_AD)
threshold_median = median + k * 1.4826 * median_AD
threshold_mean = np.mean(instance) + k * np.std(instance)
threshold_true = mean + k * std
return threshold_true, threshold_median, threshold_mean
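# Illustrative note (added, not part of the original script): threshold_median uses the
# median absolute deviation with the consistency constant 1.4826, which makes the MAD an
# unbiased estimate of the standard deviation for Gaussian data, so
# median + k * 1.4826 * MAD is a robust analogue of mean + k * std. For example, on a
# hypothetical instance = np.array([0.1, 0.2, 0.25, 0.3, 0.9]) the single outlier at 0.9
# inflates the mean-based threshold but barely moves the median-based one.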
def single_sim(n_select_list, mean, std, k_para, instance_type, sigma, delta, tol, pull_max, instance, update_interval):
np.random.seed()
list_error_spec_tol_multi = []
list_error_spec_multi = []
list_error_multi = []
for n_select in n_select_list:
list_error, list_error_spec_tol, list_error_spec = single_run_ROAI_lucb \
(instance, n_select, mean, std, k_para, instance_type, sigma, delta, tol, pull_max, update_interval)
list_error_multi.append(list_error)
list_error_spec_tol_multi.append(list_error_spec_tol)
list_error_spec_multi.append(list_error_spec)
print('finished ROAI_lucb')
for n_select in n_select_list:
list_error, list_error_spec_tol, list_error_spec = single_run_ROAI_elimi \
(instance, n_select, mean, std, k_para, instance_type, sigma, delta, tol, pull_max, update_interval)
list_error_multi.append(list_error)
list_error_spec_tol_multi.append(list_error_spec_tol)
list_error_spec_multi.append(list_error_spec)
print('finished ROAI_elimi')
for n_select in n_select_list:
list_error, list_error_spec_tol, list_error_spec = single_run_random \
(instance, n_select, mean, std, k_para, instance_type, sigma, delta, tol, pull_max, update_interval)
list_error_multi.append(list_error)
list_error_spec_tol_multi.append(list_error_spec_tol)
list_error_spec_multi.append(list_error_spec)
print('finished Random')
list_error, list_error_spec_tol, list_error_spec = single_run_WRR \
(instance, mean, std, k_para, sigma, delta, tol, pull_max, update_interval)
list_error_multi.append(list_error)
list_error_spec_tol_multi.append(list_error_spec_tol)
list_error_spec_multi.append(list_error_spec)
print('finished WRR')
list_error, list_error_spec_tol, list_error_spec = single_run_RR\
(instance, mean, std, k_para, sigma, delta, tol, pull_max, update_interval)
list_error_multi.append(list_error)
list_error_spec_tol_multi.append(list_error_spec_tol)
list_error_spec_multi.append(list_error_spec)
print('finished RR')
return list_error_multi, list_error_spec_tol_multi, list_error_spec_multi
def multi_sim(n_parallel, n_process, mean, std, k_para, instance_type, sigma, delta, tol,
pull_max, update_interval, n_select_list, instance):
time_start = time.time()
threshold_true, threshold_median, threshold_mean = find_threshold(instance, mean, std, k_para)
list_threshold = [threshold_true, threshold_median, threshold_mean]
minimum_true = min(abs(instance - threshold_true))
minimum_median = min(abs(instance - threshold_median))
minimum_mean = min(abs(instance - threshold_mean))
list_minimum_dist = [minimum_true, minimum_median, minimum_mean]
single_sim_partial = partial(single_sim, n_select_list, mean, std, k_para, instance_type,
sigma, delta, tol, pull_max, instance)
pool = multiprocessing.Pool(processes = n_process)
results = pool.map(single_sim_partial, list(map(int, update_interval * np.ones(n_parallel))))
print('multi_sim got results!')
# the order of the following sequences matters!!
measures = ['error', 'error_spec_tol', 'error_spec']
    # error_spec computes the error against each algorithm's own outlier threshold, which coincides with 'error' in this setting
    # error_spec_tol computes the error with an allowed tolerance; the result is similar to 'error'
algs = ['ROAILUCB', 'ROAIElim', 'Random', 'WRR', 'RR']
dict_error_spec_tol = dict(zip(algs, [[] for alg in algs]))
dict_error_method_specific = dict(zip(algs, [[] for alg in algs]))
dict_error = dict(zip(algs, [[] for alg in algs]))
    # the order here must match the measures list above!
dict_results = dict(zip(measures, [dict_error_spec_tol, dict_error_method_specific, dict_error]))
dict_results_ave = copy.deepcopy(dict_results)
dict_results_std = copy.deepcopy(dict_results)
for i in range(n_parallel):
for j in range(len(measures)):
for k in range(len(algs)):
dict_results[measures[j]][algs[k]].append(results[i][j][k])
for measure in measures:
for alg in algs:
dict_results_ave[measure][alg] = np.mean(dict_results[measure][alg], axis=0)
dict_results_std[measure][alg] = np.std(dict_results[measure][alg], axis=0)
print('---- final average results ----')
print(dict_results_ave)
time_end = time.time()
print('total time spent', time_end - time_start)
# plot figures
matplotlib.rcParams.update({'font.size': 12})
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
    std_adjust = 2/sqrt(n_parallel)  # ~95% confidence half-width scaling for the error bars
x = list(range(update_interval, pull_max + 1, update_interval))
fig = plt.figure(1)
for i in reversed(range(len(algs))):
alg_ave = np.array(dict_results_ave[measures[0]][algs[i]])
alg_std = std_adjust * np.array(dict_results_std[measures[0]][algs[i]])
plt.errorbar(x, alg_ave, yerr=alg_std, label=algs[i], linewidth=2, errorevery=2)
plt.xlabel('Number of pulls')
plt.ylabel('Error rate')
plt.grid(alpha=0.75)
plt.legend(loc='upper right')
plt.xlim(0, pull_max)
plt.ylim(0, 1)
plt.savefig('realdata_anytime.pdf')
plt.show()
plt.close(fig)
# save data in the following file
file = open('Real_data.txt', 'w')
file.write('{} - {}sigma - {}max - {}interval - {}n_parallel - {}mean - {}std -{}instance_type\n'.\
format(date.today(), sigma, pull_max, update_interval, n_parallel, mean, std, instance_type))
file.write('approximate threshold (from one realization) are as followings:\n')
file.write('threshold_true, threshold_median, threshold_mean \n')
file.write('threshold: {}\n'.format(list_threshold))
file.write('minimum distance: {}\n'.format(list_minimum_dist))
file.write('k={} - delta={}\n'.format(k_para, delta))
file.write('total time spent = {}\n'.format(time_end - time_start))
file.write('measures: {}'.format(measures))
file.write('algs: {}\n'.format(algs))
for measure in measures:
for alg in algs:
file.write('measure:{}, alg:{}, ave:\n'.format(measure, alg))
file.write('{}\n'.format(dict_results_ave[measure][alg]))
for measure in measures:
for alg in algs:
file.write('measure:{}, alg:{}, std:\n'.format(measure, alg))
            file.write('{}\n'.format(dict_results_std[measure][alg]))
    file.close()
def for_multi_sim(y_normal, y_outlier):
# std is to control the scale of the means of arms
# while sigma is the subgaussian parameter for each arm/distribution if instance type is subgaussian
instance = np.hstack((y_outlier, y_normal))
instance_type = 'bernoulli'
    # WRR and RR only work for bounded distributions
n_parallel = 500
n_process = 100
mean = 0.5
std = 0.1
# mean and std are not used anymore as we are working with real data
# the only requirement is that 0.5 + k * 0.1 \in [0.57, 0.84] (approximately)
# so such the true_threshold constructed could correctly identify the subset of outliers
k = 3
sigma = 0.1
# std of each arm if instance_type is normal, not used here due to Bernoulli
delta = 0.05
# confidence parameter
tol = 0.5 * std
# tolerance given for identification target, we output result both with and without tolerance
pull_max = 10000
update_interval = 200
n_total = len(instance)
n_select_list = [n_total]
# we test with all arms selected for the construction of the outlier threshold
multi_sim(n_parallel, n_process, mean, std, k, instance_type, sigma, delta, tol,
pull_max, update_interval, n_select_list, instance)
if __name__ == '__main__':
    # First, get dataset `wine.mat` from http://odds.cs.stonybrook.edu/wine-dataset/.
    # Next, preprocess the dataset based on the experiment description (remove 6 arms close to the threshold).
    # Then, input the preprocessed means of normal and outlier arms into `y_normal` and `y_outlier`.
y_normal = np.array([])
y_outlier = np.array([])
if len(y_normal) == 0 or len(y_outlier) == 0:
raise Exception('input the preprocessed means of normal and outlier arms')
instance = np.hstack((y_outlier, y_normal))
for_multi_sim(y_normal, y_outlier)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"math.sqrt",
"ROAI_class.ROAI",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"matplotlib.pyplot.errorbar",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.random.seed",
"matplotlib.pyplot.... | [((472, 517), 'ROAI_class.RR', 'RR', (['instance', 'mean', 'std', 'k', 'sigma', 'delta', 'tol'], {}), '(instance, mean, std, k, sigma, delta, tol)\n', (474, 517), False, 'from ROAI_class import ROAI, RANDOM, RR, WRR\n'), ((1124, 1170), 'ROAI_class.WRR', 'WRR', (['instance', 'mean', 'std', 'k', 'sigma', 'delta', 'tol'], {}), '(instance, mean, std, k, sigma, delta, tol)\n', (1127, 1170), False, 'from ROAI_class import ROAI, RANDOM, RR, WRR\n'), ((2401, 2475), 'ROAI_class.RANDOM', 'RANDOM', (['instance', 'n_select', 'mean', 'std', 'k', 'instance_type', 'sigma', 'delta', 'tol'], {}), '(instance, n_select, mean, std, k, instance_type, sigma, delta, tol)\n', (2407, 2475), False, 'from ROAI_class import ROAI, RANDOM, RR, WRR\n'), ((3108, 3180), 'ROAI_class.ROAI', 'ROAI', (['instance', 'n_select', 'mean', 'std', 'k', 'instance_type', 'sigma', 'delta', 'tol'], {}), '(instance, n_select, mean, std, k, instance_type, sigma, delta, tol)\n', (3112, 3180), False, 'from ROAI_class import ROAI, RANDOM, RR, WRR\n'), ((3823, 3895), 'ROAI_class.ROAI', 'ROAI', (['instance', 'n_select', 'mean', 'std', 'k', 'instance_type', 'sigma', 'delta', 'tol'], {}), '(instance, n_select, mean, std, k, instance_type, sigma, delta, tol)\n', (3827, 3895), False, 'from ROAI_class import ROAI, RANDOM, RR, WRR\n'), ((4565, 4585), 'numpy.argsort', 'np.argsort', (['instance'], {}), '(instance)\n', (4575, 4585), True, 'import numpy as np\n'), ((4693, 4704), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (4701, 4704), True, 'import numpy as np\n'), ((4787, 4801), 'numpy.argsort', 'np.argsort', (['AD'], {}), '(AD)\n', (4797, 4801), True, 'import numpy as np\n'), ((5252, 5268), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (5266, 5268), True, 'import numpy as np\n'), ((7554, 7565), 'time.time', 'time.time', ([], {}), '()\n', (7563, 7565), False, 'import time\n'), ((8006, 8117), 'functools.partial', 'partial', (['single_sim', 'n_select_list', 'mean', 'std', 'k_para', 'instance_type', 'sigma', 'delta', 'tol', 'pull_max', 'instance'], {}), '(single_sim, n_select_list, mean, std, k_para, instance_type, sigma,\n delta, tol, pull_max, instance)\n', (8013, 8117), False, 'from functools import partial\n'), ((8159, 8200), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'n_process'}), '(processes=n_process)\n', (8179, 8200), False, 'import multiprocessing\n'), ((9095, 9122), 'copy.deepcopy', 'copy.deepcopy', (['dict_results'], {}), '(dict_results)\n', (9108, 9122), False, 'import copy\n'), ((9146, 9173), 'copy.deepcopy', 'copy.deepcopy', (['dict_results'], {}), '(dict_results)\n', (9159, 9173), False, 'import copy\n'), ((9685, 9696), 'time.time', 'time.time', ([], {}), '()\n', (9694, 9696), False, 'import time\n'), ((9774, 9819), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (9800, 9819), False, 'import matplotlib\n'), ((10018, 10031), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (10028, 10031), True, 'import matplotlib.pyplot as plt\n'), ((10315, 10344), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of pulls"""'], {}), "('Number of pulls')\n", (10325, 10344), True, 'import matplotlib.pyplot as plt\n'), ((10349, 10373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error rate"""'], {}), "('Error rate')\n", (10359, 10373), True, 'import matplotlib.pyplot as plt\n'), ((10378, 10398), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.75)'}), '(alpha=0.75)\n', (10386, 10398), True, 
'import matplotlib.pyplot as plt\n'), ((10403, 10432), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (10413, 10432), True, 'import matplotlib.pyplot as plt\n'), ((10437, 10458), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'pull_max'], {}), '(0, pull_max)\n', (10445, 10458), True, 'import matplotlib.pyplot as plt\n'), ((10463, 10477), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (10471, 10477), True, 'import matplotlib.pyplot as plt\n'), ((10482, 10517), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""realdata_anytime.pdf"""'], {}), "('realdata_anytime.pdf')\n", (10493, 10517), True, 'import matplotlib.pyplot as plt\n'), ((10522, 10532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10530, 10532), True, 'import matplotlib.pyplot as plt\n'), ((10537, 10551), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10546, 10551), True, 'import matplotlib.pyplot as plt\n'), ((11961, 11993), 'numpy.hstack', 'np.hstack', (['(y_outlier, y_normal)'], {}), '((y_outlier, y_normal))\n', (11970, 11993), True, 'import numpy as np\n'), ((13343, 13355), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13351, 13355), True, 'import numpy as np\n'), ((13373, 13385), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13381, 13385), True, 'import numpy as np\n'), ((13536, 13568), 'numpy.hstack', 'np.hstack', (['(y_outlier, y_normal)'], {}), '((y_outlier, y_normal))\n', (13545, 13568), True, 'import numpy as np\n'), ((4985, 5002), 'numpy.mean', 'np.mean', (['instance'], {}), '(instance)\n', (4992, 5002), True, 'import numpy as np\n'), ((9928, 9937), 'math.sqrt', 'sqrt', (['(500)'], {}), '(500)\n', (9932, 9937), False, 'from math import sqrt\n'), ((10092, 10140), 'numpy.array', 'np.array', (['dict_results_ave[measures[0]][algs[i]]'], {}), '(dict_results_ave[measures[0]][algs[i]])\n', (10100, 10140), True, 'import numpy as np\n'), ((10229, 10314), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'alg_ave'], {'yerr': 'alg_std', 'label': 'algs[i]', 'linewidth': '(2)', 'errorevery': '(2)'}), '(x, alg_ave, yerr=alg_std, label=algs[i], linewidth=2, errorevery=2\n )\n', (10241, 10314), True, 'import matplotlib.pyplot as plt\n'), ((5009, 5025), 'numpy.std', 'np.std', (['instance'], {}), '(instance)\n', (5015, 5025), True, 'import numpy as np\n'), ((9463, 9506), 'numpy.mean', 'np.mean', (['dict_results[measure][alg]'], {'axis': '(0)'}), '(dict_results[measure][alg], axis=0)\n', (9470, 9506), True, 'import numpy as np\n'), ((9552, 9594), 'numpy.std', 'np.std', (['dict_results[measure][alg]'], {'axis': '(0)'}), '(dict_results[measure][alg], axis=0)\n', (9558, 9594), True, 'import numpy as np\n'), ((10172, 10220), 'numpy.array', 'np.array', (['dict_results_std[measures[0]][algs[i]]'], {}), '(dict_results_std[measures[0]][algs[i]])\n', (10180, 10220), True, 'import numpy as np\n'), ((10758, 10770), 'datetime.date.today', 'date.today', ([], {}), '()\n', (10768, 10770), False, 'from datetime import date\n'), ((8278, 8297), 'numpy.ones', 'np.ones', (['n_parallel'], {}), '(n_parallel)\n', (8285, 8297), True, 'import numpy as np\n')] |
import numpy as np
from ..optics import Wavefront, AgnosticOpticalElement, make_agnostic_forward, make_agnostic_backward
from ..field import Field, evaluate_supersampled
from ..fourier import FastFourierTransform, make_fft_grid, FourierFilter
class FresnelPropagator(AgnosticOpticalElement):
'''The monochromatic Fresnel propagator for scalar fields.
The Fresnel propagator is implemented as described in [Goodman2005]_.
	.. [Goodman2005] Goodman, J.W., 2005. Introduction to Fourier Optics. Roberts and Company Publishers.
Parameters
----------
input_grid : anything
This argument is ignored. The input grid is taken from the incoming wavefront.
distance : scalar
The distance to propagate
num_oversampling : int
The number of times the transfer function is oversampled. Default is 2.
wavelength : scalar
The wavelength of the wavefront.
refractive_index : scalar
The refractive index of the medium that the wavefront is propagating in.
Raises
------
ValueError
		If the `input_grid` is not a regular, Cartesian grid.
'''
def __init__(self, input_grid, distance, num_oversampling=2, refractive_index=1):
self._distance = distance
self._num_oversampling = num_oversampling
self._refractive_index = refractive_index
AgnosticOpticalElement.__init__(self, grid_dependent=True, wavelength_dependent=True)
def make_instance(self, instance_data, input_grid, output_grid, wavelength):
if not input_grid.is_regular or not input_grid.is_('cartesian'):
raise ValueError('The input grid must be a regular, Cartesian grid.')
k = 2 * np.pi / wavelength * self.evaluate_parameter(self.refractive_index, input_grid, output_grid, wavelength)
L_max = np.max(input_grid.dims * input_grid.delta)
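		# Choose between the two standard Fresnel discretizations: for long propagation
		# distances (grid spacing finer than wavelength * distance / L_max) the Fresnel
		# transfer function would be undersampled on this grid, so sample the real-space
		# impulse response and Fourier transform it; otherwise sample the transfer
		# function directly.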
if np.any(input_grid.delta < wavelength * self.distance / L_max):
def transfer_function(fourier_grid):
enlarged_grid = make_fft_grid(fourier_grid)
fft_upscale = FastFourierTransform(enlarged_grid)
def impulse_response(grid):
r_squared = grid.x**2 + grid.y**2
return Field(np.exp(1j * k * self.distance) / (1j * wavelength * self.distance) * np.exp(1j * k * r_squared / (2 * self.distance)), grid)
impulse_response = evaluate_supersampled(impulse_response, enlarged_grid, self.num_oversampling)
return fft_upscale.forward(impulse_response)
else:
def transfer_function_native(fourier_grid):
k_squared = fourier_grid.as_('polar').r**2
phase_factor = np.exp(1j * k * self.distance)
return Field(np.exp(-0.5j * self.distance * k_squared / k) * phase_factor, fourier_grid)
def transfer_function(fourier_grid):
return evaluate_supersampled(transfer_function_native, fourier_grid, self.num_oversampling)
instance_data.fourier_filter = FourierFilter(input_grid, transfer_function, q=2)
@property
def distance(self):
return self._distance
@distance.setter
def distance(self, distance):
self._distance = distance
self.clear_cache()
@property
def num_oversampling(self):
return self._num_oversampling
@num_oversampling.setter
def num_oversampling(self, num_oversampling):
self._num_oversampling = num_oversampling
self.clear_cache()
@property
def refractive_index(self):
return self._refractive_index
@refractive_index.setter
def refractive_index(self, refractive_index):
self._refractive_index = refractive_index
self.clear_cache()
def get_input_grid(self, output_grid, wavelength):
return output_grid
def get_output_grid(self, input_grid, wavelength):
return input_grid
@make_agnostic_forward
def forward(self, instance_data, wavefront):
'''Propagate a wavefront forward by a certain distance.
Parameters
----------
wavefront : Wavefront
The incoming wavefront.
Returns
-------
Wavefront
The wavefront after the propagation.
'''
filtered = instance_data.fourier_filter.forward(wavefront.electric_field)
return Wavefront(filtered, wavefront.wavelength, wavefront.input_stokes_vector)
@make_agnostic_backward
def backward(self, instance_data, wavefront):
'''Propagate a wavefront backward by a certain distance.
Parameters
----------
wavefront : Wavefront
The incoming wavefront.
Returns
-------
Wavefront
The wavefront after the propagation.
'''
filtered = instance_data.fourier_filter.backward(wavefront.electric_field)
return Wavefront(filtered, wavefront.wavelength, wavefront.input_stokes_vector)
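# A minimal usage sketch (illustrative only; the grid and aperture helpers
# named here are hypothetical stand-ins for the package-level API):
#
#     grid = make_pupil_grid(512, 0.01)
#     wf = Wavefront(Field(np.ones(grid.size), grid), wavelength=500e-9)
#     prop = FresnelPropagator(grid, distance=0.1)
#     img = prop.forward(wf)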
| [
"numpy.exp",
"numpy.any",
"numpy.max"
] | [((1675, 1717), 'numpy.max', 'np.max', (['(input_grid.dims * input_grid.delta)'], {}), '(input_grid.dims * input_grid.delta)\n', (1681, 1717), True, 'import numpy as np\n'), ((1724, 1785), 'numpy.any', 'np.any', (['(input_grid.delta < wavelength * self.distance / L_max)'], {}), '(input_grid.delta < wavelength * self.distance / L_max)\n', (1730, 1785), True, 'import numpy as np\n'), ((2417, 2449), 'numpy.exp', 'np.exp', (['(1.0j * k * self.distance)'], {}), '(1.0j * k * self.distance)\n', (2423, 2449), True, 'import numpy as np\n'), ((2466, 2511), 'numpy.exp', 'np.exp', (['(-0.5j * self.distance * k_squared / k)'], {}), '(-0.5j * self.distance * k_squared / k)\n', (2472, 2511), True, 'import numpy as np\n'), ((2088, 2138), 'numpy.exp', 'np.exp', (['(1.0j * k * r_squared / (2 * self.distance))'], {}), '(1.0j * k * r_squared / (2 * self.distance))\n', (2094, 2138), True, 'import numpy as np\n'), ((2019, 2051), 'numpy.exp', 'np.exp', (['(1.0j * k * self.distance)'], {}), '(1.0j * k * self.distance)\n', (2025, 2051), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from tensorflow_addons.layers import Sparsemax
from tensorflow_addons.utils import test_utils
test_obs = 17
def _np_sparsemax(z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
    # take an axis argument. np.argmax returns the first index, but the last
# index is required here, use np.flip to get the last index and
# `z.shape[axis]` to compensate for np.flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
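# Worked example: for z = [[1.0, 0.5, -1.0]] the centered, sorted row is
# [0.8333, 0.3333, -1.1667], k(z) = 2 and tau(z) = 0.0833, so the function
# returns [[0.75, 0.25, 0.0]] -- a sparse probability vector, unlike softmax.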
@parameterized.parameters([np.float32, np.float64])
@test_utils.run_all_in_graph_and_eager_modes
class SparsemaxTest(tf.test.TestCase):
def test_sparsemax_layer_against_numpy(self, dtype):
"""check sparsemax kernel against numpy."""
random = np.random.RandomState(1)
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
test_utils.layer_test(
Sparsemax,
kwargs={'dtype': dtype},
input_data=z,
expected_output=_np_sparsemax(z).astype(dtype))
if __name__ == '__main__':
tf.test.main()
| [
"numpy.mean",
"numpy.sort",
"absl.testing.parameterized.parameters",
"numpy.argmax",
"tensorflow.test.main",
"numpy.random.RandomState",
"numpy.cumsum",
"numpy.maximum",
"numpy.arange"
] | [((1778, 1828), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[np.float32, np.float64]'], {}), '([np.float32, np.float64])\n', (1802, 1828), False, 'from absl.testing import parameterized\n'), ((1156, 1183), 'numpy.cumsum', 'np.cumsum', (['z_sorted'], {'axis': '(1)'}), '(z_sorted, axis=1)\n', (1165, 1183), True, 'import numpy as np\n'), ((1192, 1220), 'numpy.arange', 'np.arange', (['(1)', '(z.shape[1] + 1)'], {}), '(1, z.shape[1] + 1)\n', (1201, 1220), True, 'import numpy as np\n'), ((1750, 1774), 'numpy.maximum', 'np.maximum', (['(0)', '(z - tau_z)'], {}), '(0, z - tau_z)\n', (1760, 1774), True, 'import numpy as np\n'), ((2354, 2368), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2366, 2368), True, 'import tensorflow as tf\n'), ((1091, 1109), 'numpy.sort', 'np.sort', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (1098, 1109), True, 'import numpy as np\n'), ((1553, 1588), 'numpy.argmax', 'np.argmax', (['z_check[:, ::-1]'], {'axis': '(1)'}), '(z_check[:, ::-1], axis=1)\n', (1562, 1588), True, 'import numpy as np\n'), ((2039, 2063), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (2060, 2063), True, 'import numpy as np\n'), ((1028, 1046), 'numpy.mean', 'np.mean', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (1035, 1046), True, 'import numpy as np\n'), ((1636, 1660), 'numpy.arange', 'np.arange', (['(0)', 'z.shape[0]'], {}), '(0, z.shape[0])\n', (1645, 1660), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Tools for calculating Alfvenic turbulence quantities derived from Elasser variables.
Bibliography
------------
[1] <NAME>., & <NAME>. (2013). The solar wind as a turbulence laboratory.
Living Reviews in Solar Physics, 10(1), 1–208. https://doi.org/10.12942/lrsp-2013-2
[2] <NAME>., & <NAME>. (2016). Linking fluid and kinetic scales in solar
wind turbulence. Monthly Notices of the Royal Astronomical Society: Letters,
463(1), L79–L83. https://doi.org/10.1093/mnrasl/slw135
[3] <NAME>., <NAME>., <NAME>. & <NAME>. The Role of Proton-
Cyclotron Resonance as a Dissipation Mechanism in Solar Wind Turbulence: A
Statistical Study at Ion-Kinetic Scales. Astrophys. J. 856, 49 (2018).
"""
import pdb # noqa: F401
import numpy as np
import pandas as pd
from collections import namedtuple
# We rely on views via DataFrame.xs to reduce memory size and do not
# `.copy(deep=True)`, so we want to make sure that this doesn't
# accidentally cause a problem.
pd.set_option("mode.chained_assignment", "raise")
try:
from . import base
except ImportError:
import base
AlvenicTurbAveraging = namedtuple("AlvenicTurbAveraging", "window,min_periods")
class AlfvenicTurbulence(base.Core):
r"""Handle and calculate Alfvenic turbulence quantities using the Elsasser
variables following Section B.3.1 in [1].
<NAME> <https://orcid.org/0000-0003-2845-4250> helped me define these
calculations at the 2018 AGU Fall Meeting and understand [1]. Please cite [3] if
using this module.
Parameters
----------
velocity : pd.DataFrame, pd.Series (?)
The velocity vector in the same basis as `bfield`.
Can be a single species, a CoM species, or a differential flow. The
differential flow case is an area of curiosity for me and I do not
suggest passing it as an input.
Expect [v] = km/s (i.e. default stored in `units_constants.Units`).
bfield : pd.DataFrame, pd.Series (?)
Magnetic field vector in the same basis as `velocity`.
        Expect [b] = nT (i.e. default stored in `units_constants.Units`).
rho : pd.Series
The total mass density of the plasma used to define velocity.
Expect [rho] = m_p / cm^3 (i.e. default stored in
`units_constants.Units`).
species: str
The species string. Can contain `+`. Can contain at most one `,`.
Properties
----------
data, velocity, v, bfield, b, species, z_plus, zp, z_minus, zm, e_plus, ep,
e_minus, em, kinetic_energy, ev, magnetic_energy, eb, total_energy, etot,
residual_energy, eres, normalized_residual_energy, eres_norm, sigma_r,
cross_helicity, normalized_cross_helicity, sigma_c, alfven_ratio, rA,
elsasser_ratio, rE
Methods
-------
set_data
Notes
-----
"""
def __init__(self, velocity, bfield, rho, species, **kwargs):
r"""Initialize an :py:class:`AlfvenicTurbulence` object.
Parameters
----------
velocity: pd.DataFrame
            Vector velocity measurements.
        bfield: pd.DataFrame
            Vector magnetic field measurements.
        rho: pd.Series
            Mass density measurements, used to put `bfield` into Alfven units.
        kwargs:
            Passed to `rolling` method when mean-subtracting in `set_data`.
"""
# print("<Module>",
# "__init__",
# sep="\n",
# end="\n")
super(AlfvenicTurbulence, self).__init__()
self.set_data(velocity, bfield, rho, species, **kwargs)
@property
def data(self):
r"""Mean-subtracted quantities used to calculated Elsasser variables.
"""
return self._data
@property
def averaging_info(self):
r"""Averaging window and minimum number of measurements / average used
        in calculating the background component subtracted to form :math:`\delta B` and :math:`\delta v`.
"""
return self._averaging_info
@property
def measurements(self):
r"""Measurements used to calcualte mean-subtracted `data`.
"""
return self._measurements
@property
def velocity(self):
r"""Velocity in Plasma's v-units.
"""
return self.data.loc[:, "v"]
@property
def v(self):
r"""Shortcut for `AlfvenicTurbulence.velocity`
"""
return self.velocity
@property
def bfield(self):
r"""Magnetic field in Alfven units, where velocity is stored in Plasma's
v-units.
"""
return self.data.loc[:, "b"]
@property
def b(self):
r"""Shortcut for `AlfvenicTurbulence.bfield`.
"""
return self.bfield
@property
def species(self):
r"""Species used to create `AlfvenicTurbulence`. Defines mass density in Alfven
units.
"""
return self._species
@property
def z_plus(self):
r"""Z+ Elsasser variable.
"""
zp = self.v.add(self.b, axis=1)
return zp
@property
def zp(self):
r"""Shortcut for `AlfvenicTurbulence.z_plus`.
"""
return self.z_plus
@property
def z_minus(self):
r"""Z- Elsasser variable.
"""
zm = self.v.subtract(self.b, axis=1)
return zm
@property
def zm(self):
r"""Shortcut for `AlfvenicTurbulence.z_minus`.
"""
return self.z_minus
@property
def e_plus(self):
# I took the averages before I created the +/-z quantities in my
# previous test cases. Based on a more detailed read of Bruno and
# Carbone, I calculate +/-z before I take averages. Note that because
        # I am adding v and b, the difference shouldn't matter.
ep = 0.5 * self.zp.pow(2).sum(axis=1)
return ep
@property
def ep(self):
return self.e_plus
@property
def e_minus(self):
em = 0.5 * self.zm.pow(2).sum(axis=1)
return em
@property
def em(self):
return self.e_minus
@property
def kinetic_energy(self):
ev = 0.5 * self.v.pow(2).sum(axis=1)
return ev
@property
def ev(self):
return self.kinetic_energy
@property
def magnetic_energy(self):
eb = 0.5 * self.b.pow(2).sum(axis=1)
return eb
@property
def eb(self):
return self.magnetic_energy
@property
def total_energy(self):
return self.ev.add(self.eb, axis=0)
@property
def etot(self):
return self.total_energy
@property
def residual_energy(self):
return self.ev.subtract(self.eb, axis=0)
@property
def eres(self):
return self.residual_energy
@property
def normalized_residual_energy(self):
return self.eres.divide(self.etot, axis=0)
@property
def eres_norm(self):
return self.normalized_residual_energy
@property
def sigma_r(self):
return self.normalized_residual_energy
@property
def cross_helicity(self):
v = self.v
b = self.b
c = 0.5 * v.multiply(b).sum(axis=1)
return c
@property
def normalized_cross_helicity(self):
ep = self.ep
em = self.em
num = ep.subtract(em)
den = ep.add(em)
out = num.divide(den)
return out
@property
def sigma_c(self):
return self.normalized_cross_helicity
@property
def alfven_ratio(self):
return self.ev.divide(self.eb, axis=0)
@property
def rA(self):
return self.alfven_ratio
@property
def elsasser_ratio(self):
return self.em.divide(self.ep, axis=0)
@property
def rE(self):
return self.elsasser_ratio
def set_data(self, v_in, b_in, rho, species, **kwargs):
r"""The `auto_reindex` kwarg can be set to False so that, if running a
large batch of analysis on the same data, one can reindex once outside
of this class and avoid many unnecessary reindexing cases within it.
Be sure to carefully check your reindexing so as to not introduce lots
of NaNs. I ran into that bug when first writing this class.
"""
species = self._clean_species_for_setting(species)
if not isinstance(v_in.index, pd.DatetimeIndex):
raise TypeError
if not isinstance(b_in.index, pd.DatetimeIndex):
raise TypeError
if not isinstance(rho.index, pd.DatetimeIndex):
raise TypeError
if not v_in.index.equals(b_in.index):
self.logger.warn("v and b have unequal indices. Results may be unexpected.")
if not v_in.index.equals(rho.index):
            self.logger.warning(
"""v and rho have unequal indices. Results may be
unexpected."""
)
# auto_reindex = bool(auto_reindex)
# assert rho.ndim == 1
# Based on my read of Bruno and Carbone's definition in B.3.1 (p.166),
# we first define the magnetic field in Alfven units. Then we calculate
# averages. Note that I took the other option in my test cases in
# `TS-analysis` project. (20181120)
coef = self.units.b / ( # Convert b -> Alfven units.
np.sqrt(self.units.rho * self.constants.misc.mu0) * self.units.v
)
b_ca_units = b_in.divide(rho.pipe(np.sqrt), axis=0).multiply(coef)
# if auto_reindex:
# idx = v_in.index.union(b_in.index)
# i0 = idx.min()
# i1 = idx.max() + 1 # `stop` excludes `i1`, so use `i1 + 1`.
# idx = pd.RangeIndex(start=i0, stop=i1, step=1)
#
# v = v_in.reindex(idx, axis=0)
# b = b_ca_units.reindex(idx, axis=0)
#
# else:
# v = v_in
# b = b_ca_units
# print("<set_data>",
# "<species>: %s" % species,
# "<v_in>", type(v_in), v_in,
# "<v>", type(v), v,
# "<rho>", type(rho), rho,
# "<b_in>", type(b_in), b_in,
# "<coef>: %s" % coef,
# "<b>", type(b), b,
# sep="\n",
# end="\n\n")
window = kwargs.pop("window", "15min")
min_periods = kwargs.pop("min_periods", 5)
data = pd.concat(
{"v": v_in, "b": b_ca_units}, axis=1, names=["M"], sort=True
).sort_index(axis=1)
rolled = data.rolling(window, min_periods=min_periods, **kwargs)
agged = rolled.agg("mean")
deltas = data.subtract(agged, axis=1)
data.name = "measurements"
deltas.name = "deltas"
self._measurements = data
self._data = deltas
self._species = species
self._averaging_info = AlvenicTurbAveraging(window, min_periods)
def _clean_species_for_setting(self, species):
if not isinstance(species, str):
msg = "%s.species must be a single species w/ an optional `+` or `,`"
raise TypeError(msg % self.__class__.__name__)
if species.count(",") > 1:
msg = "%s.species can contain at most one `,`\nspecies: %s"
raise ValueError(msg % (self.__class__.__name__, species))
species = ",".join(
["+".join(tuple(sorted(s.split("+")))) for s in species.split(",")]
)
return species
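# A minimal usage sketch (illustrative; `v` and `b` are vector DataFrames and
# `rho` is a mass-density Series, all sharing one DatetimeIndex):
#
#     turb = AlfvenicTurbulence(v, b, rho, "p1", window="15min", min_periods=5)
#     sigma_c = turb.normalized_cross_helicity
#     sigma_r = turb.normalized_residual_energy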
| [
"collections.namedtuple",
"numpy.sqrt",
"pandas.concat",
"pandas.set_option"
] | [((998, 1047), 'pandas.set_option', 'pd.set_option', (['"""mode.chained_assignment"""', '"""raise"""'], {}), "('mode.chained_assignment', 'raise')\n", (1011, 1047), True, 'import pandas as pd\n'), ((1137, 1193), 'collections.namedtuple', 'namedtuple', (['"""AlvenicTurbAveraging"""', '"""window,min_periods"""'], {}), "('AlvenicTurbAveraging', 'window,min_periods')\n", (1147, 1193), False, 'from collections import namedtuple\n'), ((9273, 9322), 'numpy.sqrt', 'np.sqrt', (['(self.units.rho * self.constants.misc.mu0)'], {}), '(self.units.rho * self.constants.misc.mu0)\n', (9280, 9322), True, 'import numpy as np\n'), ((10476, 10547), 'pandas.concat', 'pd.concat', (["{'v': v_in, 'b': b_ca_units}"], {'axis': '(1)', 'names': "['M']", 'sort': '(True)'}), "({'v': v_in, 'b': b_ca_units}, axis=1, names=['M'], sort=True)\n", (10485, 10547), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
QISKit visualization library.
"""
import numpy as np
from qiskit.tools.visualization import VisualizationError
from ._iplot_blochsphere import iplot_blochsphere
from ._iplot_cities import iplot_cities
from ._iplot_hinton import iplot_hinton
from ._iplot_paulivec import iplot_paulivec
from ._iplot_qsphere import iplot_qsphere
def iplot_state(quantum_state, method='city', options=None):
"""Plot the quantum state.
Args:
quantum_state (ndarray): statevector or density matrix
representation of a quantum state.
method (str): Plotting method to use.
options (dict): Plotting settings.
Raises:
VisualizationError: if the input is not a statevector or density
            matrix, or if the state is not a multi-qubit quantum state.
"""
# Check if input is a statevector, and convert to density matrix
rho = np.array(quantum_state)
if rho.ndim == 1:
rho = np.outer(rho, np.conj(rho))
# Check the shape of the input is a square matrix
shape = np.shape(rho)
if len(shape) != 2 or shape[0] != shape[1]:
raise VisualizationError("Input is not a valid quantum state.")
# Check state is an n-qubit state
num = int(np.log2(len(rho)))
if 2 ** num != len(rho):
raise VisualizationError("Input is not a multi-qubit quantum state.")
if method == "city":
iplot_cities(rho, options)
elif method == "paulivec":
iplot_paulivec(rho, options)
elif method == "qsphere":
iplot_qsphere(rho, options)
elif method == "bloch":
iplot_blochsphere(rho, options)
elif method == "hinton":
iplot_hinton(rho, options)
else:
print("Unknown method '" + method + "'.")
| [
"numpy.conj",
"numpy.array",
"numpy.shape",
"qiskit.tools.visualization.VisualizationError"
] | [((1095, 1118), 'numpy.array', 'np.array', (['quantum_state'], {}), '(quantum_state)\n', (1103, 1118), True, 'import numpy as np\n'), ((1249, 1262), 'numpy.shape', 'np.shape', (['rho'], {}), '(rho)\n', (1257, 1262), True, 'import numpy as np\n'), ((1325, 1382), 'qiskit.tools.visualization.VisualizationError', 'VisualizationError', (['"""Input is not a valid quantum state."""'], {}), "('Input is not a valid quantum state.')\n", (1343, 1382), False, 'from qiskit.tools.visualization import VisualizationError\n'), ((1497, 1560), 'qiskit.tools.visualization.VisualizationError', 'VisualizationError', (['"""Input is not a multi-qubit quantum state."""'], {}), "('Input is not a multi-qubit quantum state.')\n", (1515, 1560), False, 'from qiskit.tools.visualization import VisualizationError\n'), ((1169, 1181), 'numpy.conj', 'np.conj', (['rho'], {}), '(rho)\n', (1176, 1181), True, 'import numpy as np\n')] |
"""
Tools for running multiple environments at once.
This code is influenced by openai/baselines and
unixpickle/anyrl-py, but it was rewritten specifically for
this contest.
One feature of this code is that it automatically deals
with environments that hang due to bugs. When this occurs,
the environment is killed and restarted automatically.
"""
from abc import ABC, abstractmethod, abstractproperty
from multiprocessing import Process, Queue
import os
from queue import Empty
import sys
import cloudpickle
import numpy as np
class BatchedEnv(ABC):
def __init__(self, action_space, obs_space):
self.action_space = action_space
self.observation_space = obs_space
@abstractproperty
def num_envs(self):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def step(self, actions):
pass
def close(self):
pass
class BatchedGymEnv(BatchedEnv):
def __init__(self, action_space, obs_space, env_fns):
super().__init__(action_space, obs_space)
self._procs = []
self._command_queues = []
self._result_queues = []
self._env_fns = env_fns
for env_fn in env_fns:
cmd_queue = Queue()
res_queue = Queue()
proc = Process(target=self._worker,
args=(cmd_queue, res_queue, cloudpickle.dumps(env_fn)))
proc.start()
self._procs.append(proc)
self._command_queues.append(cmd_queue)
self._result_queues.append(res_queue)
for q in self._result_queues:
self._queue_get(q)
@property
def num_envs(self):
return len(self._procs)
def reset(self):
for q in self._command_queues:
q.put(('reset', None))
return np.array([self._queue_get(q) for q in self._result_queues])
def step(self, actions):
for q, action in zip(self._command_queues, actions):
q.put(('step', action))
obses = []
rews = []
dones = []
infos = []
for i, q in enumerate(self._result_queues.copy()):
try:
obs, rew, done, info = self._queue_get(q)
except Empty:
sys.stderr.write('restarting worker %d due to hang.\n' % i)
self._restart_worker(i)
q = self._result_queues[i]
self._command_queues[i].put(('reset', None))
self._queue_get(q)
self._command_queues[i].put(('step', actions[i]))
obs, rew, done, info = self._queue_get(q)
done = True
obses.append(obs)
rews.append(rew)
dones.append(done)
infos.append(info)
return np.array(obses), np.array(rews), np.array(dones), infos
def close(self):
for q in self._command_queues:
q.put(('close', None))
for proc in self._procs:
proc.join()
def _restart_worker(self, idx):
os.system('kill -9 $(ps -o pid= --ppid %d)' % self._procs[idx].pid)
self._procs[idx].terminate()
self._procs[idx].join()
cmd_queue = Queue()
res_queue = Queue()
proc = Process(target=self._worker,
args=(cmd_queue, res_queue, cloudpickle.dumps(self._env_fns[idx]),))
proc.start()
self._procs[idx] = proc
self._command_queues[idx] = cmd_queue
self._result_queues[idx] = res_queue
self._queue_get(res_queue)
@staticmethod
def _worker(cmd_queue, res_queue, env_str):
try:
env = cloudpickle.loads(env_str)()
res_queue.put((None, None))
try:
while True:
cmd, arg = cmd_queue.get()
if cmd == 'reset':
res_queue.put((env.reset(), None))
elif cmd == 'step':
obs, rew, done, info = env.step(arg)
if done:
obs = env.reset()
res_queue.put(((obs, rew, done, info), None))
elif cmd == 'close':
return
finally:
env.close()
except Exception as exc:
res_queue.put((None, exc))
@staticmethod
def _queue_get(queue):
value, exc = queue.get(timeout=20)
if exc is not None:
raise exc
return value
class BatchedWrapper(BatchedEnv):
def __init__(self, env):
self.env = env
self.action_space = env.action_space
self.observation_space = env.observation_space
@property
def num_envs(self):
return self.env.num_envs
def reset(self):
return self.env.reset()
def step(self, actions):
return self.env.step(actions)
def close(self):
self.env.close()
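# A minimal usage sketch (assumes `gym` is installed; the env is illustrative):
#
#     import gym
#     probe = gym.make('CartPole-v1')
#     env_fns = [lambda: gym.make('CartPole-v1') for _ in range(4)]
#     batched = BatchedGymEnv(probe.action_space, probe.observation_space, env_fns)
#     obses = batched.reset()
#     actions = [batched.action_space.sample() for _ in range(batched.num_envs)]
#     obses, rews, dones, infos = batched.step(actions)
#     batched.close()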
| [
"cloudpickle.dumps",
"numpy.array",
"cloudpickle.loads",
"sys.stderr.write",
"os.system",
"multiprocessing.Queue"
] | [((3029, 3096), 'os.system', 'os.system', (["('kill -9 $(ps -o pid= --ppid %d)' % self._procs[idx].pid)"], {}), "('kill -9 $(ps -o pid= --ppid %d)' % self._procs[idx].pid)\n", (3038, 3096), False, 'import os\n'), ((3186, 3193), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3191, 3193), False, 'from multiprocessing import Process, Queue\n'), ((3214, 3221), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3219, 3221), False, 'from multiprocessing import Process, Queue\n'), ((1225, 1232), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1230, 1232), False, 'from multiprocessing import Process, Queue\n'), ((1257, 1264), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1262, 1264), False, 'from multiprocessing import Process, Queue\n'), ((2775, 2790), 'numpy.array', 'np.array', (['obses'], {}), '(obses)\n', (2783, 2790), True, 'import numpy as np\n'), ((2792, 2806), 'numpy.array', 'np.array', (['rews'], {}), '(rews)\n', (2800, 2806), True, 'import numpy as np\n'), ((2808, 2823), 'numpy.array', 'np.array', (['dones'], {}), '(dones)\n', (2816, 2823), True, 'import numpy as np\n'), ((3635, 3661), 'cloudpickle.loads', 'cloudpickle.loads', (['env_str'], {}), '(env_str)\n', (3652, 3661), False, 'import cloudpickle\n'), ((2248, 2307), 'sys.stderr.write', 'sys.stderr.write', (["('restarting worker %d due to hang.\\n' % i)"], {}), "('restarting worker %d due to hang.\\n' % i)\n", (2264, 2307), False, 'import sys\n'), ((3317, 3354), 'cloudpickle.dumps', 'cloudpickle.dumps', (['self._env_fns[idx]'], {}), '(self._env_fns[idx])\n', (3334, 3354), False, 'import cloudpickle\n'), ((1368, 1393), 'cloudpickle.dumps', 'cloudpickle.dumps', (['env_fn'], {}), '(env_fn)\n', (1385, 1393), False, 'import cloudpickle\n')] |
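# Trains a tiny 2-3-1 sigmoid network on the XOR truth table with manual
# backpropagation, running the training job in a worker process.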
import numpy as np
import datetime
from concurrent.futures.process import ProcessPoolExecutor as Process
from threading import Thread
inicio = datetime.datetime.now()
def sigmoid(soma):
return 1 / (1 + np.exp(-soma))
def sigmoidDerivada(sig):
return sig * (1 - sig)
def processar(epocas, entradas,saidas,taxaAprendizagem,momento,pesos0, pesos1):
for j in range(epocas):
camadaEntrada = entradas
somaSinapse0 = np.dot(camadaEntrada, pesos0)
camadaOculta = sigmoid(somaSinapse0)
somaSinapse1 = np.dot(camadaOculta, pesos1)
camadaSaida = sigmoid(somaSinapse1)
erroCamadaSaida = saidas - camadaSaida
mediaAbsoluta = np.mean(np.abs(erroCamadaSaida))
print(f"Epocas {j}.... Erro: {str(mediaAbsoluta)}")
derivadaSaida = sigmoidDerivada(camadaSaida)
deltaSaida = erroCamadaSaida * derivadaSaida
pesos1Transposta = pesos1.T
deltaSaidaXPeso = deltaSaida.dot(pesos1Transposta)
deltaCamadaOculta = deltaSaidaXPeso * sigmoidDerivada(camadaOculta)
camadaOcultaTransposta = camadaOculta.T
pesosNovo1 = camadaOcultaTransposta.dot(deltaSaida)
pesos1 = (pesos1 * momento) + (pesosNovo1 * taxaAprendizagem)
camadaEntradaTransposta = camadaEntrada.T
pesosNovo0 = camadaEntradaTransposta.dot(deltaCamadaOculta)
pesos0 = (pesos0 * momento) + (pesosNovo0 * taxaAprendizagem)
entradas = np.array([[0,0],
[0,1],
[1,0],
[1,1]])
saidas = np.array([[0],[1],[1],[0]])
pesos0 = 2*np.random.random((2,3)) - 1
pesos1 = 2*np.random.random((3,1)) - 1
epocas = 10000
taxaAprendizagem = 0.5
momento = 1
if __name__ == '__main__':
with Process() as chamada:
futuro = chamada.submit(processar, epocas, entradas,saidas,taxaAprendizagem,momento,pesos0, pesos1)
        th = Thread(target=futuro.result)  # a Future is not callable; wait on its result instead
th.start()
th.join()
tempo_atual = datetime.datetime.now() - inicio
print(f'Total run time was {tempo_atual.total_seconds():.5f} seconds')
| [
"numpy.abs",
"numpy.random.random",
"concurrent.futures.process.ProcessPoolExecutor",
"numpy.exp",
"datetime.datetime.now",
"numpy.dot",
"numpy.array",
"threading.Thread"
] | [((144, 167), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (165, 167), False, 'import datetime\n'), ((1468, 1510), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (1476, 1510), True, 'import numpy as np\n'), ((1580, 1610), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (1588, 1610), True, 'import numpy as np\n'), ((1989, 2012), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2010, 2012), False, 'import datetime\n'), ((443, 472), 'numpy.dot', 'np.dot', (['camadaEntrada', 'pesos0'], {}), '(camadaEntrada, pesos0)\n', (449, 472), True, 'import numpy as np\n'), ((546, 574), 'numpy.dot', 'np.dot', (['camadaOculta', 'pesos1'], {}), '(camadaOculta, pesos1)\n', (552, 574), True, 'import numpy as np\n'), ((1619, 1643), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (1635, 1643), True, 'import numpy as np\n'), ((1658, 1682), 'numpy.random.random', 'np.random.random', (['(3, 1)'], {}), '((3, 1))\n', (1674, 1682), True, 'import numpy as np\n'), ((1773, 1782), 'concurrent.futures.process.ProcessPoolExecutor', 'Process', ([], {}), '()\n', (1780, 1782), True, 'from concurrent.futures.process import ProcessPoolExecutor as Process\n'), ((1914, 1935), 'threading.Thread', 'Thread', ([], {'target': 'futuro'}), '(target=futuro)\n', (1920, 1935), False, 'from threading import Thread\n'), ((208, 221), 'numpy.exp', 'np.exp', (['(-soma)'], {}), '(-soma)\n', (214, 221), True, 'import numpy as np\n'), ((703, 726), 'numpy.abs', 'np.abs', (['erroCamadaSaida'], {}), '(erroCamadaSaida)\n', (709, 726), True, 'import numpy as np\n')] |
"""Installation file for ansys-mapdl-reader"""
import platform
import re
import subprocess
import struct
import os
import sys
from io import open as io_open
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
try:
import numpy as np
except ImportError:
raise Exception('Please install numpy first with "pip install numpy"')
# Facilities to install properly on Mac using clang
def is_clang(bin):
proc = subprocess.Popen([bin, '-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
output = str(b'\n'.join([stdout, stderr]).decode('ascii', 'ignore'))
    return re.search(r'clang', output) is not None
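# For example, on macOS `cc -v` reports "Apple clang version ...", so
# is_clang('cc') returns True there.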
def check_cython():
"""Check if binaries exist and if not check if Cython is installed"""
has_binary_reader = False
for filename in os.listdir('ansys/mapdl/reader'):
if '_binary_reader' in filename:
has_binary_reader = True
if not has_binary_reader:
# ensure cython is installed before trying to build
try:
import cython
except ImportError:
raise ImportError('\n\n\nTo build pyansys please install Cython with:\n\n'
'pip install cython\n\n') from None
check_cython()
class build_ext(_build_ext):
""" build class that includes numpy directory """
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
def build_extensions(self):
if os.name != 'nt':
binary = self.compiler.compiler[0]
if is_clang(binary):
for e in self.extensions:
e.extra_compile_args.append('-stdlib=libc++')
if platform.system() == 'Darwin':
# get the minor version
mac_version, _, _ = platform.mac_ver()
minor = [int(n) for n in mac_version.split('.')][1]
# libstdc++ is deprecated in recent versions of XCode
if minor >= 9:
e.extra_compile_args.append('-mmacosx-version-min=10.9')
e.extra_compile_args.append('-stdlib=libc++')
e.extra_link_args.append('-mmacosx-version-min=10.9')
e.extra_link_args.append('-stdlib=libc++')
else:
e.extra_compile_args.append('-mmacosx-version-min=10.7')
e.extra_link_args.append('-mmacosx-version-min=10.7')
_build_ext.build_extensions(self)
def compilerName():
""" Check compiler and assign compile arguments accordingly """
import re
import distutils.ccompiler
comp = distutils.ccompiler.get_default_compiler()
getnext = False
for a in sys.argv[2:]:
if getnext:
comp = a
getnext = False
continue
# separated by space
if a == '--compiler' or re.search('^-[a-z]*c$', a):
getnext = True
continue
# without space
m = re.search('^--compiler=(.+)', a)
if m is None:
m = re.search('^-[a-z]*c(.+)', a)
if m:
comp = m.group(1)
return comp
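# For example, "python setup.py build_ext --compiler=mingw32" makes
# compilerName() return "mingw32" instead of the platform default.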
# Assign arguments based on compiler
compiler = compilerName()
if compiler == 'unix':
cmp_arg = ['-O3', '-w']
else:
cmp_arg = ['/Ox', '-w']
# Get version from version info
__version__ = None
this_file = os.path.dirname(__file__)
version_file = os.path.join(this_file, 'ansys', 'mapdl', 'reader', '_version.py')
with io_open(version_file, mode='r') as fd:
# execute file from raw string
exec(fd.read())
install_requires = ['numpy>=1.16.0',
'pyvista>=0.32.0',
'appdirs>=1.4.0',
'matplotlib>=3.0.0',
'tqdm>=4.45.0']
# perform python version checking
# this is necessary to avoid the new pip package checking as vtk does
# not support Python 32-bit as of 17 June 2021.
if not struct.calcsize("P")*8 == 64:
try:
import vtk
except ImportError:
raise RuntimeError('\n\n``ansys-mapdl-reader`` requires 64-bit Python due to vtk.\n'
'Please check the version of Python installed at\n'
'%s' % sys.executable)
# Actual setup
setup(
name='ansys-mapdl-reader',
packages=['ansys.mapdl.reader', 'ansys.mapdl.reader.examples'],
version=__version__,
description='Pythonic interface to files generated by MAPDL',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
url='https://github.com/pyansys/pymapdl-reader',
# Build cython modules
cmdclass={'build_ext': build_ext},
ext_modules=[
Extension('ansys.mapdl.reader._archive',
['ansys/mapdl/reader/cython/_archive.pyx',
'ansys/mapdl/reader/cython/archive.c'],
extra_compile_args=cmp_arg,
language='c',),
Extension('ansys.mapdl.reader._reader',
['ansys/mapdl/reader/cython/_reader.pyx',
'ansys/mapdl/reader/cython/reader.c',
'ansys/mapdl/reader/cython/vtk_support.c'],
extra_compile_args=cmp_arg,
language='c',),
Extension("ansys.mapdl.reader._relaxmidside",
["ansys/mapdl/reader/cython/_relaxmidside.pyx"],
extra_compile_args=cmp_arg,
language='c'),
Extension("ansys.mapdl.reader._cellqual",
["ansys/mapdl/reader/cython/_cellqual.pyx"],
extra_compile_args=cmp_arg,
language='c'),
Extension("ansys.mapdl.reader._binary_reader",
["ansys/mapdl/reader/cython/_binary_reader.pyx",
"ansys/mapdl/reader/cython/binary_reader.cpp"],
extra_compile_args=cmp_arg,
language='c++'),
],
    python_requires='>=3.6',
keywords='vtk MAPDL ANSYS cdb full rst',
package_data={'ansys.mapdl.reader.examples': ['TetBeam.cdb',
'HexBeam.cdb',
'file.rst',
'file.full',
'sector.rst',
'sector.cdb']},
install_requires=install_requires,
)
| [
"struct.calcsize",
"os.listdir",
"subprocess.Popen",
"os.path.join",
"platform.mac_ver",
"io.open",
"setuptools.Extension",
"os.path.dirname",
"platform.system",
"numpy.get_include",
"setuptools.command.build_ext.build_ext.finalize_options",
"setuptools.command.build_ext.build_ext.build_extens... | [((3696, 3721), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3711, 3721), False, 'import os\n'), ((3737, 3803), 'os.path.join', 'os.path.join', (['this_file', '"""ansys"""', '"""mapdl"""', '"""reader"""', '"""_version.py"""'], {}), "(this_file, 'ansys', 'mapdl', 'reader', '_version.py')\n", (3749, 3803), False, 'import os\n'), ((471, 548), 'subprocess.Popen', 'subprocess.Popen', (["[bin, '-v']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([bin, '-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", (487, 548), False, 'import subprocess\n'), ((859, 891), 'os.listdir', 'os.listdir', (['"""ansys/mapdl/reader"""'], {}), "('ansys/mapdl/reader')\n", (869, 891), False, 'import os\n'), ((3809, 3840), 'io.open', 'io_open', (['version_file'], {'mode': '"""r"""'}), "(version_file, mode='r')\n", (3816, 3840), True, 'from io import open as io_open\n'), ((1424, 1457), 'setuptools.command.build_ext.build_ext.finalize_options', '_build_ext.finalize_options', (['self'], {}), '(self)\n', (1451, 1457), True, 'from setuptools.command.build_ext import build_ext as _build_ext\n'), ((2777, 2810), 'setuptools.command.build_ext.build_ext.build_extensions', '_build_ext.build_extensions', (['self'], {}), '(self)\n', (2804, 2810), True, 'from setuptools.command.build_ext import build_ext as _build_ext\n'), ((3311, 3343), 're.search', 're.search', (['"""^--compiler=(.+)"""', 'a'], {}), "('^--compiler=(.+)', a)\n", (3320, 3343), False, 'import re\n'), ((677, 703), 're.search', 're.search', (['"""clang"""', 'output'], {}), "('clang', output)\n", (686, 703), False, 'import re\n'), ((1629, 1648), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1646, 1648), False, 'import numpy\n'), ((3199, 3225), 're.search', 're.search', (['"""^-[a-z]*c$"""', 'a'], {}), "('^-[a-z]*c$', a)\n", (3208, 3225), False, 'import re\n'), ((3382, 3411), 're.search', 're.search', (['"""^-[a-z]*c(.+)"""', 'a'], {}), "('^-[a-z]*c(.+)', a)\n", (3391, 3411), False, 'import re\n'), ((4255, 4275), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (4270, 4275), False, 'import struct\n'), ((5597, 5775), 'setuptools.Extension', 'Extension', (['"""ansys.mapdl.reader._archive"""', "['ansys/mapdl/reader/cython/_archive.pyx',\n 'ansys/mapdl/reader/cython/archive.c']"], {'extra_compile_args': 'cmp_arg', 'language': '"""c"""'}), "('ansys.mapdl.reader._archive', [\n 'ansys/mapdl/reader/cython/_archive.pyx',\n 'ansys/mapdl/reader/cython/archive.c'], extra_compile_args=cmp_arg,\n language='c')\n", (5606, 5775), False, 'from setuptools import setup, Extension\n'), ((5892, 6114), 'setuptools.Extension', 'Extension', (['"""ansys.mapdl.reader._reader"""', "['ansys/mapdl/reader/cython/_reader.pyx',\n 'ansys/mapdl/reader/cython/reader.c',\n 'ansys/mapdl/reader/cython/vtk_support.c']"], {'extra_compile_args': 'cmp_arg', 'language': '"""c"""'}), "('ansys.mapdl.reader._reader', [\n 'ansys/mapdl/reader/cython/_reader.pyx',\n 'ansys/mapdl/reader/cython/reader.c',\n 'ansys/mapdl/reader/cython/vtk_support.c'], extra_compile_args=cmp_arg,\n language='c')\n", (5901, 6114), False, 'from setuptools import setup, Extension\n'), ((6255, 6401), 'setuptools.Extension', 'Extension', (['"""ansys.mapdl.reader._relaxmidside"""', "['ansys/mapdl/reader/cython/_relaxmidside.pyx']"], {'extra_compile_args': 'cmp_arg', 'language': '"""c"""'}), "('ansys.mapdl.reader._relaxmidside', [\n 'ansys/mapdl/reader/cython/_relaxmidside.pyx'], extra_compile_args=\n 
cmp_arg, language='c')\n", (6264, 6401), False, 'from setuptools import setup, Extension\n'), ((6492, 6629), 'setuptools.Extension', 'Extension', (['"""ansys.mapdl.reader._cellqual"""', "['ansys/mapdl/reader/cython/_cellqual.pyx']"], {'extra_compile_args': 'cmp_arg', 'language': '"""c"""'}), "('ansys.mapdl.reader._cellqual', [\n 'ansys/mapdl/reader/cython/_cellqual.pyx'], extra_compile_args=cmp_arg,\n language='c')\n", (6501, 6629), False, 'from setuptools import setup, Extension\n'), ((6721, 6922), 'setuptools.Extension', 'Extension', (['"""ansys.mapdl.reader._binary_reader"""', "['ansys/mapdl/reader/cython/_binary_reader.pyx',\n 'ansys/mapdl/reader/cython/binary_reader.cpp']"], {'extra_compile_args': 'cmp_arg', 'language': '"""c++"""'}), "('ansys.mapdl.reader._binary_reader', [\n 'ansys/mapdl/reader/cython/_binary_reader.pyx',\n 'ansys/mapdl/reader/cython/binary_reader.cpp'], extra_compile_args=\n cmp_arg, language='c++')\n", (6730, 6922), False, 'from setuptools import setup, Extension\n'), ((1923, 1940), 'platform.system', 'platform.system', ([], {}), '()\n', (1938, 1940), False, 'import platform\n'), ((2046, 2064), 'platform.mac_ver', 'platform.mac_ver', ([], {}), '()\n', (2062, 2064), False, 'import platform\n')] |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data = pd.read_csv(path)
#Code starts here
data.rename(columns={'Total':'Total_Medals'}, inplace=True)
data.head()
# --------------
#Code starts here
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()
# --------------
#Code starts here
from functools import reduce
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']].copy()
top_countries.drop(index=len(top_countries) - 1, axis=0, inplace=True)
def top_ten(df, column_name):
country_list = []
country_list = list(df.nlargest(10, column_name)['Country_Name'])
return country_list
top_10_summer = top_ten(top_countries, 'Total_Summer')
top_10_winter = top_ten(top_countries, 'Total_Winter')
top_10 = top_ten(top_countries, 'Total_Medals')
common = list(reduce(np.intersect1d, (top_10_summer, top_10_winter, top_10)))
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)].copy()
winter_df = data[data['Country_Name'].isin(top_10_winter)].copy()
top_df = data[data['Country_Name'].isin(top_10)].copy()
plt.plot(summer_df['Country_Name'], summer_df['Total_Summer'], color='blue')
plt.xlabel('Country_Name')
plt.ylabel('Total_Summer')
plt.title('Summer')
plt.show()
plt.plot(winter_df['Country_Name'], winter_df['Total_Winter'], color='red')
plt.xlabel('Country_Name')
plt.ylabel('Total_Winter')
plt.title('Winter')
plt.show()
plt.plot(top_df['Country_Name'], top_df['Total_Medals'], color='black')
plt.xlabel('Country_Name')
plt.ylabel('Total_Medals')
plt.title('Total')
plt.show()
# --------------
#Code starts here
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = max(summer_df['Golden_Ratio'])
summer_country_gold = summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
print(summer_max_ratio, summer_country_gold)
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = max(winter_df['Golden_Ratio'])
winter_country_gold = winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
print(winter_max_ratio, winter_country_gold)
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = max(top_df['Golden_Ratio'])
top_country_gold = top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
print(top_max_ratio, top_country_gold)
# --------------
#Code starts here
data_1 = data[:-1].copy()
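# Weighted medal tally: gold = 3 points, silver = 2, bronze = 1.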
data_1['Total_Points'] = (data_1['Gold_Total'] * 3) + (data_1['Silver_Total'] * 2) + (data_1['Bronze_Total'])
most_points = max(data_1['Total_Points'])
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
# --------------
#Code starts here
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"functools.reduce",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((144, 161), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (155, 161), True, 'import pandas as pd\n'), ((318, 391), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (326, 391), True, 'import numpy as np\n'), ((418, 507), 'numpy.where', 'np.where', (["(data['Total_Summer'] == data['Total_Winter'])", '"""Both"""', "data['Better_Event']"], {}), "(data['Total_Summer'] == data['Total_Winter'], 'Both', data[\n 'Better_Event'])\n", (426, 507), True, 'import numpy as np\n'), ((1398, 1474), 'matplotlib.pyplot.plot', 'plt.plot', (["summer_df['Country_Name']", "summer_df['Total_Summer']"], {'color': '"""blue"""'}), "(summer_df['Country_Name'], summer_df['Total_Summer'], color='blue')\n", (1406, 1474), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1502), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Country_Name"""'], {}), "('Country_Name')\n", (1486, 1502), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total_Summer"""'], {}), "('Total_Summer')\n", (1514, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1551), 'matplotlib.pyplot.title', 'plt.title', (['"""Summer"""'], {}), "('Summer')\n", (1541, 1551), True, 'import matplotlib.pyplot as plt\n'), ((1553, 1563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1561, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1567, 1642), 'matplotlib.pyplot.plot', 'plt.plot', (["winter_df['Country_Name']", "winter_df['Total_Winter']"], {'color': '"""red"""'}), "(winter_df['Country_Name'], winter_df['Total_Winter'], color='red')\n", (1575, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1644, 1670), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Country_Name"""'], {}), "('Country_Name')\n", (1654, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total_Winter"""'], {}), "('Total_Winter')\n", (1682, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1700, 1719), 'matplotlib.pyplot.title', 'plt.title', (['"""Winter"""'], {}), "('Winter')\n", (1709, 1719), True, 'import matplotlib.pyplot as plt\n'), ((1721, 1731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1729, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1806), 'matplotlib.pyplot.plot', 'plt.plot', (["top_df['Country_Name']", "top_df['Total_Medals']"], {'color': '"""black"""'}), "(top_df['Country_Name'], top_df['Total_Medals'], color='black')\n", (1743, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Country_Name"""'], {}), "('Country_Name')\n", (1818, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total_Medals"""'], {}), "('Total_Medals')\n", (1846, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1882), 'matplotlib.pyplot.title', 'plt.title', (['"""Total"""'], {}), "('Total')\n", (1873, 1882), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1892, 1894), True, 'import matplotlib.pyplot as plt\n'), ((3205, 3232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (3215, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3260), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals 
Tally')\n", (3244, 3260), True, 'import matplotlib.pyplot as plt\n'), ((3262, 3285), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (3272, 3285), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1186), 'functools.reduce', 'reduce', (['np.intersect1d', '(top_10_summer, top_10_winter, top_10)'], {}), '(np.intersect1d, (top_10_summer, top_10_winter, top_10))\n', (1130, 1186), False, 'from functools import reduce\n')] |
# Copyright 2022 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
from tensorflow.python.saved_model import loader
# huggingface
from transformers import TFBertModel
from tf_blade.gpu.tf_to_trt import Tf2TrtOpt
from tf_blade.common.tf_grappler import GrapplerBasicOpt
def get_tf_bert_model():
model = TFBertModel.from_pretrained("bert-base-cased") # Automatically loads the config
return model
def get_tf_graph_def():
bert_model = get_tf_bert_model()
model_dir = 'bert_pretrained'
if os.path.exists(model_dir) and not loader.maybe_saved_model_directory(model_dir):
shutil.rmtree(model_dir)
tf.saved_model.save(bert_model, model_dir)
elif not os.path.exists(model_dir):
tf.saved_model.save(bert_model, model_dir)
loaded = tf.saved_model.load(model_dir)
# using default signature
func = loaded.signatures["serving_default"]
fetches = [output.name for output in func.outputs]
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2,
)
func = convert_variables_to_constants_v2(func)
graph_def = func.graph.as_graph_def()
opt = GrapplerBasicOpt()
output_graph_def = opt.optimize_graph_def(graph_def, fetches)
return output_graph_def, fetches
def run_benchmark(graph_def, fetches, feed_dict, model_name):
tf.compat.v1.reset_default_graph()
session_config = tf.compat.v1.ConfigProto()
session_config.allow_soft_placement = True
session_config.gpu_options.allow_growth = True
with tf.compat.v1.Session(config=session_config) as sess:
sess.graph.as_default()
tf.import_graph_def(graph_def, name="")
output = sess.run(fetches, feed_dict)
# Warmup!
for i in range(0, 100):
sess.run(fetches, feed_dict)
# Benchmark!
num_runs = 300
start = time.time()
for i in range(0, num_runs):
sess.run(fetches, feed_dict)
elapsed = time.time() - start
rt_ms = elapsed / num_runs * 1000.0
# Show the result!
print("Latency of {} model: {:.2f}".format(model_name, rt_ms))
return output
origin_graph_def, fetches = get_tf_graph_def()
feed_dicts = list()
feed_dicts.append({
'input_ids:0' : np.ones((1, 5), dtype=int),
})
opt_pass = Tf2TrtOpt()
model_outputs = [fetch.split(":")[0] for fetch in fetches]
opt_graph_def = opt_pass.optimize_graph_def(
origin_graph_def, model_outputs, feed_dicts, True,
)
output_origin = run_benchmark(origin_graph_def, fetches, feed_dicts[0], "origin")
output_opt = run_benchmark(opt_graph_def, fetches, feed_dicts[0], "optimized")
assert(len(output_origin) == len(output_opt))
for i in range(len(output_origin)):
assert(np.allclose(output_origin[i], output_opt[i], rtol=1e-6, atol=1e-3))
| [
"tensorflow.saved_model.load",
"tensorflow.compat.v1.ConfigProto",
"os.path.exists",
"numpy.allclose",
"transformers.TFBertModel.from_pretrained",
"numpy.ones",
"tf_blade.common.tf_grappler.GrapplerBasicOpt",
"tensorflow.python.saved_model.loader.maybe_saved_model_directory",
"tf_blade.gpu.tf_to_trt... | [((3019, 3030), 'tf_blade.gpu.tf_to_trt.Tf2TrtOpt', 'Tf2TrtOpt', ([], {}), '()\n', (3028, 3030), False, 'from tf_blade.gpu.tf_to_trt import Tf2TrtOpt\n'), ((1013, 1059), 'transformers.TFBertModel.from_pretrained', 'TFBertModel.from_pretrained', (['"""bert-base-cased"""'], {}), "('bert-base-cased')\n", (1040, 1059), False, 'from transformers import TFBertModel\n'), ((1484, 1514), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['model_dir'], {}), '(model_dir)\n', (1503, 1514), True, 'import tensorflow as tf\n'), ((1775, 1814), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_variables_to_constants_v2', (['func'], {}), '(func)\n', (1808, 1814), False, 'from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n'), ((1867, 1885), 'tf_blade.common.tf_grappler.GrapplerBasicOpt', 'GrapplerBasicOpt', ([], {}), '()\n', (1883, 1885), False, 'from tf_blade.common.tf_grappler import GrapplerBasicOpt\n'), ((2057, 2091), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (2089, 2091), True, 'import tensorflow as tf\n'), ((2113, 2139), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (2137, 2139), True, 'import tensorflow as tf\n'), ((3448, 3516), 'numpy.allclose', 'np.allclose', (['output_origin[i]', 'output_opt[i]'], {'rtol': '(1e-06)', 'atol': '(0.001)'}), '(output_origin[i], output_opt[i], rtol=1e-06, atol=0.001)\n', (3459, 3516), True, 'import numpy as np\n'), ((1215, 1240), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (1229, 1240), False, 'import os\n'), ((1304, 1328), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (1317, 1328), False, 'import shutil\n'), ((1337, 1379), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['bert_model', 'model_dir'], {}), '(bert_model, model_dir)\n', (1356, 1379), True, 'import tensorflow as tf\n'), ((2247, 2290), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'session_config'}), '(config=session_config)\n', (2267, 2290), True, 'import tensorflow as tf\n'), ((2340, 2379), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (2359, 2379), True, 'import tensorflow as tf\n'), ((2578, 2589), 'time.time', 'time.time', ([], {}), '()\n', (2587, 2589), False, 'import time\n'), ((2976, 3002), 'numpy.ones', 'np.ones', (['(1, 5)'], {'dtype': 'int'}), '((1, 5), dtype=int)\n', (2983, 3002), True, 'import numpy as np\n'), ((1249, 1294), 'tensorflow.python.saved_model.loader.maybe_saved_model_directory', 'loader.maybe_saved_model_directory', (['model_dir'], {}), '(model_dir)\n', (1283, 1294), False, 'from tensorflow.python.saved_model import loader\n'), ((1393, 1418), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (1407, 1418), False, 'import os\n'), ((1428, 1470), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['bert_model', 'model_dir'], {}), '(bert_model, model_dir)\n', (1447, 1470), True, 'import tensorflow as tf\n'), ((2686, 2697), 'time.time', 'time.time', ([], {}), '()\n', (2695, 2697), False, 'import time\n')] |
import os
import yaml
import torch
import skbio
import numpy as np
import pandas as pd
from catvae.trainer import MultVAE
from gneiss.balances import sparse_balance_basis
def load_model(model_path):
""" Loads VAE model.
Parameters
----------
model_path : str
Path to the pretrained VAE model
Returns
----------
vae_model : MultVAE
Pretrained Multinomial VAE
tree : skbio.TreeNode
The tree used to train the VAE
"""
ckpt_path = os.path.join(model_path, 'last_ckpt.pt')
params = os.path.join(model_path, 'hparams.yaml')
nwk_path = os.path.join(model_path, 'tree.nwk')
tree = skbio.TreeNode.read(nwk_path)
with open(params, 'r') as stream:
params = yaml.safe_load(stream)
params['basis'] = nwk_path
vae_model = MultVAE.load_from_checkpoint(ckpt_path, **params)
return vae_model, tree
def extract_sample_embeddings(vae_model, tree, table, return_type='dataframe'):
""" Extracts sample embeddings from model
Parameters
----------
vae_model : MultVAE
Pretrained Multinomial VAE
tree : skbio.TreeNode
The tree used to train the VAE
table : biom.Table
The biom table one wishes to convert to sample embeddings
return_type : str
        Options include 'tensor', 'array', 'dataframe' (default='dataframe').
If 'tensor' is specified, a `torch.Tensor` object is returned.
If 'array' is specified, a `numpy.array` object is returned.
If 'dataframe' is specified, a `pandas.DataFrame` object is returned.
"""
X = table.to_dataframe()
tips = [n.name for n in tree.tips()]
X = X.reindex(index=tips).fillna(0)
X_embed = vae_model.to_latent(
torch.Tensor(X.values.T).float())
if return_type == 'tensor':
return X_embed
X_embed = X_embed.detach().cpu().numpy()
if return_type == 'array':
return X_embed
elif return_type == 'dataframe':
return pd.DataFrame(X_embed, index=table.ids(axis='sample'))
else:
        raise ValueError(f'return type {return_type} is not supported.')
def extract_observation_embeddings(vae_model, tree, return_type='dataframe'):
""" Extracts observation embeddings from model (i.e. OTUs).
The observation embeddings are all represented in CLR coordinates.
Parameters
----------
vae_model : MultVAE
Pretrained Multinomial VAE
tree : skbio.TreeNode
The tree used to train the VAE
return_type : str
Options include 'tensor', 'array', 'dataframe' (default='dataframe')
"""
# ILR representation of the VAE decoder loadings
W = vae_model.vae.decoder.weight
Psi, _ = sparse_balance_basis(tree)
    if return_type == 'tensor':
indices = np.vstack((Psi.row, Psi.col))
Psi = torch.sparse_coo_tensor(
indices.copy(), Psi.data.astype(np.float32).copy(),
requires_grad=False).coalesce()
return Psi.T @ W
if return_type == 'array':
return Psi.T @ W.detach().numpy()
if return_type == 'dataframe':
names = [n.name for n in tree.tips()]
return pd.DataFrame(Psi.T @ W.detach().numpy(), index=names)
else:
        raise ValueError(f'return type {return_type} is not supported.')
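# A minimal usage sketch (the model directory and biom table are hypothetical):
#
#     vae_model, tree = load_model('results/multvae_run')
#     samples = extract_sample_embeddings(vae_model, tree, table)  # DataFrame
#     otus = extract_observation_embeddings(vae_model, tree)       # CLR loadings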
| [
"skbio.TreeNode.read",
"catvae.trainer.MultVAE.load_from_checkpoint",
"gneiss.balances.sparse_balance_basis",
"os.path.join",
"torch.Tensor",
"yaml.safe_load",
"numpy.vstack"
] | [((494, 534), 'os.path.join', 'os.path.join', (['model_path', '"""last_ckpt.pt"""'], {}), "(model_path, 'last_ckpt.pt')\n", (506, 534), False, 'import os\n'), ((548, 588), 'os.path.join', 'os.path.join', (['model_path', '"""hparams.yaml"""'], {}), "(model_path, 'hparams.yaml')\n", (560, 588), False, 'import os\n'), ((604, 640), 'os.path.join', 'os.path.join', (['model_path', '"""tree.nwk"""'], {}), "(model_path, 'tree.nwk')\n", (616, 640), False, 'import os\n'), ((652, 681), 'skbio.TreeNode.read', 'skbio.TreeNode.read', (['nwk_path'], {}), '(nwk_path)\n', (671, 681), False, 'import skbio\n'), ((807, 856), 'catvae.trainer.MultVAE.load_from_checkpoint', 'MultVAE.load_from_checkpoint', (['ckpt_path'], {}), '(ckpt_path, **params)\n', (835, 856), False, 'from catvae.trainer import MultVAE\n'), ((2684, 2710), 'gneiss.balances.sparse_balance_basis', 'sparse_balance_basis', (['tree'], {}), '(tree)\n', (2704, 2710), False, 'from gneiss.balances import sparse_balance_basis\n'), ((737, 759), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (751, 759), False, 'import yaml\n'), ((2760, 2789), 'numpy.vstack', 'np.vstack', (['(Psi.row, Psi.col)'], {}), '((Psi.row, Psi.col))\n', (2769, 2789), True, 'import numpy as np\n'), ((1732, 1756), 'torch.Tensor', 'torch.Tensor', (['X.values.T'], {}), '(X.values.T)\n', (1744, 1756), False, 'import torch\n')] |
"""Setup hierarchical primitives."""
from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from itertools import dropwhile
import numpy as np
from os import path
def collect_docstring(lines):
"""Return document docstring if it exists"""
lines = dropwhile(lambda x: not x.startswith('"""'), lines)
doc = ""
for line in lines:
doc += line
if doc.endswith('"""\n'):
break
return doc[3:-4].replace("\r", "").replace("\n", " ")
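# For instance, fed the lines of a file that begins with
#     """Setup hierarchical primitives."""
# collect_docstring returns "Setup hierarchical primitives." (the triple
# quotes and line breaks are stripped).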
def collect_metadata():
meta = {}
with open(path.join("hierarchical_primitives", "__init__.py")) as f:
lines = iter(f)
meta["description"] = collect_docstring(lines)
for line in lines:
if line.startswith("__"):
key, value = map(lambda x: x.strip(), line.split("="))
meta[key[2:-2]] = value[1:-1]
return meta
def get_extensions():
return cythonize([
Extension(
"hierarchical_primitives.fast_sampler._sampler",
[
"hierarchical_primitives/fast_sampler/_sampler.pyx",
"hierarchical_primitives/fast_sampler/sampling.cpp"
],
language="c++11",
libraries=["stdc++"],
include_dirs=[np.get_include()],
extra_compile_args=["-std=c++11", "-O3"]
)
])
def get_install_requirements():
return [
"numpy",
"trimesh",
"torch",
"torchvision",
"cython",
"Pillow",
"pyquaternion",
"pykdtree",
"matplotlib",
"simple-3dviz"
]
def setup_package():
with open("README.md") as f:
long_description = f.read()
meta = collect_metadata()
setup(
name="hierarchical_primitives",
version=meta["version"],
long_description=long_description,
long_description_content_type="text/markdown",
maintainer=meta["maintainer"],
maintainer_email=meta["email"],
url=meta["url"],
license=meta["license"],
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
install_requires=get_install_requirements(),
ext_modules=get_extensions()
)
if __name__ == "__main__":
setup_package()
| [
"os.path.join",
"numpy.get_include"
] | [((582, 633), 'os.path.join', 'path.join', (['"""hierarchical_primitives"""', '"""__init__.py"""'], {}), "('hierarchical_primitives', '__init__.py')\n", (591, 633), False, 'from os import path\n'), ((1302, 1318), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1316, 1318), True, 'import numpy as np\n')] |
import os
import time
import shutil
from collections import deque
from datetime import timedelta, datetime
from math import floor
import numpy as np
import scipy.misc
import tensorflow as tf
TF_VERSION = list(map(int, tf.__version__.split('.')[:2]))
class DenseNet:
# -------------------------------------------------------------------------
# --------------------------- CLASS INITIALIZER ---------------------------
# -------------------------------------------------------------------------
def __init__(self, data_provider, growth_rate, layer_num_list,
keep_prob, num_inter_threads, num_intra_threads,
weight_decay, nesterov_momentum, model_type, dataset,
should_self_construct, should_change_lr,
self_constructing_var, self_constr_rlr, block_count,
layer_cs, asc_thresh, patience_param,
std_tolerance, std_window, preserve_transition_l,
expansion_rate, dkCS_softening, dkCS_std_window,
dkCS_stl_thresh, usefulness_thresh, uselessness_thresh,
auto_usefulness_thresh, auto_uselessness_thresh,
m_asc_thresh, m_patience_param, impr_thresh, complementarity,
should_save_logs, should_save_ft_logs, ft_period,
ft_comma, ft_decimal, ft_filters, ft_kernels,
ft_cross_entropies,
should_save_model, should_save_images,
renew_logs=False,
reduction=1.0,
bc_mode=False,
**kwargs):
"""
Class to implement DenseNet networks as defined in this paper:
https://arxiv.org/pdf/1611.05552.pdf
Args:
data_provider: data provider object for the required data set;
growth_rate: `int`, number of convolutions in a new dense layer;
layer_num_list: `str`, list of number of layers in each block,
separated by commas (e.g. '12,12,12');
keep_prob: `float`, keep probability for dropout. If keep_prob = 1
dropout will be disabled;
weight_decay: `float`, weight decay for L2 loss, paper = 1e-4;
nesterov_momentum: `float`, momentum for Nesterov optimizer;
model_type: `str`, model type name ('DenseNet' or 'DenseNet-BC'),
should we use bottleneck layers and compression or not;
dataset: `str`, dataset name;
should_self_construct: `bool`, should use self-constructing or not;
should_change_lr: `bool`, should change the learning rate or not;
self_constructing_var: `int`, variant of the self-constructing
algorithm to be used, if the int does not identify any variant
the most recent (default) variant is used;
self_constr_rlr: `int`, learning rate reduction variant to be used
with the self-constructing algorithm, if the int does not
identify any variant the most recent (default) variant is used;
block_count: `int`, maximum number of blocks to self-construct;
layer_cs: `str`, 'layer CS', preferred interpretation of CS values
when evaluating layers (using 'relevance' or 'spread');
asc_thresh: `int`, ascension threshold for self-constructing;
patience_param: `int`, patience parameter for self-constructing;
std_tolerance: `int`, std tolerance for self-constructing;
std_window: `int`, std window for self-constructing;
preserve_transition_l: `bool`, should preserve transition to
classes after layer additions or not;
expansion_rate: `int`, rate at which new convolutions are added
together during the self-construction of a dense layer;
dkCS_softening: `int`, memory window for each kernel's CS during
kernel-level self-constructing (to soften the derivate);
dkCS_std_window: `int`, std window for each kernel's CS derivate
during kernel-level self-constructing;
dkCS_stl_thresh: `float`, settling threshold for each kernel's CS
derivate during kernel-level self-constructing;
usefulness_thresh: `float`, usefulness threshold for kernels during
kernel-level self-constructing;
uselessness_thresh: `float`, uselessness threshold for kernels
during kernel-level self-constructing;
auto_usefulness_thresh: `float`, usefulness threshold as a fraction
between 0 and 1 (used for automatic usefulness threshold).
auto_uselessness_thresh: `float`, uselessness threshold as a
fraction between 0 and 1 (used for automatic uselessness
threshold).
m_asc_thresh: `int`, micro-ascension threshold for kernel-level
self-constructing;
m_patience_param: `int`, micro-patience parameter for kernel-level
self-constructing;
impr_thresh: `int`, improvement threshold used during kernel-level
self-constructing;
complementarity: `bool`, whether or not to use complementarity when
adding new kernels during kernel-level self-constructing.
should_save_logs: `bool`, should tensorflow logs be saved or not;
should_save_ft_logs: `bool`, should feature logs be saved or not;
ft_period: `int`, number of epochs between two measurements of
feature values (e.g. accuracy, loss, weight mean and std);
ft_comma: `str`, 'comma' separator in the CSV feature logs;
ft_decimal: `str`, 'decimal' separator in the CSV feature logs;
ft_filters: `bool`, should check filter features or not;
ft_kernels: `bool`, should check kernel features or not;
ft_cross_entropies: `bool`, should measure cross-entropies for
each individual layer in the last block or not;
should_save_model: `bool`, should the model be saved or not;
should_save_images: `bool`, should images be saved or not;
renew_logs: `bool`, remove previous logs for current model;
reduction: `float`, reduction (theta) at transition layers for
DenseNets with compression (DenseNet-BC);
bc_mode: `bool`, boolean equivalent of model_type, should we use
bottleneck layers and compression (DenseNet-BC) or not.
"""
# Main DenseNet and DenseNet-BC parameters.
self.creation_time = datetime.now().strftime("%Y_%m_%d_%H%M%S")
self.data_provider = data_provider
self.data_shape = data_provider.data_shape
self.n_classes = data_provider.n_classes
self.growth_rate = growth_rate
self.num_inter_threads = num_inter_threads
self.num_intra_threads = num_intra_threads
# Number of outputs (feature maps) produced by the initial convolution
# (2*k, same value as in the original Torch code).
self.first_output_features = growth_rate * 2
self.layer_num_list = list(map(int, layer_num_list.split(',')))
self.total_blocks = len(self.layer_num_list)
self.bc_mode = bc_mode
self.reduction = reduction
print("Build %s model with %d blocks, "
"The number of layers in each block is:" % (
model_type, self.total_blocks))
if not bc_mode:
print('\n'.join('Block %d: %d composite layers.' % (
k, self.layer_num_list[k]) for k in range(len(
self.layer_num_list))))
if bc_mode:
            print('\n'.join('Block %d: %d bottleneck layers and %d composite '
'layers.' % (k, self.layer_num_list[k],
self.layer_num_list[k])
for k in range(len(self.layer_num_list))))
print("Reduction at transition layers: %.1f" % self.reduction)
self.keep_prob = keep_prob
self.weight_decay = weight_decay
self.nesterov_momentum = nesterov_momentum
self.model_type = model_type
self.dataset_name = dataset
self.should_self_construct = should_self_construct
self.has_micro_algo = should_self_construct
self.preserve_transition_l = should_self_construct
self.should_change_lr = should_change_lr
self.block_count = max(1, block_count)
self.layer_cs = layer_cs
# Manage self construction only when self-constructing.
if should_self_construct:
# Choice of the self-constructing algorithm variant.
if self_constructing_var == 0:
self.self_constructing_step = self.self_constructing_var0
elif self_constructing_var == 1:
self.self_constructing_step = self.self_constructing_var1
elif self_constructing_var == 2:
self.self_constructing_step = self.self_constructing_var2
elif self_constructing_var == 3:
self.self_constructing_step = self.self_constructing_var3
elif self_constructing_var == 4:
self.self_constructing_step = self.self_constructing_var4
elif self_constructing_var == 5:
self.self_constructing_step = self.self_constructing_var5
# elif self_constructing_var >= 6:
# self.self_constructing_step = self.self_constructing_minimal
else:
self_constructing_var = 5
self.self_constructing_step = self.self_constructing_var5
self.has_micro_algo = self_constructing_var >= 4
# Choice of the self-constructing learning rate reduction variant.
if self_constr_rlr == 0:
self.self_constr_rlr = self.self_constr_rlr0
else:
self.self_constr_rlr = self.self_constr_rlr1
# Self-construction parameters.
self.asc_thresh = asc_thresh
self.patience_param = patience_param
self.patience_cntdwn = patience_param
self.std_tolerance = std_tolerance
self.std_window = std_window
self.preserve_transition_l = preserve_transition_l
self.pruned_varnames = []
# Accuracy FIFO list, only used in variants #2 and #3.
if self_constructing_var == 2 or self_constructing_var == 3:
self.accuracy_FIFO = deque(maxlen=self.std_window)
# Micro self-construction parameters.
if self.has_micro_algo:
self.expansion_rate = expansion_rate
self.dkCS_softening = dkCS_softening+1 # actual num of elems
self.dkCS_std_window = dkCS_std_window
self.dkCS_stl_thresh = dkCS_stl_thresh
self.usefulness_thresh = usefulness_thresh
self.uselessness_thresh = uselessness_thresh
self.auto_usefulness_thresh = auto_usefulness_thresh
self.auto_uselessness_thresh = auto_uselessness_thresh
# self.alt_uselessness_thresh = 0.25
self.m_asc_thresh = m_asc_thresh
self.m_patience_param = m_patience_param
self.m_patience_cntdwn = m_patience_param
self.impr_thresh = impr_thresh
self.complementarity = complementarity
# Data saving parameters.
self.should_save_logs = should_save_logs
self.should_save_ft_logs = should_save_ft_logs
self.ft_period = ft_period
self.ftc = ft_comma
self.ftd = ft_decimal
self.ft_filters = ft_filters and not ft_kernels
self.ft_kernels = ft_kernels
self.ft_cross_entropies = ft_cross_entropies
self.should_save_model = should_save_model
self.should_save_images = should_save_images
self.renew_logs = renew_logs
self.batches_step = 0
self._define_inputs()
self._build_graph()
self._initialize_session()
self._count_trainable_params_in_use()
# -------------------------------------------------------------------------
# ------------------------ SAVING AND LOADING DATA ------------------------
# -------------------------------------------------------------------------
def update_paths(self):
"""
Update all paths for saving data to their proper values.
This is used after the graph is modified (new block or layer).
This is also used after an AttributeError when calling these paths.
"""
save_path = 'saves/%s' % self.model_identifier
if self.should_save_model:
os.makedirs(save_path, exist_ok=True)
save_path = '%s/%s' % (save_path, 'model.chkpt')
self._save_path = save_path
logs_path = 'logs/%s' % self.model_identifier
if self.should_save_logs:
if self.renew_logs:
shutil.rmtree(logs_path, ignore_errors=True)
os.makedirs(logs_path, exist_ok=True)
self._logs_path = logs_path
ft_logs_path = 'ft_logs/%s' % self.run_identifier
if self.should_save_ft_logs:
os.makedirs('ft_logs/', exist_ok=True)
self._ft_logs_path = ft_logs_path
images_path = 'images/%s' % self.run_identifier
if self.should_save_images:
os.makedirs(images_path, exist_ok=True)
self._images_path = images_path
return save_path, logs_path, ft_logs_path, images_path
@property
def model_identifier(self):
"""
Returns an identifier `str` for the current DenseNet model.
It gives the model's type ('DenseNet' or 'DenseNet-BC'),
its growth rate k, the number of layers in each block,
and the dataset that was used.
"""
return "{}_dataset_{}_growth_rate={}_layer_num_list={}".format(
self.model_type, self.dataset_name, self.growth_rate, ",".join(map(
str, self.layer_num_list)))
@property
def run_identifier(self):
"""
Returns an identifier `str` for the current execution of the algorithm.
It gives the model's type ('DenseNet' or 'DenseNet-BC'),
its growth rate k, the dataset that was used,
and the date and hour at which the execution started.
"""
return "{}_{}_dataset_{}_growth_rate={}".format(
self.model_type, self.creation_time, self.dataset_name,
self.growth_rate)
@property
def save_path(self):
"""
Returns a path where the saver should save the current model.
"""
try:
save_path = self._save_path
except AttributeError:
save_path = self.update_paths()[0]
return save_path
@property
def logs_path(self):
"""
Returns a path where the logs for the current model should be written.
"""
try:
logs_path = self._logs_path
except AttributeError:
logs_path = self.update_paths()[1]
return logs_path
@property
def ft_logs_path(self):
"""
Returns a path where the evolution of features in the current execution
should be recorded.
"""
try:
ft_logs_path = self._ft_logs_path
except AttributeError:
ft_logs_path = self.update_paths()[2]
return ft_logs_path
@property
def images_path(self):
"""
Returns a path where images from the current execution should be saved.
"""
try:
images_path = self._images_path
except AttributeError:
images_path = self.update_paths()[3]
return images_path
def save_model(self, global_step=None):
"""
Saves the current trained model at the proper path, using the saver.
Args:
global_step: `int` or None, used for numbering saved model files
"""
self.saver.save(self.sess, self.save_path, global_step=global_step)
def load_model(self):
"""
Loads a saved model to use (instead of a new one) using the saver.
This is a previously trained and saved model using the model_type
('DenseNet' or 'DenseNet-BC'), growth rate, layers in each block,
and dataset that was specified in the program arguments.
"""
        try:
            self.saver.restore(self.sess, self.save_path)
        except Exception:
            raise IOError("Failed to load model "
                          "from save path: %s" % self.save_path)
        print("Successfully loaded model from save path: %s" % self.save_path)
def log_loss_accuracy(self, loss, accuracy, epoch, prefix,
should_print=True):
"""
Writes a log of the current mean loss (cross_entropy) and accuracy.
Args:
loss: `float`, loss (cross_entropy) for the current log;
accuracy: `float`, accuracy for the current log;
epoch: `int`, current training epoch (or batch);
prefix: `str`, is this log for a batch ('per_batch'), a
training epoch ('train') or a validation epoch ('valid');
should_print: `bool`, should we print this log on console or not.
"""
if should_print:
print("mean cross_entropy: %f, mean accuracy: %f" % (
loss, accuracy))
summary = tf.Summary(value=[
tf.Summary.Value(
tag='loss_%s' % prefix, simple_value=float(loss)),
tf.Summary.Value(
tag='accuracy_%s' % prefix, simple_value=float(accuracy))
])
self.summary_writer.add_summary(summary, epoch)
def ft_log_filters(self, b, cs_table_ls, lcs_dst, lcs_src):
"""
Write a feature log with data concerning filters: the CS of every
connection in a given block, the 'layer CS' (relevance or spread) for
destinations and sources for all layers in the same block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
lcs_dst: `list` of `float`, 'layer CS' for destinations
for all layers in the block;
lcs_src: `list` of `float`, 'layer CS' for sources
for all layers in the block.
"""
# printing and saving the data to feature logs
for l in range(self.layer_num_list[b]):
# 'layer CS' for destinations of l-1
print(' - %s for destinations = %f' % (
self.layer_cs.capitalize(), lcs_dst[l]))
# destination layer CS (sent from l-1 towards d)
for d in range(l, self.layer_num_list[b]):
print(' - Towards layer %d: CS = %f' % (
d, cs_table_ls[d][l]))
# /max(fwd[l] for fwd in cs_table_ls if len(fwd) > l)
print('\n* Block %d filter %d:' % (b, l))
# source layer CS (received at l from s)
for s in range(len(cs_table_ls[l])):
print(' - From layer %d: CS = %f' % (
s, cs_table_ls[l][s])) # /max(cs_table_ls[l])))
# 'layer CS' for sources of l
print(' - %s for sources = %f' % (
self.layer_cs.capitalize(), lcs_src[l]))
if self.should_save_ft_logs:
# write all of the above in the feature log
self.feature_writer.write(('%s\"%f\"' % (self.ftc, lcs_dst[l])
).replace(".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
for d in range(l, self.layer_num_list[b]):
self.feature_writer.write((
'%s\"%f\"' % (self.ftc, cs_table_ls[d][l])).replace(
".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
for s in range(len(cs_table_ls[l])):
self.feature_writer.write((
'%s\"%f\"' % (self.ftc, cs_table_ls[l][s])).replace(
".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
self.feature_writer.write(('%s\"%f\"' % (self.ftc, lcs_src[l])
).replace(".", self.ftd))
# -------------------------------------------------------------------------
# ----------------------- PROCESSING FEATURE VALUES -----------------------
# -------------------------------------------------------------------------
def get_cs_list(self, f_image, f_num):
"""
Get the list of connection strengths (CS) for all connections to a
given filter layer.
The CS of a connection is equal to the mean of its associated absolute
kernel weights (sum divided by num of weights).
Args:
f_image: `np.ndarray`, an array representation of the filter;
f_num: `int`, identifier for the filter within the block.
"""
# split kernels by groups, depending on which connection they belong to
# for this, use filter numbering (different in BC mode!)
splitting_guide = []
for i in range(int(f_num/(1+int(self.bc_mode))), 0, -1):
splitting_guide.append(f_image.shape[0] - i*self.growth_rate)
if len(splitting_guide) > 0:
f_split_image = np.split(f_image, splitting_guide)
else:
f_split_image = [f_image]
# calculate CS (means of abs weights) by groups of kernels
cs_list = []
for split in range(len(f_split_image)):
cs_list.append(np.mean(np.abs(f_split_image[split])))
return cs_list
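    # Worked example (a sketch, not part of the class API): in standard
    # mode with growth_rate k=12, the filter of layer f_num=2 whose input
    # axis holds 40 feature maps (16 from the previous block plus 12 from
    # each of layers 0 and 1) is split at indices [16, 28]; the three
    # groups yield the CS of the connections from the block input, layer 0
    # and layer 1 (each CS being the mean of that group's |weights|).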
def get_relev_dst(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the relevance for destinations for all layers (filters) in a block.
The relevance for destinations of a layer l expresses the portion of
the connections sent from l-1 that are 'relevant enough' for their
destination layers to receive information through them.
For each connection from l-1 to a future layer d, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections received by d.
N.B.: For l=0, the preceding l-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
relev_dst = []
max_cs = 0 # the max CS for each future layer
for l in range(self.layer_num_list[b]):
relev_dst.append(0)
for d in range(l, self.layer_num_list[b]):
max_cs = max(cs_table_ls[d])
relev_dst[l] += int(cs_table_ls[d][l]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
relev_dst[l] /= self.layer_num_list[b] - l
return relev_dst
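    # Numeric sketch: with tresh_fract=0.67, if a destination layer d
    # receives the CS values [0.30, 0.10, 0.24] (max 0.30), only the
    # connections with CS >= 0.201 (the first and third) count as
    # 'relevant enough' towards the relevance for destinations of their
    # respective source layers.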
def get_relev_src(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the relevance for sources for all layers (filters) in a block.
The relevance for sources of a layer l expresses the portion of the
connections received by l that are 'relevant enough' for their source
layers to send information through them.
For each connection from a past layer s-1 to l, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections sent from s-1.
N.B.: For s=0, the preceding s-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
relev_src = []
max_cs = 0 # the max CS for each past layer
for l in range(self.layer_num_list[b]):
relev_src.append(0)
for s in range(len(cs_table_ls[l])):
max_cs = max(fwd[s] for fwd in cs_table_ls[s:])
relev_src[l] += int(cs_table_ls[l][s]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
relev_src[l] /= l+1
return relev_src
def get_spread_emi(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the spread of emission for all layers (filters) in a block.
The spread of emission of a layer l expresses the portion of the
connections sent from l-1 that are 'relevant enough' for l-1 to send
(emit) information through them.
For each connection from l-1 to a future layer d, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections sent from l-1.
N.B.: For l=0, the preceding l-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
spread_emi = []
max_cs = 0 # the max CS for each future layer
for l in range(self.layer_num_list[b]):
spread_emi.append(0)
max_cs = max(fwd[l] for fwd in cs_table_ls[l:])
for d in range(l, self.layer_num_list[b]):
spread_emi[l] += int(cs_table_ls[d][l]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
spread_emi[l] /= self.layer_num_list[b] - l
return spread_emi
def get_spread_rec(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the spread of reception for all layers (filters) in a block.
The spread of reception of a layer l expresses the portion of the
connections received by l that are 'relevant enough' for l to receive
information through them.
For each connection from a past layer s-1 to l, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections received by l.
N.B.: For s=0, the preceding s-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
spread_rec = []
max_cs = 0 # the max CS for each past layer
for l in range(self.layer_num_list[b]):
spread_rec.append(0)
max_cs = max(cs_table_ls[l])
for s in range(len(cs_table_ls[l])):
spread_rec[l] += int(cs_table_ls[l][s]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
spread_rec[l] /= l+1
return spread_rec
def process_filter(self, filter, block_num, filter_num, epoch):
"""
Process a given convolution filter's kernel weights, in some cases
save a representation of the filter and its weights as a PNG image.
Returns a list with the connection strengths (CS) for connections
between any given layer l and each past layer s.
Args:
filter: tensor, the filter whose kernel weights are processed;
block_num: `int`, identifier number for the filter's block;
filter_num: `int`, identifier for the filter within the block;
epoch: `int`, current training epoch (or batch).
"""
# get an array representation of the filter, then get its dimensions
f_image = self.sess.run(filter)
f_d = filter.get_shape().as_list()
f_image = f_image.transpose()
f_image = np.moveaxis(f_image, [0, 1], [1, 0])
# calculate connection strength for all connections
cs_list = self.get_cs_list(f_image, filter_num)
if self.should_save_images:
# properly place the kernels to save the filter as an image
f_image = np.moveaxis(f_image, [1, 2], [0, 1])
f_image = np.resize(f_image, (f_d[1]*f_d[3], f_d[0]*f_d[2]))
# save the image in the proper file
im_filepath = './%s/block_%d_filter_%d' % (
self.images_path, block_num, filter_num)
os.makedirs(im_filepath, exist_ok=True)
im_filepath += '/epoch_%d.png' % epoch
scipy.misc.imsave(im_filepath, f_image)
return cs_list
def process_kernel(self, kernel):
"""
Process a given kernel's weights, returns the connection strength (CS)
for that kernel (the mean of its absolute weights).
Args:
kernel: tensor, the kernel whose weights are processed.
"""
# get an array representation of the kernel
k_image = self.sess.run(kernel)
# calculate its connection strength and return it
k_cs = np.mean(np.abs(k_image))
return k_cs
def process_block_filters(self, b, epoch):
"""
Process a given block's filters. Return values for features related to
the filters' kernel weights: connection strengths, 'layer CS' for
destinations, and 'layer CS' for sources. The 'layer CS' can be either
relevance or spread, depending on what is required by the algorithm.
Args:
b: `int`, identifier number for the block;
epoch: `int`, current training epoch (or batch).
"""
cs_table_ls = []
# process each filter separately (except BC bottlenecks),
        # get the connection strength between each layer l and any past layer s
for f in range(len(self.filter_ref_list[b+1])):
if not self.bc_mode or not f % 2:
cs_table_ls.append(self.process_filter(
self.filter_ref_list[b+1][f], b, f, epoch))
# if the required 'layer CS' is relevance
if self.layer_cs == 'relevance':
# relevance for destinations: what portion of all the connections
# sent from a layer l-1 are relevant for their destination layers?
lcs_dst = self.get_relev_dst(b, cs_table_ls)
# relevance for sources: what portion of all the connections
# received by a layer l are relevant for their source layers?
lcs_src = self.get_relev_src(b, cs_table_ls)
# else (if the required 'layer CS' is spread)
else:
# spread of emission (for destinations): what portion of all the
# connections sent from a layer l-1 are relevant for l-1?
lcs_dst = self.get_spread_emi(b, cs_table_ls)
# spread of reception (for sources): what portion of all the
# connections received by a layer l are relevant for l?
lcs_src = self.get_spread_rec(b, cs_table_ls)
return(cs_table_ls, lcs_dst, lcs_src)
def process_layer_kernels(self, b, l, epoch):
"""
        Process a given layer's kernels. Return the connection strength value
for each kernel in the layer.
Args:
b: `int`, identifier number for the block;
l: `int`, identifier number for the layer inside the block;
epoch: `int`, current training epoch (or batch).
"""
cs_table_kernels = []
        # get the connection strength for each kernel in layer l of block b
for k in range(len(self.kernels_ref_list[b][l])):
cs_table_kernels.append(
self.process_kernel(self.kernels_ref_list[b][l][k]))
return cs_table_kernels
# -------------------------------------------------------------------------
# ---------------------- DEFINING INPUT PLACEHOLDERS ----------------------
# -------------------------------------------------------------------------
def _define_inputs(self):
"""
        Defines the input placeholder tensors:
images, labels, learning_rate, is_training.
"""
shape = [None]
shape.extend(self.data_shape)
self.images = tf.placeholder(
tf.float32,
shape=shape,
name='input_images')
self.labels = tf.placeholder(
tf.float32,
shape=[None, self.n_classes],
name='labels')
self.learning_rate = tf.placeholder(
tf.float32,
shape=[],
name='learning_rate')
self.is_training = tf.placeholder(tf.bool, shape=[])
# -------------------------------------------------------------------------
# ---------------------- BUILDING THE DENSENET GRAPH ----------------------
# -------------------------------------------------------------------------
# SIMPLEST OPERATIONS -----------------------------------------------------
# -------------------------------------------------------------------------
def weight_variable_msra(self, shape, name):
"""
Creates weights for a fully-connected layer, using an initialization
method which does not scale the variance.
Args:
shape: `list` of `int`, shape of the weight matrix;
name: `str`, a name for identifying the weight matrix.
"""
# print("CREATING WEIGHT VARIABLE: " + name)
# print(shape)
return tf.get_variable(
name=name,
shape=shape,
initializer=tf.contrib.layers.variance_scaling_initializer())
def avg_pool(self, _input, k):
"""
        Performs average pooling on a given input (_input),
        with square kernels of side k and stride k.
Args:
_input: tensor, the operation's input;
k: `int`, the size and stride for the kernels.
"""
ksize = [1, k, k, 1]
strides = [1, k, k, 1]
padding = 'VALID'
output = tf.nn.avg_pool(_input, ksize, strides, padding)
return output
def batch_norm(self, _input, scope='BatchNorm'):
"""
Performs batch normalisation on a given input (_input).
Args:
_input: tensor, the operation's input.
scope: `str`, a variable scope for the operation.
"""
output = tf.contrib.layers.batch_norm(
_input, scale=True, is_training=self.is_training,
updates_collections=None, scope=scope)
return output
def conv2d(self, _input, out_features, kernel_size,
strides=[1, 1, 1, 1], padding='SAME'):
"""
Creates a 2d convolutional filter layer (applies a certain number of
kernels on some input features to obtain output features).
Returns the output of the layer and a reference to its filter.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output;
kernel_size: `int`, size of the square kernels (their side);
strides: `list` of `int`, strides in each direction for kernels;
padding: `str`, should we use padding ('SAME') or not ('VALID').
"""
in_features = int(_input.get_shape()[-1])
filter_ref = self.weight_variable_msra(
[kernel_size, kernel_size, in_features, out_features],
name='filter')
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref
def conv2d_with_kernels(self, _input, out_features, kernel_size,
strides=[1, 1, 1, 1], padding='SAME'):
"""
Creates a 2d convolutional filter layer, by producing a list of 3d
kernels and then stacking them together to create the filter.
Returns the output of the layer and a reference to its convolutional
filter, as well as the newly generated list of kernels.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output;
kernel_size: `int`, size of the square kernels (their side);
strides: `list` of `int`, strides in each direction for kernels;
padding: `str`, should we use padding ('SAME') or not ('VALID').
"""
in_features = int(_input.get_shape()[-1])
# First create a list with the 3d kernels (easily modifiable):
kernels = []
for o in range(out_features):
kernels.append(self.weight_variable_msra(
[kernel_size, kernel_size, in_features], name='kernel'+str(o)))
# The kernels are stacked together so as to create a 4d filter
# (dimension 3 = output features).
filter_ref = tf.stack(kernels, axis=3, name='filter')
# Using the filter, the convolution is defined.
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref, kernels
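    # Shape sketch: each kernel is [kernel_size, kernel_size, in_features];
    # tf.stack(kernels, axis=3) assembles the standard conv2d filter of
    # shape [kernel_size, kernel_size, in_features, out_features], so that
    # single output feature maps can later be added or pruned kernel by
    # kernel without rebuilding the whole filter by hand.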
def conv2d_with_given_kernels(self, _input, kernels,
strides=[1, 1, 1, 1], padding='SAME'):
"""
Creates a 2d convolutional filter layer, by using a given list of 3d
kernels to create a filter (stacking them together).
Returns the output of the layer and a reference to its filter.
Args:
_input: tensor, the operation's input;
kernels: `list` of tensors, contains each of the kernels from which
the convolution will be built;
strides: `list` of `int`, strides in each direction for kernels;
padding: `str`, should we use padding ('SAME') or not ('VALID').
"""
# The kernels are stacked together so as to create a 4d filter.
# Using the same name = good idea?
filter_ref = tf.stack(kernels, axis=3, name='filter')
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref
def dropout(self, _input):
"""
If the given keep_prob is not 1 AND if the graph is being trained,
performs a random dropout operation on a given input (_input).
The dropout probability is the keep_prob parameter.
Args:
_input: tensor, the operation's input.
"""
if self.keep_prob < 1:
output = tf.cond(
self.is_training,
lambda: tf.nn.dropout(_input, self.keep_prob),
lambda: _input
)
else:
output = _input
return output
# SIMPLEST OPERATIONS (FULLY CONNECTED) -----------------------------------
# -------------------------------------------------------------------------
def weight_variable_xavier(self, shape, name):
"""
Creates weights for a fully-connected layer, using the Xavier
initializer (keeps gradient scale roughly the same in all layers).
Args:
shape: `list` of `int`, shape of the weight matrix;
name: `str`, a name for identifying the weight matrix.
"""
return tf.get_variable(
name,
shape=shape,
initializer=tf.contrib.layers.xavier_initializer())
def bias_variable(self, shape, name='bias'):
"""
Creates bias terms for a fully-connected layer, initialized to 0.0.
Args:
shape: `list` of `int`, shape of the bias matrix;
name: `str`, a name for identifying the bias matrix.
"""
initial = tf.constant(0.0, shape=shape)
return tf.get_variable(name, initializer=initial)
# COMPOSITE FUNCTION + BOTTLENECK -----------------------------------------
# -------------------------------------------------------------------------
def composite_function(self, _input, out_features, kernel_size=3):
"""
Composite function H_l([x_0, ..., x_l-1]) for a dense layer.
Takes a concatenation of previous outputs and performs:
- batch normalisation;
- ReLU activation function;
- 2d convolution, with required kernel size (side);
- dropout, if required (training the graph and keep_prob not set to 1).
Returns the output tensor and a reference to the 2d convolution filter,
as well as a list of the kernels in that filter, and the input tensor
for the 2d convolution.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output;
kernel_size: `int`, size of the square kernels (their side).
"""
with tf.variable_scope("composite_function"):
# batch normalisation
in_cv = self.batch_norm(_input)
# ReLU activation function
in_cv = tf.nn.relu(in_cv)
# 2d convolution
output, filter_ref, kernels = self.conv2d_with_kernels(
in_cv, out_features=out_features, kernel_size=kernel_size)
# dropout (if the graph is being trained and keep_prob is not 1)
output = self.dropout(output)
return output, filter_ref, kernels, in_cv
def reconstruct_composite_function(self, in_cv, kernels):
"""
Reconstruct the output of the composite function H_l([x_0, ..., x_l-1])
for a dense layer, given the convolution's input and its kernels.
Args:
in_cv: tensor, the input of the convolution;
kernels: `list` of tensors, the kernels for the convolution.
"""
# 2d convolution
output, filter_ref = self.conv2d_with_given_kernels(
in_cv, kernels)
# dropout
output = self.dropout(output)
return output, filter_ref
def bottleneck(self, _input, out_features):
"""
Bottleneck function, used before the composite function H_l in the
dense layers of DenseNet-BC.
Takes a concatenation of previous outputs and performs:
- batch normalisation,
- ReLU activation function,
- 2d convolution, with kernel size 1 (produces 4x the features of H_l),
- dropout, if required (training the graph and keep_prob not set to 1).
Returns the output tensor and a reference to the 2d convolution kernel.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output of H_l;
kernel_size: `int`, size of the square kernels (their side).
"""
with tf.variable_scope("bottleneck"):
# batch normalisation
output = self.batch_norm(_input)
# ReLU activation function
output = tf.nn.relu(output)
inter_features = out_features * 4
# 2d convolution (produces intermediate features)
output, filter_ref = self.conv2d(
output, out_features=inter_features, kernel_size=1,
padding='VALID')
# dropout (if the graph is being trained and keep_prob is not 1)
output = self.dropout(output)
return output, filter_ref
# BLOCKS AND THEIR INTERNAL LAYERS ----------------------------------------
# -------------------------------------------------------------------------
def add_new_kernels_to_layer(self, _input, in_cv, layer, kernel_num,
complementarity=True, kernel_size=3):
"""
Adds new convolution kernels to a layer within a block:
creates the kernels, reconstructs the composite function, and
concatenates outputs to ensure the DenseNet paradigm.
If required, uses a complementarity mechanism to initialise the new
kernels: the sign configuration is the opposite of that of the kernels
with lowest CS, unless that configuration is already taken (in which
        case it must be different, but close to the opposite).
Returns the layer's new output tensor.
N.B.: This function is meant to be used ONLY in self-constructing mode
(i.e. when should_self_construct is true).
Args:
_input: tensor, the layer's input;
in_cv: tensor, the input for the layer's convolution;
layer: `int`, identifier number for this layer (within a block);
kernel_num: `int`, number of new (square) kernels to be added;
complementarity: `bool`, whether the complementarity mechanism
should be used to initialise new kernels or not;
kernel_size: `int`, size of the kernels (their side).
"""
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("composite_function"):
# if using the complementarity mechanism
if complementarity:
# get the sign distribution of all kernels in the layer
kernel_signs = []
for old_kernel in self.kernels_ref_list[-1][-1]:
kernel_signs.append(
np.sign(self.sess.run(old_kernel)))
# get the ids of the kernels with lowest CS
compl_kernels = sorted(
range(len(self.kCS_FIFO)),
key=lambda i: self.kCS_FIFO[i][-1])[:kernel_num]
# create and initialise kernel_num new kernels
in_features = int(in_cv.get_shape()[-1])
for new_k in range(kernel_num):
self.kernel_name_counter += 1
self.kernels_ref_list[-1][-1].append(
self.weight_variable_msra(
[kernel_size, kernel_size, in_features],
name='kernel'+str(self.kernel_name_counter)))
self.sess.run(tf.variables_initializer(
[self.kernels_ref_list[-1][-1][-1]]))
# if complementarity, make each new kernel complementary to
# one of the previously identified low-CS kernels
if complementarity:
# get the abs value contents of the new kernel
new_k_image = self.sess.run(
self.kernels_ref_list[-1][-1][-1])
new_k_image = np.absolute(new_k_image)
# sign distribution = opposite to the low-CS kernel
new_k_signs = -1*kernel_signs[compl_kernels[new_k]]
# check if sign distribution already exists
new_k_signs_try = new_k_signs
sign_distr_exists = True
patience = kernel_size*kernel_size*in_features
while sign_distr_exists and patience:
# compare with each of the distributions
sign_distr_exists = False
for sign_distr in kernel_signs:
                                sign_distr_exists = sign_distr_exists or (
                                    new_k_signs_try == sign_distr).all()
# if so, switch one of the signs randomly
if sign_distr_exists:
new_k_signs_try = np.copy(new_k_signs)
new_k_signs_try[
np.random.randint(kernel_size)][
np.random.randint(kernel_size)][
np.random.randint(in_features)
] *= -1
patience -= 1
# finally, apply the sign distr and add it to the list
new_k_image = np.multiply(new_k_image, new_k_signs_try)
kernel_signs.append(new_k_signs_try)
# assign the new weight values to the kernel
self.sess.run(self.kernels_ref_list[-1][-1][-1].assign(
new_k_image))
# reconstruct the composite function from the current kernels
comp_out, filter_ref = self.reconstruct_composite_function(
in_cv, self.kernels_ref_list[-1][-1])
# save a reference to the composite function's filter
self.filter_ref_list[-1][-1] = filter_ref
# concatenate output with layer input to ensure DenseNet paradigm
if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
output = tf.concat(axis=3, values=(_input, comp_out))
else:
output = tf.concat(3, (_input, comp_out))
# Keep track of kernel CS.
self.kCS_FIFO.extend([
deque(maxlen=self.dkCS_softening) for i in range(kernel_num)])
self.dkCS_FIFO.extend([
deque(maxlen=self.dkCS_std_window) for i in range(kernel_num)])
return output
def remove_kernels_from_layer(self, _input, in_cv, layer,
kernels_to_prune):
"""
Removes specific convolution kernels in a layer within a block:
removes the kernels from the list, reconstructs the composite function,
and concatenates outputs to ensure the DenseNet paradigm.
Returns the layer's new output tensor.
N.B.: This function is meant to be used ONLY in self-constructing mode
(i.e. when should_self_construct is true).
Args:
_input: tensor, the layer's input;
in_cv: tensor, the input for the layer's convolution;
layer: `int`, identifier number for this layer (within a block);
kernels_to_prune: `list` of `int`, the specific kernels to remove.
"""
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("composite_function"):
# remove the kernels specified in kernels_to_prune
in_features = int(in_cv.get_shape()[-1])
print("\nPre-pruning kernels_ref_list length: %d" % len(
self.kernels_ref_list[-1][-1]))
for i in reversed(kernels_to_prune):
# iterate backwards so that kernel ids remain meaningful
self.pruned_varnames.append(
self.kernels_ref_list[-1][-1][i].name)
del self.kernels_ref_list[-1][-1][i]
for elem in self.pruned_varnames:
print(elem)
print("Post-pruning kernels_ref_list length: %d\n" % len(
self.kernels_ref_list[-1][-1]))
# reconstruct the composite function from the current kernels
comp_out, filter_ref = self.reconstruct_composite_function(
in_cv, self.kernels_ref_list[-1][-1])
# save a reference to the composite function's filter
self.filter_ref_list[-1][-1] = filter_ref
# concatenate output with layer input to ensure DenseNet paradigm
if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
output = tf.concat(axis=3, values=(_input, comp_out))
else:
output = tf.concat(3, (_input, comp_out))
# Keep track of kernel CS.
for i in reversed(kernels_to_prune):
del self.kCS_FIFO[i], self.dkCS_FIFO[i]
return output
def add_internal_layer(self, _input, layer, growth_rate):
"""
Adds a new convolutional (dense) layer within a block.
This layer will perform the composite function H_l([x_0, ..., x_l-1])
to obtain its output x_l.
It will then concatenate x_l with the layer's input: all the outputs of
the previous layers, resulting in [x_0, ..., x_l-1, x_l].
Returns the layer's output, as well as the input of its conv2d.
Args:
_input: tensor, the operation's input;
layer: `int`, identifier number for this layer (within a block);
growth_rate: `int`, number of new convolutions per dense layer.
"""
with tf.variable_scope("layer_%d" % layer):
# use the composite function H_l (3x3 kernel conv)
if not self.bc_mode:
comp_out, filter_ref, kernels, in_cv = self.composite_function(
_input, out_features=growth_rate, kernel_size=3)
# in DenseNet-BC mode, add a bottleneck layer before H_l (1x1 conv)
elif self.bc_mode:
bottleneck_out, filter_ref = self.bottleneck(
_input, out_features=growth_rate)
if self.ft_filters or self.should_self_construct:
self.filter_ref_list[-1].append(filter_ref)
comp_out, filter_ref, kernels, in_cv = self.composite_function(
bottleneck_out, out_features=growth_rate, kernel_size=3)
# save a reference to the composite function's filter
if self.ft_filters or self.should_self_construct:
self.filter_ref_list[-1].append(filter_ref)
if self.ft_kernels or self.should_self_construct:
self.kernel_name_counter = growth_rate-1
self.kernels_ref_list[-1].append(kernels)
# concatenate output of H_l with layer input (all previous outputs)
if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
output = tf.concat(axis=3, values=(_input, comp_out))
else:
output = tf.concat(3, (_input, comp_out))
# If self-constructing at kernel level, keep track of kernel CS.
if self.has_micro_algo:
self.kCS_FIFO = [
deque(maxlen=self.dkCS_softening) for i in range(growth_rate)]
self.dkCS_FIFO = [
deque(maxlen=self.dkCS_std_window) for i in range(growth_rate)]
return output, in_cv
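    # Channel arithmetic sketch: the concatenation means this layer's
    # output carries in_features + growth_rate maps (e.g. 16 input maps
    # with k=12 give 28), which is why each subsequent layer's filter
    # grows along its input axis.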
def add_block(self, _input, block, growth_rate, layers_in_block, is_last):
"""
Adds a new block containing several convolutional (dense) layers.
These are connected together following a DenseNet architecture,
as defined in the paper.
Returns the block's output, as well as the inputs to the last layer
and to its conv2d.
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for this block;
growth_rate: `int`, number of new convolutions per dense layer;
layers_in_block: `int`, number of dense layers in this block;
is_last: `bool`, is this the last block in the network or not.
"""
if self.ft_filters or self.should_self_construct:
self.filter_ref_list.append([])
if self.ft_kernels or self.should_self_construct:
self.kernels_ref_list.append([])
if is_last:
self.cross_entropy = []
with tf.variable_scope("Block_%d" % block) as self.current_block:
output = _input
for layer in range(layers_in_block):
# The inputs of the last layer and its conv2d must be saved
# (useful for self-construction kernel by kernel)
input_lt_lay = output
output, input_lt_cnv = self.add_internal_layer(
input_lt_lay, layer, growth_rate)
if self.ft_cross_entropies and is_last:
# Save the cross-entropy for all layers except the last one
# (it is always saved as part of the end-graph operations)
if layer != layers_in_block-1:
_, cross_entropy = self.cross_entropy_loss(
output, self.labels, block, layer,
preserve_transition=self.preserve_transition_l)
self.cross_entropy.append(cross_entropy)
return output, input_lt_lay, input_lt_cnv
# TRANSITION LAYERS -------------------------------------------------------
# -------------------------------------------------------------------------
def transition_layer(self, _input, block):
"""
Adds a new transition layer after a block. This layer's inputs are the
concatenated feature maps of each layer in the block.
The layer first runs the composite function with kernel size 1:
- In DenseNet mode, it produces as many feature maps as the input had.
- In DenseNet-BC mode, it produces reduction (theta) times as many,
compressing the output.
Afterwards, an average pooling operation (of size 2) is carried to
change the output's size.
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the previous block.
"""
with tf.variable_scope("Transition_after_block_%d" % block):
# add feature map compression in DenseNet-BC mode
out_features = int(int(_input.get_shape()[-1]) * self.reduction)
# use the composite function H_l (1x1 kernel conv)
output, filter_ref, kernels, in_cv = self.composite_function(
_input, out_features=out_features, kernel_size=1)
# save a reference to the composite function's filter
if self.ft_filters or self.should_self_construct:
self.filter_ref_list[-1].append(filter_ref)
if self.ft_kernels or self.should_self_construct:
self.kernels_ref_list[-1].append(kernels)
# use average pooling to reduce feature map size
output = self.avg_pool(output, k=2)
return output
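    # Compression sketch: in DenseNet-BC mode with reduction=0.5, a block
    # output of 28 feature maps is compressed to int(28 * 0.5) = 14 maps
    # before the k=2 average pooling halves the spatial dimensions.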
def transition_layer_to_classes(self, _input, block, layer):
"""
Adds the transition layer after the last block. This layer outputs the
estimated probabilities by classes.
It performs:
- batch normalisation,
- ReLU activation function,
- wider-than-normal average pooling,
- reshaping the output into a 1d tensor,
- fully-connected layer (matrix multiplication, weights and biases).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block.
"""
self.features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
FC_name = "FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
FC_name += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# Batch normalisation.
self.batch_norm_counter = 0
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
# ReLU activation function.
output = tf.nn.relu(output)
# Wide average pooling.
last_pool_kernel = int(output.get_shape()[-2])
output = self.avg_pool(output, k=last_pool_kernel)
# Reshaping the output into 1d.
output = tf.reshape(output, [-1, self.features_total])
# FC (fully-connected) layer.
self.FC_W = []
for i in range(self.features_total):
self.FC_W.append(self.weight_variable_xavier(
[self.n_classes], name=FC_name+("_W%d" % i)))
self.FC_W_counter = self.features_total-1
self.FC_bias = self.bias_variable(
[self.n_classes], name=FC_name+"_bias")
stacked_FC_W = tf.stack(self.FC_W, axis=0)
logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
return logits
def reconstruct_transition_to_classes(self, _input, block, layer):
"""
Reconstruct the transition layer to classes after adding a new kernel
or layer in the last block (in such a case, the transition layer must
remain mostly unchanged except for the new weights).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block.
"""
new_features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
FC_name = "FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
FC_name += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# The batch norm contains beta and gamma params for each kernel,
# we first copy the param values from old kernels.
beta_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[self.features_total]))
gamma_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[self.features_total]))
# Then we create a new batch norm and initialize its params.
self.batch_norm_counter += 1
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
new_beta = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[new_features_total])
new_gamma = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[new_features_total])
self.sess.run(tf.variables_initializer([new_beta, new_gamma]))
# For these params, we copy the old param values, and leave
# the remaining new values for the new kernels.
new_beta_values = self.sess.run(new_beta)
new_gamma_values = self.sess.run(new_gamma)
difference = new_features_total-self.features_total
new_beta_values[:-difference] = beta_values
new_gamma_values[:-difference] = gamma_values
# Then we assign the modified values to reconstruct the batch norm.
self.sess.run(new_beta.assign(new_beta_values))
self.sess.run(new_gamma.assign(new_gamma_values))
self.features_total = new_features_total
# ReLU, average pooling, and reshaping into 1d
# these do not contain any trainable params, so they are rewritten.
output = tf.nn.relu(output)
last_pool_kernel = int(output.get_shape()[-2])
output = self.avg_pool(output, k=last_pool_kernel)
features_total = int(output.get_shape()[-1])
output = tf.reshape(output, [-1, features_total])
# For the FC layer: add new weights, keep biases and old weights.
for i in range(len(self.FC_W), features_total):
self.FC_W_counter += 1
self.FC_W.append(self.weight_variable_xavier(
[self.n_classes],
name=FC_name+("_W%d" % self.FC_W_counter)))
stacked_FC_W = tf.stack(self.FC_W, axis=0)
logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
return logits
def reconstruct_transition_to_classes_post_pruning(self, _input,
block, layer,
kernels_to_prune):
"""
Reconstruct the transition layer to classes after pruning kernels in
the last layer of the last block (in such a case, the transition layer
must remain mostly unchanged and unused weights must be removed).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block;
kernels_to_prune: `list` of `int`, gives the specific kernels that
were pruned in the last layer.
"""
new_features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# Copy the batch norm beta and gamma param values from old kernels.
beta_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[self.features_total]))
gamma_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[self.features_total]))
# Create a new batch norm and get its param variables.
self.batch_norm_counter += 1
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
new_beta = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[new_features_total])
new_gamma = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[new_features_total])
# self.sess.run(tf.variables_initializer([new_beta, new_gamma]))
# Copy the param values corresponding to the remaining kernels.
prepruning_kernel_count = len(self.kernels_ref_list[block][-1])
prepruning_kernel_count += len(kernels_to_prune)
difference = self.features_total - prepruning_kernel_count
new_beta_values = beta_values[:difference]
new_gamma_values = gamma_values[:difference]
for k in range(prepruning_kernel_count):
if k not in kernels_to_prune:
new_beta_values = np.append(
new_beta_values, beta_values[k+difference])
new_gamma_values = np.append(
new_gamma_values, gamma_values[k+difference])
print(new_features_total)
print(len(new_beta_values))
print("%d (difference) = %d (new_features_total) - %d (current_kernel_count)" % (
difference, new_features_total, prepruning_kernel_count-len(kernels_to_prune)))
print("%d (old difference) = %d (features_total) - %d (prepruning_kernel_count)" % (
difference, self.features_total, prepruning_kernel_count))
# Assign those param values to reconstruct the batch norm.
self.sess.run(new_beta.assign(new_beta_values))
self.sess.run(new_gamma.assign(new_gamma_values))
self.features_total = new_features_total
# Rewrite: ReLU, average pooling, and reshaping into 1d.
output = tf.nn.relu(output)
last_pool_kernel = int(output.get_shape()[-2])
output = self.avg_pool(output, k=last_pool_kernel)
features_total = int(output.get_shape()[-1])
output = tf.reshape(output, [-1, features_total])
# For the FC layer: remove weights for unpruned kernels, keep all else.
for i in reversed(kernels_to_prune):
self.pruned_varnames.append(self.FC_W[difference+i].name)
del self.FC_W[difference+i]
stacked_FC_W = tf.stack(self.FC_W, axis=0)
logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
return logits
# END GRAPH OPERATIONS ----------------------------------------------------
# -------------------------------------------------------------------------
def cross_entropy_loss(self, _input, labels, block, layer,
preserve_transition=False, kernels_to_prune=None):
"""
Takes an input and adds a transition layer to obtain predictions for
classes. Then calculates the cross-entropy loss for that input with
respect to expected labels. Returns the prediction tensor and the
calculated cross-entropy.
Args:
_input: tensor, the operation's input;
labels: tensor, the expected labels (classes) for the data;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block;
preserve_transition: `bool`, whether or not to preserve the
transition to classes (if yes, adapts the previous transition,
otherwise creates a new one).
kernels_to_prune: `list` of `int` or None, identifiers of recently
pruned kernels (used after kernel-level pruning, otherwise its
value is None).
"""
# add the FC transition layer to the classes (+ softmax)
if preserve_transition:
            # the reconstruction depends on the last self-constructing action
if kernels_to_prune is None:
logits = self.reconstruct_transition_to_classes(
_input, block, layer)
else:
logits = self.reconstruct_transition_to_classes_post_pruning(
_input, block, layer, kernels_to_prune)
else:
logits = self.transition_layer_to_classes(_input, block, layer)
prediction = tf.nn.softmax(logits)
# set the calculation for the losses (cross_entropy and l2_loss)
        # tuple comparison, so the check still holds when the minor version
        # resets after a major bump (e.g. TF 2.0)
        if (TF_VERSION[0], TF_VERSION[1]) >= (1, 5):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
labels=labels))
else:
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits,
labels=labels))
return prediction, cross_entropy
def _define_end_graph_operations(self, preserve_transition=False,
kernels_to_prune=None):
"""
Adds the last layer on top of the (editable portion of the) graph.
Then defines the operations for cross-entropy, the training step,
and the accuracy.
Args:
preserve_transition: `bool`, whether or not to preserve the
transition to classes (if yes, adapts the previous transition,
otherwise creates a new one);
kernels_to_prune: `list` of `int` or None, identifiers of recently
pruned kernels (used after kernel-level pruning, otherwise its
value is None).
"""
# obtain the predicted logits, set the calculation for the losses
# (cross_entropy and l2_loss)
prediction, cross_entropy = self.cross_entropy_loss(
self.output, self.labels, self.total_blocks-1,
self.layer_num_list[-1]-1, preserve_transition, kernels_to_prune)
self.cross_entropy.append(cross_entropy)
var_list = self.get_variables_in_use()
l2_loss = tf.add_n(
[tf.nn.l2_loss(var) for var in var_list])
# set the optimizer and define the training step
optimizer = tf.train.MomentumOptimizer(
self.learning_rate, self.nesterov_momentum, use_nesterov=True)
self.train_step = optimizer.minimize(
cross_entropy + l2_loss * self.weight_decay, var_list=var_list)
# set the calculation for the accuracy
correct_prediction = tf.equal(
tf.argmax(prediction, 1),
tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# MAIN GRAPH BUILDING FUNCTIONS -------------------------------------------
# -------------------------------------------------------------------------
def _new_kernels_to_last_layer(self, kernel_num, complementarity=True):
"""
Add new convolution kernels to the current last (composite) layer.
The number of kernels to be added is given by the kernel_num parameter.
Args:
kernel_num: `int`, number of new kernels (i.e. convolutions) to add
to the last layer (usually the specified expansion rate);
complementarity: `bool`, whether a complementarity mechanism
should be used to initialise new kernels or not
(see add_new_kernels_to_layer for more on this).
"""
# Safely access the current block's variable scope.
with tf.variable_scope(self.current_block,
auxiliary_name_scope=False) as cblock_scope:
with tf.name_scope(cblock_scope.original_name_scope):
# Add the kernel and save the new relevant inputs and outputs.
self.output = self.add_new_kernels_to_layer(
self.input_lt_lay, self.input_lt_cnv,
self.layer_num_list[-1]-1, kernel_num,
complementarity=complementarity)
# Delete the last cross-entropy from the list, we will recreate it.
del self.cross_entropy[-1]
print("ADDED %d NEW KERNEL(S) TO LAYER #%d (BLOCK #%d)! "
"It now has got %d kernels." %
(kernel_num, self.layer_num_list[-1]-1, self.total_blocks-1,
len(self.kernels_ref_list[-1][-1])))
self._define_end_graph_operations(preserve_transition=True)
self._initialize_uninitialized_variables()
self._count_trainable_params_in_use()
def _prune_kernels_in_last_layer(self, kernels_to_prune):
"""
Prune some convolution kernels in the current last (composite) layer.
The specific kernels to be pruned are given as a list (the
kernels_to_prune parameter).
Args:
kernels_to_prune: `list` of `int`, gives the specific kernels (i.e.
convolutions) to prune in the last layer.
"""
# Safely access the current block's variable scope.
with tf.variable_scope(self.current_block,
auxiliary_name_scope=False) as cblock_scope:
with tf.name_scope(cblock_scope.original_name_scope):
# Remove the kernels, save the new relevant inputs and outputs.
self.output = self.remove_kernels_from_layer(
self.input_lt_lay, self.input_lt_cnv,
self.layer_num_list[-1]-1, kernels_to_prune)
# Delete the last cross-entropy from the list, we will recreate it.
del self.cross_entropy[-1]
print("PRUNED KERNELS (%s) IN LAYER #%d (BLOCK #%d)! "
"It now has got %d kernels." %
(', '.join(map(str, kernels_to_prune)),
self.layer_num_list[-1]-1, self.total_blocks-1,
len(self.kernels_ref_list[-1][-1])))
# Register which kernels were pruned in the ft-logs.
if self.should_save_ft_logs:
self.feature_writer.write(
'\"Pruned: {}\"\n'.format(kernels_to_prune))
self._define_end_graph_operations(preserve_transition=True,
kernels_to_prune=kernels_to_prune)
self._initialize_uninitialized_variables()
self._count_trainable_params_in_use()
def _new_layer(self, growth_rate):
"""
Add a new layer at the end of the current last block.
In DenseNet-BC mode, two layers (bottleneck and composite/convolution)
will be added instead of just one.
Args:
growth_rate: `int`, number of kernels (i.e. convolutions) in the
new composite layer (usually the specified growth rate).
"""
# Safely access the current block's variable scope.
with tf.variable_scope(self.current_block,
auxiliary_name_scope=False) as cblock_scope:
with tf.name_scope(cblock_scope.original_name_scope):
# Add the layer and save the new relevant inputs and outputs.
self.input_lt_lay = self.output
self.output, self.input_lt_cnv = self.add_internal_layer(
self.input_lt_lay, self.layer_num_list[-1], growth_rate)
self.layer_num_list[-1] += 1
# Refresh cross-entropy list if not measuring layer cross-entropies.
if not self.ft_cross_entropies:
self.cross_entropy = []
if not self.bc_mode:
print("ADDED A NEW LAYER (%d kernels) to the last block (#%d)! "
"It now has got %d layers." %
(growth_rate, self.total_blocks-1, self.layer_num_list[-1]))
if self.bc_mode:
print("ADDED A NEW BOTTLENECK AND A NEW COMPOSITE LAYER "
"(%d kernels) to the last block (#%d)! "
"It now has got %d bottleneck and %d composite layers." %
(growth_rate, self.total_blocks-1,
self.layer_num_list[-1], self.layer_num_list[-1]))
self.update_paths()
self._define_end_graph_operations(
preserve_transition=self.preserve_transition_l)
self._initialize_uninitialized_variables()
self._count_trainable_params_in_use()
def _new_block(self):
"""
Add a transition layer, and a new block (with one layer) at the end
of the current last block.
In DenseNet-BC mode, the new module will begin with two layers
(bottleneck and composite/convolution) instead of just one.
"""
# The input of the last block is useful if the block must be ditched.
self.input_lt_blc = self.transition_layer(
self.output, self.total_blocks-1)
# The inputs of the last layer and conv are for kernel-wise self-const.
self.output, self.input_lt_lay, self.input_lt_cnv = self.add_block(
self.input_lt_blc, self.total_blocks, self.growth_rate, 1, True)
self.layer_num_list.append(1)
self.total_blocks += 1
print("ADDED A NEW BLOCK (#%d), "
"The number of layers in each block is now:" %
(self.total_blocks-1))
if not self.bc_mode:
print('\n'.join('Block %d: %d composite layers.' % (
k, self.layer_num_list[k]) for k in range(len(
self.layer_num_list))))
if self.bc_mode:
            print('\n'.join('Block %d: %d bottleneck layers and %d composite '
'layers.' % (k, self.layer_num_list[k],
self.layer_num_list[k])
for k in range(len(self.layer_num_list))))
self.update_paths()
self._define_end_graph_operations()
self._initialize_uninitialized_variables()
self._count_trainable_params_in_use()
def _build_graph(self):
"""
Builds the graph and defines the operations for:
cross-entropy (also l2_loss and a momentum optimizer),
training step (minimize momentum optimizer using l2_loss + cross-entr),
accuracy (reduce mean).
"""
growth_rate = self.growth_rate
layers_in_each_block = self.layer_num_list
self.output = self.images
# first add a 3x3 convolution layer with first_output_features outputs
with tf.variable_scope("Initial_convolution"):
self.input_lt_blc, filter_ref = self.conv2d(
self.output, out_features=self.first_output_features,
kernel_size=3)
if self.ft_filters or self.should_self_construct:
self.filter_ref_list = [[filter_ref]]
if self.ft_kernels or self.should_self_construct:
self.kernels_ref_list = []
# then add the required blocks (and save the relevant inputs)
for block in range(self.total_blocks):
self.output, self.input_lt_lay, self.input_lt_cnv = self.add_block(
self.input_lt_blc, block, growth_rate,
layers_in_each_block[block], block == self.total_blocks - 1)
# all blocks except the last have transition layers
if block != self.total_blocks - 1:
self.input_lt_blc = self.transition_layer(self.output, block)
self._define_end_graph_operations()
# -------------------------------------------------------------------------
# ------------------ INITIALIZING THE TENSORFLOW SESSION ------------------
# -------------------------------------------------------------------------
def _initialize_uninitialized_variables(self):
"""
Finds the references to all uninitialized variables, then tells
TensorFlow to initialize these variables.
"""
# get a set with all the names of uninitialized variables
uninit_varnames = list(map(str, self.sess.run(
tf.report_uninitialized_variables())))
uninit_vars = []
# for every variable, check if its name is in the uninitialized set
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
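            # report_uninitialized_variables returns byte strings, so their
            # str() form is "b'name'"; wrap the variable name the same way
            # before testing membership in the uninitialized set.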
varname = 'b\'' + var.name.split(':')[0] + '\''
if varname in uninit_varnames:
uninit_vars.append(var)
# initialize all the new variables
self.sess.run(tf.variables_initializer(uninit_vars))
def _initialize_all_variables(self):
"""
Tells TensorFlow to initialize all variables, using the proper method
for the TensorFlow version.
"""
        # tuple comparison, so versions such as 1.0-1.9 pass the check too
        if (TF_VERSION[0], TF_VERSION[1]) >= (0, 10):
self.sess.run(tf.global_variables_initializer())
else:
self.sess.run(tf.initialize_all_variables())
def _initialize_session(self):
"""
Starts a TensorFlow session with the correct configuration.
Then tells TensorFlow to initialize all variables, create a saver
and a log file writer.
"""
config = tf.ConfigProto()
# specify the CPU inter and intra threads used by MKL
config.intra_op_parallelism_threads = self.num_intra_threads
config.inter_op_parallelism_threads = self.num_inter_threads
# restrict model GPU memory utilization to the minimum required
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
# initialize variables, create saver, create log file writers
self._initialize_all_variables()
self.saver = tf.train.Saver()
if self.should_save_logs:
            if (TF_VERSION[0], TF_VERSION[1]) >= (0, 10):
logswriter = tf.summary.FileWriter
else:
logswriter = tf.train.SummaryWriter
self.summary_writer = logswriter(self.logs_path)
if self.should_save_ft_logs:
self.feature_writer = open('./%s.csv' % self.ft_logs_path, "w")
# -------------------------------------------------------------------------
# ------------------- COUNTING ALL TRAINABLE PARAMETERS -------------------
# -------------------------------------------------------------------------
def _count_trainable_params(self):
"""
Uses TensorFlow commands to count the number of trainable parameters
in the graph (sum of the multiplied dimensions of each TF variable).
Then prints the number of parameters.
"""
total_parameters = 0
# print("Variable names:")
for variable in tf.trainable_variables():
# print(variable.name)
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
print("Total trainable params: %.1fk" % (total_parameters / 1e3))
def _count_trainable_params_in_use(self, write_to_ft_logs=False):
"""
Uses TensorFlow commands to count the total number of trainable
parameters in the graph, as well as the number of parameters that are
currently 'in use'. This refers specifically to: the multiplied
dimensions of each TF variable that is not a discarded transition to
classes or batch normalization, or a pruned element.
The method prints not only the number of parameters, but also the
number of parameters in the convolutional and fully connected parts
of the TensorFlow graph.
Args:
write_to_ft_logs: `bool`, if feature logs are being written,
whether or not to write the parameter counts to those logs;
"""
total_parameters = 0
conv_params_in_use = 0
fc_params_in_use = 0
fc_name = 'FC_'
t2fc_name = 'Transition_to_FC_'
suffix = 'block_%d' % (self.total_blocks-1)
if not self.preserve_transition_l:
suffix += '_layer_%d' % (self.layer_num_list[-1]-1)
true_fc_name = fc_name + suffix
true_t2fc_name = t2fc_name + suffix
true_t2fc_name += '/BatchNorm_%d' % self.batch_norm_counter
# print("Variable names:")
for variable in tf.trainable_variables():
# print(variable.name)
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
# Add all identified parameters to total_parameters.
total_parameters += variable_parameters
# From here onwards only consider non-pruned parameters.
if not (self.should_self_construct and
variable.name in self.pruned_varnames):
# Add params from the current FC layer to fc_params_in_use.
if variable.name.startswith(true_fc_name):
fc_params_in_use += variable_parameters
# Add params from the current batchnorm to conv_params_in_use.
elif variable.name.startswith(true_t2fc_name):
conv_params_in_use += variable_parameters
# Add params not in a rejected batchnorm or FC layer (to conv).
elif (not variable.name.startswith(fc_name) and
not variable.name.startswith(t2fc_name)):
conv_params_in_use += variable_parameters
# Add together the two counts for parameters 'in use'.
total_parameters_in_use = conv_params_in_use + fc_params_in_use
print("Total trainable params: %.1fk" % (total_parameters / 1e3))
print("Total params in use: %.1fk" % (total_parameters_in_use / 1e3))
print("\tConvolutional: %.1fk" % (conv_params_in_use / 1e3))
print("\tFully Connected: %.1fk" % (fc_params_in_use / 1e3))
if self.should_save_ft_logs and write_to_ft_logs:
self.feature_writer.write("\nTotal trainable params: %.1fk\n" % (
total_parameters / 1e3))
self.feature_writer.write("Total params in use: %.1fk\n" % (
total_parameters_in_use / 1e3))
self.feature_writer.write("Convolutional: %.1fk\n" % (
conv_params_in_use / 1e3))
self.feature_writer.write("Fully Connected: %.1fk\n" % (
fc_params_in_use / 1e3))
def get_variables_in_use(self):
"""
Get a list of the trainable variables in the graph that are currently
'in use' (all variables except those in discarded transitions to
classes or batch normalizations, or in pruned elements).
"""
vars_in_use = []
fc_name = 'FC_'
t2fc_name = 'Transition_to_FC_'
suffix = 'block_%d' % (self.total_blocks-1)
if not self.preserve_transition_l:
suffix += '_layer_%d' % (self.layer_num_list[-1]-1)
true_fc_name = fc_name + suffix
true_t2fc_name = t2fc_name + suffix
true_t2fc_name += '/BatchNorm_%d' % self.batch_norm_counter
for variable in tf.trainable_variables():
# From here onwards only consider non-pruned parameters.
if not (self.should_self_construct and
variable.name in self.pruned_varnames):
# Add variables from the current FC layer.
if variable.name.startswith(true_fc_name):
vars_in_use.append(variable)
# Add variables from the current batchnorm.
elif variable.name.startswith(true_t2fc_name):
vars_in_use.append(variable)
# Add variables not in a rejected batchnorm or FC layer.
elif (not variable.name.startswith(fc_name) and
not variable.name.startswith(t2fc_name)):
vars_in_use.append(variable)
# print("Variables in use:")
# for var in vars_in_use:
# print(var.name)
# if len(self.pruned_varnames) != 0:
# print("Pruned variable names:")
# for varname in self.pruned_varnames:
# print(varname)
return vars_in_use
# -------------------------------------------------------------------------
# -------------------- TRAINING AND TESTING THE MODEL ---------------------
# -------------------------------------------------------------------------
def print_pertinent_features(self, loss, accuracy, epoch, validation_set):
"""
Prints on console the current values of pertinent features.
The loss and accuracy are those on the validation set if such a set is
being used, otherwise they are those on the training set.
If feature logs are being saved, this function saves feature values.
If images are being saved, it also saves filter features as images.
Args:
            loss: `list` of `float` (if validation_set == True, else `float`),
                the loss (cross_entropy) for this epoch; as a `list` it
                contains several loss values, one for each internal layer
                of the last block;
accuracy: `float`, accuracy for this epoch;
epoch: `int`, current training epoch;
validation_set: `bool`, whether a validation set is used or not.
"""
# print the current accuracy
print("Current accuracy = %f" % accuracy)
if validation_set:
# print a cross-entropy value for each layer, if calculating them
if self.ft_cross_entropies:
print("Cross-entropy per layer in block #%d:" % (
self.total_blocks-1))
for l in range(len(loss)):
print("* Layer #%d: cross-entropy = %f" % (l, loss[l]))
# else print only the current validation cross-entropy
else:
print("Current cross-entropy = %f" % loss[-1])
else:
print("Current cross-entropy = %f" % loss)
if self.should_save_ft_logs:
# save the previously printed feature values
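            # (self.ftc and self.ftd are assumed to be the CSV field
            # separator and the decimal mark used for the feature logs.)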
self.feature_writer.write(("\"%d\"%s\"%f\"%s" % (
epoch, self.ftc, accuracy, self.ftc)).replace(".", self.ftd))
if validation_set:
for l in range(len(loss)):
self.feature_writer.write(("\"%f\"%s" % (loss[l], self.ftc)
).replace(".", self.ftd))
else:
self.feature_writer.write(("\"%f\"%s" % (loss, self.ftc)
).replace(".", self.ftd))
self.feature_writer.write('\"\"')
if self.ft_filters:
# process filters, sometimes save their state as images
print('-' * 40 + "\nProcessing filters:")
print('\n* Global input data (post-processed):')
for b in range(0, self.total_blocks):
cs, lcs_dst, lcs_src = self.process_block_filters(b, epoch)
self.ft_log_filters(b, cs, lcs_dst, lcs_src)
elif self.ft_kernels:
# process kernels instead
print('-' * 40 + "\nProcessing kernels:")
for b in range(0, self.total_blocks):
for l in range(0, self.layer_num_list[b]):
print('\n* Block %d filter %d:' % (b, l))
cs = self.process_layer_kernels(b, l, epoch)
for k in range(len(cs)):
print(' - Kernel %d: CS = %f' % (k, cs[k]))
if self.should_save_ft_logs:
for k in range(len(cs)):
self.feature_writer.write((
'%s\"%f\"' % (self.ftc, cs[k])).replace(
".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
print('-' * 40)
if self.should_save_ft_logs:
self.feature_writer.write('\n')
# SELF-CONSTRUCTING ALGORITHM VARIANTS ------------------------------------
# -------------------------------------------------------------------------
def self_constructing_var0(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #0) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
        This algorithm consists of a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) when a layer settles (its layer cs for sources
is == 1).
- Improvement: end the stage when a total of max_n_ep epochs have
elapsed (since the addition of the last block).
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch (here unused).
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
if settled_layers > 0:
self.settled_layers_ceil = settled_layers
self.algorithm_stage += 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if epoch >= self.max_n_ep:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
return continue_training
def self_constructing_var1(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #1) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
        This algorithm consists of a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) when a layer settles (its layer cs for sources
is == 1).
- Improvement: end the stage when a total of max_n_ep epochs have
elapsed (since the addition of the last block);
if another layer settles, add a layer and restart the countdown.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch (here unused).
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
if settled_layers > 0:
self.settled_layers_ceil = settled_layers
self.algorithm_stage += 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if epoch >= self.max_n_ep:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
elif settled_layers > self.settled_layers_ceil:
self.settled_layers_ceil = settled_layers
self._new_layer(self.growth_rate)
return continue_training
def self_constructing_var2(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #2) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
        This algorithm consists of a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) when a layer settles (its layer cs for sources
is == 1), or after std_window epochs or more if accuracy hasn't
changed much.
- Improvement: countdown of patience_param epochs until the stage ends;
if another layer settles, add a layer and restart the countdown.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
# the ascension stage uses a FIFO list of past accuracies.
self.accuracy_FIFO.append(accuracy)
# after std_window ascension stage epochs, end the stage if the
# accuracy didn't change much in a while.
if (len(self.accuracy_FIFO) == self.std_window and
np.std(self.accuracy_FIFO) < self.std_tolerance):
self.algorithm_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
# else follow the usual protocol based on settled layers.
else:
if settled_layers > 0 and self.layer_num_list[-1] > 2:
self.settled_layers_ceil = settled_layers
self.algorithm_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if self.patience_cntdwn <= 0:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
elif settled_layers > self.settled_layers_ceil:
# if a layer settles, add a layer and restart the countdown
self.settled_layers_ceil = settled_layers
self._new_layer(self.growth_rate)
self.patience_cntdwn = self.patience_param
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
else:
self.patience_cntdwn -= 1
return continue_training
def self_constructing_var3(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #3) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
        This algorithm consists of a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) after std_window epochs or more if accuracy
hasn't changed much.
- Improvement: countdown of patience_param epochs until the stage ends;
if another layer settles, add a layer and restart the countdown.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
# the ascension stage uses a FIFO list of past accuracies.
self.accuracy_FIFO.append(accuracy)
# after std_window ascension stage epochs, end the stage if the
# accuracy didn't change much in a while.
if (len(self.accuracy_FIFO) == self.std_window and
np.std(self.accuracy_FIFO) < self.std_tolerance):
self.algorithm_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if self.patience_cntdwn <= 0:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
elif settled_layers > self.settled_layers_ceil:
# if a layer settles, add a layer and restart the countdown
self.settled_layers_ceil = settled_layers
self._new_layer(self.growth_rate)
self.patience_cntdwn = self.patience_param
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
else:
self.patience_cntdwn -= 1
return continue_training
def self_constructing_var4(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #4) for one
training epoch.
Builds new layers in the last block depending on parameters.
Returns True if training should continue, False otherwise.
        This algorithm consists of a macro-algorithm, which adds layers to the
last block, and a micro-algorithm, which builds those layers kernel by
kernel.
- The macro-algorithm adds a new layer with one kernel and runs the
micro-algorithm to build it (i.e. to add/prune kernels in it), then
checks if the accuracy has improved significantly since the previous
layer addition. If so it adds a new layer, else the algorithm ends.
- The micro-algorithm consists of a succession of four stages:
- Ascension: add one kernel every m_asc_thresh training epochs,
break the loop (end the stage) when one of the kernels settles
(its CS has remained stable for a certain number of epochs).
- Improvement: countdown of m_patience_param epochs until the stage
ends; if another kernel settles AND if it is useful (CS above
usefulness_thresh, set by the user), add a kernel and restart the
countdown.
- Pruning: first wait until all kernels have settled (useful or
not), then save the current accuracy and prune all useless
kernels (CS below uselessness_thresh, set by the user) to end the
stage.
- Recovery: wait for one last countdown of m_patience_param epochs
(optionally resetting the learning rate to its initial value and
reducing it according to rlr0); after this countdown wait until
reaching pre-pruning accuracy, then end the stage.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
settled_kernels_count = 0
useful_kernels_count = 0
useless_kernels_list = []
# Update the kernel CS lists, count settled and useful kernels
cs_kernels = self.process_layer_kernels(
self.total_blocks-1, self.layer_num_list[-1]-1, epoch)
for k in range(len(self.kCS_FIFO)):
self.kCS_FIFO[k].append(cs_kernels[k])
if len(self.kCS_FIFO[k]) == self.dkCS_softening:
self.dkCS_FIFO[k].append(
(self.kCS_FIFO[k][-1] - self.kCS_FIFO[k][0])/(
self.dkCS_softening-1))
# Settled = kCS remained close to 0 during the last epochs
if ((len(self.dkCS_FIFO[k]) == self.dkCS_std_window) and (
np.abs(np.mean(self.dkCS_FIFO[k])
) <= self.dkCS_stl_thresh) and
(np.abs(np.std(self.dkCS_FIFO[k])
) <= self.dkCS_stl_thresh)):
settled_kernels_count += 1
# Useful = settled, and kCS above the usefulness thresh
if np.mean(self.kCS_FIFO[k]) >= self.usefulness_thresh:
useful_kernels_count += 1
# Useless = settled, and kCS below the uselessness thresh
if np.mean(self.kCS_FIFO[k]) <= self.uselessness_thresh:
useless_kernels_list.append(k)
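        # Settling example (hypothetical): with dkCS_softening = 5, each
        # smoothed slope is (kCS[t] - kCS[t-4]) / 4 over the 5-deep FIFO; a
        # kernel counts as settled once the mean and std of its last
        # dkCS_std_window slopes are both within dkCS_stl_thresh of zero.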
# stage #0 = ascension stage (currently does nothing)
if self.algorithm_stage == 0:
self.algorithm_stage += 1
# stage #1 = improvement stage
if self.algorithm_stage == 1:
# micro-stage #0 = micro-ascension stage
if self.micro_stage == 0:
if settled_kernels_count >= 1:
                    # end the stage when one or more kernels have settled
self.useful_kernels_ceil = useful_kernels_count
self.micro_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
elif (epoch-1) % self.m_asc_thresh == 0:
self._new_kernels_to_last_layer(
self.expansion_rate,
complementarity=self.complementarity)
# micro-stage #1 = micro-improvement stage
if self.micro_stage == 1:
if self.m_patience_cntdwn <= 0:
# at the end of the patience countdown, end stage
self.micro_stage += 1
elif useful_kernels_count > self.useful_kernels_ceil:
# if a new kernel is useful, add a kernel and restart ctdwn
self.useful_kernels_ceil = useful_kernels_count
self._new_kernels_to_last_layer(
self.expansion_rate,
complementarity=self.complementarity)
self.m_patience_cntdwn = self.m_patience_param
self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
else:
# patience countdown progress
self.m_patience_cntdwn -= 1
# micro-stage #2 = micro-pruning stage
if self.micro_stage == 2:
# wait until all kernels have settled (bound to happen?)
if settled_kernels_count == len(self.kCS_FIFO):
# save the accuracy, prune useless kernels and end stage
self.accuracy_pre_pruning = accuracy
self._prune_kernels_in_last_layer(useless_kernels_list)
self.micro_stage += 1
# run one last patience countdown for recovery
self.m_patience_cntdwn = self.m_patience_param
self.max_n_ep = epoch + self.m_patience_param + 1
# micro-stage #3 = micro-recovery stage (accessed in next epoch)
elif self.micro_stage == 3:
# patience countdown to ensure recovery
if self.m_patience_cntdwn > 0:
self.m_patience_cntdwn -= 1
# wait until reaching pre-pruning accuracy
elif accuracy >= self.accuracy_pre_pruning:
# prune again if there are useless kernels, else end stage
# if len(useless_kernels_list) >= 1:
# self.micro_stage = 2
# else:
self.micro_stage += 1
# self.algorithm_stage = 2
# at the end of the micro-algorithm, try to add a new layer
if self.micro_stage == 4:
# reset everything for the micro-algorithm
self.micro_stage = 0
self.useful_kernels_ceil = 0
self.m_patience_cntdwn = self.m_patience_param
self.accuracy_pre_pruning = 0
# check if the accuracy has improved since the last layer
# if so, add a layer, else end the improvement stage
if abs(accuracy-self.accuracy_last_layer) >= self.impr_thresh:
self.accuracy_last_layer = accuracy
self._new_layer(self.growth_rate)
# different uselessness threshold for new layers
# self.uselessness_thresh = self.alt_uselessness_thresh
# alt. number of kernels = half the previous
# layer's number if during the ascension stage.
# self._new_layer(floor(
# len(self.kernels_ref_list[-1][-1])/2))
# else:
self.algorithm_stage += 1
# stage #2 (nothing yet, stop the algorithm and reset everything)
if self.algorithm_stage == 2:
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
self.accuracy_last_layer = 0
return continue_training
def self_constructing_var5(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #5) for one
training epoch.
Builds new layers in the last block depending on parameters.
Returns True if training should continue, False otherwise.
        This algorithm consists of a macro-algorithm, which adds layers to the
last block, and a micro-algorithm, which builds those layers kernel by
kernel.
- The macro-algorithm adds a new layer with one kernel and runs the
micro-algorithm to build it (i.e. to add/prune kernels in it), then
checks if the accuracy has improved significantly since the previous
layer addition. If so it adds a new layer, else the algorithm ends.
- The micro-algorithm consists of a succession of three stages:
- Improvement: countdown of m_patience_param epochs; if the number
of useful kernels (CS above usefulness_thresh, automatically set)
is above the latest max number of useful kernels, add a kernel
and restart the countdown; if the countdown ends, wait until all
kernels have settled and end the stage.
- Pruning: save the current accuracy and prune all useless kernels
(CS below uselessness_thresh, automatically set) to end the
stage.
- Recovery: wait for one last countdown of m_patience_param epochs
(optionally resetting the learning rate to its initial value and
reducing it according to rlr0); after this countdown wait until
reaching pre-pruning accuracy, then if there are any new useless
kernels wait for all kernels to settle and return to pruning,
else end the stage.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
settled_kernels_count = 0
useful_kernels_count = 0
useless_kernels_list = []
kCS_settled = []
# Update the kernel CS lists, count settled kernels
cs_kernels = self.process_layer_kernels(
self.total_blocks-1, self.layer_num_list[-1]-1, epoch)
for k in range(len(self.kCS_FIFO)):
self.kCS_FIFO[k].append(cs_kernels[k])
if len(self.kCS_FIFO[k]) == self.dkCS_softening:
self.dkCS_FIFO[k].append(
(self.kCS_FIFO[k][-1] - self.kCS_FIFO[k][0])/(
self.dkCS_softening-1))
# Settled = kCS remained close to 0 during the last epochs
if ((len(self.dkCS_FIFO[k]) == self.dkCS_std_window) and (
np.abs(np.mean(self.dkCS_FIFO[k])
) <= self.dkCS_stl_thresh) and
(np.abs(np.std(self.dkCS_FIFO[k])
) <= self.dkCS_stl_thresh)):
settled_kernels_count += 1
if self.micro_stage == 1:
kCS_settled.append(self.kCS_FIFO[k][-1])
# If half of the original kernels have settled
if settled_kernels_count >= 0.5*self.growth_rate:
# During impr. stage, calculate usefulness and uselessness thresh
if self.micro_stage == 1:
self.usefulness_thresh = min(kCS_settled) + (
max(kCS_settled) - min(kCS_settled)
)*self.auto_usefulness_thresh
self.uselessness_thresh = min(kCS_settled) + (
max(kCS_settled) - min(kCS_settled)
)*self.auto_uselessness_thresh
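                # Example (hypothetical): settled kCS values spanning
                # [0.2, 0.8] with auto_usefulness_thresh = 0.75 and
                # auto_uselessness_thresh = 0.25 give thresholds
                # 0.2 + 0.6*0.75 = 0.65 and 0.2 + 0.6*0.25 = 0.35.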
# Detect and count useful and useless kernels
for k in range(len(self.kCS_FIFO)):
# Useful = kCS above the usefulness thresh
if np.mean(self.kCS_FIFO[k]) >= self.usefulness_thresh:
useful_kernels_count += 1
# Useless = kCS below the uselessness thresh
if np.mean(self.kCS_FIFO[k]) <= self.uselessness_thresh:
useless_kernels_list.append(k)
# stage #0 = ascension stage (currently does nothing)
if self.algorithm_stage == 0:
self.algorithm_stage += 1
# stage #1 = improvement stage
if self.algorithm_stage == 1:
# micro-stage #0 = just some settings for the next (actual) stage
if self.micro_stage == 0:
self.micro_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
# micro-stage #1 = micro-improvement stage
if self.micro_stage == 1:
if self.m_patience_cntdwn <= 0:
# at the end of the patience countdown, end stage when all
# the kernels have settled
if settled_kernels_count == len(self.kCS_FIFO):
self.micro_stage += 1
elif useful_kernels_count > self.useful_kernels_ceil:
# if the number of useful kernels is above the latest max,
# add a kernel and restart ctdwn
self.useful_kernels_ceil = useful_kernels_count
self._new_kernels_to_last_layer(
self.expansion_rate,
complementarity=self.complementarity)
self.m_patience_cntdwn = self.m_patience_param
self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
else:
# patience countdown progress
self.m_patience_cntdwn -= 1
# micro-stage #2 = micro-pruning stage
if self.micro_stage == 2:
# save the accuracy, prune useless kernels and end stage
self.accuracy_pre_pruning = accuracy
self._prune_kernels_in_last_layer(useless_kernels_list)
self.micro_stage += 1
# run one last patience countdown for recovery
self.m_patience_cntdwn = self.m_patience_param
self.max_n_ep = epoch + self.m_patience_param + 1
# micro-stage #3 = micro-recovery stage (accessed in next epoch)
elif self.micro_stage == 3:
# patience countdown to ensure recovery
if self.m_patience_cntdwn > 0:
self.m_patience_cntdwn -= 1
# wait until reaching pre-pruning accuracy
elif accuracy >= self.accuracy_pre_pruning:
# prune again if there are useless kernels, else end stage
if len(useless_kernels_list) >= 1:
# but first, wait for all kernels to settle
if settled_kernels_count == len(self.kCS_FIFO):
self.micro_stage = 2
else:
self.micro_stage += 1
# at the end of the micro-algorithm, try to add a new layer
if self.micro_stage == 4:
# reset everything for the micro-algorithm
self.micro_stage = 0
self.useful_kernels_ceil = 0
self.m_patience_cntdwn = self.m_patience_param
self.accuracy_pre_pruning = 0
# check if the accuracy has improved since the last layer
# if so, add a layer, else end the improvement stage
if abs(accuracy-self.accuracy_last_layer) >= self.impr_thresh:
self.accuracy_last_layer = accuracy
self._new_layer(self.growth_rate)
# alt. number of kernels = half the previous
# layer's number if during the ascension stage.
# self._new_layer(floor(
# len(self.kernels_ref_list[-1][-1])/2))
else:
self.algorithm_stage += 1
# stage #2 (nothing yet, stop the algorithm and reset everything)
if self.algorithm_stage == 2:
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
self.accuracy_last_layer = 0
return continue_training
def self_constructing_minimal(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (minimal kernel-by-kernel
variant) for one training epoch.
Builds new layers in the last block depending on parameters.
Returns True if training should continue, False otherwise.
The algorithm is meant to be run with an initial architecture of 1
layer with 1 kernel. It only adds an additional kernel to the
layer after m_asc_thresh epochs, then ends after performing a patience
countdown (to further train the network). This is meant to represent a
"minimal" kernel-level self-construction.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
# Update the kernel CS lists
cs_kernels = self.process_layer_kernels(
self.total_blocks-1, self.layer_num_list[-1]-1, epoch)
for k in range(len(self.kCS_FIFO)):
self.kCS_FIFO[k].append(cs_kernels[k])
# stage #0 = ascension stage (currently does nothing)
if self.algorithm_stage == 0:
self.algorithm_stage += 1
# stage #1 = improvement stage
if self.algorithm_stage == 1:
# micro-stage #0 = minimal micro-ascension stage
if self.micro_stage == 0:
if len(self.kernels_ref_list[-1][-1]) >= 3:
                    # end the stage once there are at least three kernels.
self.micro_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.m_patience_param + 1
elif (epoch-1) % self.m_asc_thresh == 0:
self._new_kernels_to_last_layer(
self.expansion_rate,
complementarity=self.complementarity)
# micro-stage #1 = patience countdown
if self.micro_stage == 1:
if self.m_patience_cntdwn <= 0:
# reset everything for the micro-algorithm
self.micro_stage = 0
self.useful_kernels_ceil = 0
self.m_patience_cntdwn = self.m_patience_param
# at the end of the patience countdown, end macro-stage
self.algorithm_stage += 1
else:
# patience countdown progress
self.m_patience_cntdwn -= 1
# stage #2 (nothing yet, stop the algorithm and reset everything)
if self.algorithm_stage == 2:
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
self.accuracy_last_layer = 0
return continue_training
# LEARNING RATE REDUCTION VARIANTS (FOR SELF CONSTRUCTING) ----------------
# -------------------------------------------------------------------------
def self_constr_rlr0(self, learning_rate, initial_lr, rlr_1, rlr_2):
"""
An optional learning rate reduction (Reduce LR #0) to be performed
after a step of the self-constructing algorithm (based on the patience
countdown, so it only works with variant #2 onwards).
Returns the new learning rate value.
Whenever the countdown reaches an epoch that corresponds to a given
fraction of the patience parameter (the patience_param multiplied by
1-rlr_1 or 1-rlr_2), the current learning rate is divided by 10.
If at any point the countdown is reset, the current learning rate
returns to its initial value.
Args:
            learning_rate: `float`, the current learning rate value.
initial_lr: the initial value for the learning rate.
rlr_1: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the first time.
rlr_2: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the second time.
"""
if not self.has_micro_algo:
patience_cntdwn = self.patience_cntdwn
patience_param = self.patience_param
else:
patience_cntdwn = self.m_patience_cntdwn
patience_param = self.m_patience_param
if (patience_cntdwn == int(patience_param * (1-rlr_1))):
learning_rate = learning_rate / 10
elif (patience_cntdwn == int(patience_param * (1-rlr_2))):
learning_rate = learning_rate / 10
elif (patience_cntdwn == patience_param):
learning_rate = initial_lr
return learning_rate
def self_constr_rlr1(self, learning_rate, initial_lr, rlr_1, rlr_2):
"""
An optional learning rate reduction (Reduce LR #1) to be performed
after a step of the self-constructing algorithm (based on the patience
countdown, so it only works with variant #2 onwards).
Returns the new learning rate value.
The initial learning rate value is initial_lr.
The first time that the countdown reaches an epoch that corresponds to
patience_param * (1 - rlr_1), the learning rate becomes initial_lr/10.
The first time that the countdown reaches an epoch that corresponds to
patience_param * (1 - rlr_2), the learning rate becomes initial_lr/100.
Args:
            learning_rate: `float`, the current learning rate value.
initial_lr: the initial value for the learning rate.
rlr_1: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the first time.
rlr_2: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the second time.
"""
if not self.has_micro_algo:
patience_cntdwn = self.patience_cntdwn
patience_param = self.patience_param
else:
patience_cntdwn = self.m_patience_cntdwn
patience_param = self.m_patience_param
if (patience_cntdwn == int(patience_param * (1-rlr_1))):
learning_rate = min(learning_rate, initial_lr / 10)
elif (patience_cntdwn == int(patience_param * (1-rlr_2))):
# learning_rate = min(learning_rate, initial_lr / 100)
learning_rate = initial_lr / 100 # min is unnecessary here
return learning_rate
# MAIN TRAINING AND TESTING FUNCTIONS -------------------------------------
# -------------------------------------------------------------------------
def train_one_epoch(self, data, batch_size, learning_rate):
"""
Trains the model for one epoch using data from the proper training set.
Args:
data: training data yielded by the dataset's data provider;
batch_size: `int`, number of examples in a training batch;
            learning_rate: `float`, learning rate for the optimizer.
"""
num_examples = data.num_examples
total_loss = []
total_accuracy = []
# save each training batch's loss and accuracy
for i in range(num_examples // batch_size):
batch = data.next_batch(batch_size)
images, labels = batch
feed_dict = {
self.images: images,
self.labels: labels,
self.learning_rate: learning_rate,
self.is_training: True,
}
fetches = [self.train_step, self.cross_entropy[-1], self.accuracy]
result = self.sess.run(fetches, feed_dict=feed_dict)
_, loss, accuracy = result
total_loss.append(loss)
total_accuracy.append(accuracy)
if self.should_save_logs:
self.batches_step += 1
self.log_loss_accuracy(
loss, accuracy, self.batches_step, prefix='per_batch',
should_print=False)
# use the saved data to calculate the mean loss and accuracy
mean_loss = np.mean(total_loss)
mean_accuracy = np.mean(total_accuracy)
return mean_loss, mean_accuracy
def test(self, data, batch_size):
"""
Tests the model using the proper testing set.
Args:
data: testing data yielded by the dataset's data provider;
batch_size: `int`, number of examples in a testing batch.
"""
num_examples = data.num_examples
total_loss = []
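        # one loss accumulator per tracked cross-entropy (one per layer of
        # the last block when ft_cross_entropies is enabled, else just one)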
for l in range(len(self.cross_entropy)):
total_loss.append([])
total_accuracy = []
# save each testing batch's loss and accuracy
for i in range(num_examples // batch_size):
batch = data.next_batch(batch_size)
feed_dict = {
self.images: batch[0],
self.labels: batch[1],
self.is_training: False,
}
loss = self.sess.run(self.cross_entropy, feed_dict=feed_dict)
accuracy = self.sess.run(self.accuracy, feed_dict=feed_dict)
for j in range(len(loss)):
total_loss[j].append(loss[j])
total_accuracy.append(accuracy)
# use the saved data to calculate the mean loss and accuracy
mean_loss = []
for loss_list in total_loss:
mean_loss.append(np.mean(loss_list))
mean_accuracy = np.mean(total_accuracy)
return mean_loss, mean_accuracy
def train_all_epochs(self, train_params):
"""
Trains the model for a certain number of epochs, using parameters
specified in the train_params argument.
Args (in train_params):
batch_size: `int`, number of examples in a training batch;
max_n_ep: `int`, maximum number of training epochs to run;
            initial_learning_rate: `float`, initial learning rate for optimizer;
reduce_lr_1: `float`, if not self-constructing the network,
first fraction of max_n_ep after which the current
learning rate is divided by 10 (initial_learning_rate/10);
reduce_lr_2: `float`, if not self-constructing the network,
second fraction of max_n_ep after which the current
learning rate is divided by 10 (initial_learning_rate/100);
validation_set: `bool`, should a validation set be used or not;
validation_split: `float` or None;
`float`: chunk of the training set used as the validation set;
None: use the testing set as the validation set;
shuffle: `str` or None, or `bool`;
`str` or None: used with CIFAR datasets, should we shuffle the
data only before training ('once_prior_train'), on every
epoch ('every_epoch') or not at all (None);
`bool`: used with SVHN, should we shuffle the data or not;
normalisation: `str` or None;
None: don't use any normalisation for pixels;
'divide_255': divide all pixels by 255;
'divide_256': divide all pixels by 256;
                'by_chanels': subtract the mean of the pixel's channel and
divide the result by the channel's standard deviation.
"""
self.max_n_ep = train_params['max_n_ep']
initial_lr = train_params['initial_learning_rate']
learning_rate = train_params['initial_learning_rate']
batch_size = train_params['batch_size']
rlr_1 = train_params['reduce_lr_1']
rlr_2 = train_params['reduce_lr_2']
validation_set = train_params.get('validation_set', False)
total_start_time = time.time()
epoch = 1 # current training epoch
epoch_last_b = 0 # epoch at which the last block was added
while True:
# only print epoch name on certain epochs
if (epoch-1) % self.ft_period == 0:
print('\n', '-'*30, "Train epoch: %d" % epoch, '-'*30, '\n')
start_time = time.time()
# if not self-constructing, may reduce learning rate at some epochs
if not self.should_self_construct and self.should_change_lr:
if (epoch == int(self.max_n_ep * rlr_1)) or (
epoch == int(self.max_n_ep * rlr_2)):
learning_rate = learning_rate / 10
print("Learning rate has been divided by 10, new lr = %f" %
learning_rate)
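            # Example (hypothetical): max_n_ep = 300, reduce_lr_1 = 0.5 and
            # reduce_lr_2 = 0.75 divide the LR by 10 at epochs 150 and 225.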
# training step for one epoch
print("Training...", end=' ')
loss, acc = self.train_one_epoch(
self.data_provider.train, batch_size, learning_rate)
# save logs
if self.should_save_logs:
self.log_loss_accuracy(loss, acc, epoch, prefix='train')
# validation step after the epoch
if validation_set:
print("Validation...")
loss, acc = self.test(
self.data_provider.validation, batch_size)
# save logs
if self.should_save_logs:
self.log_loss_accuracy(loss[-1], acc, epoch,
prefix='valid')
# save feature logs (on certain epochs)
if (epoch-1) % self.ft_period == 0:
self.print_pertinent_features(loss, acc, epoch, validation_set)
# save model if required
if self.should_save_model:
self.save_model()
# step of the self-constructing algorithm
if self.should_self_construct:
if epoch - epoch_last_b != 1:
# can break here if self-constructing algorithm is over
if not self.self_constructing_step(
epoch - epoch_last_b, acc):
# add another block if block_count not yet exceeded
if self.total_blocks < self.block_count:
                        self._new_block()
                        # record when this block was added, so the per-block
                        # epoch count restarts for the new block
                        epoch_last_b = epoch
else:
break
# optional learning rate reduction for self-constructing
if self.should_change_lr:
if self.has_micro_algo and self.micro_stage == 3:
# micro-recovery uses rlr0 for proper recovery
learning_rate = self.self_constr_rlr0(
learning_rate, initial_lr, rlr_1, rlr_2)
else:
learning_rate = self.self_constr_rlr(
learning_rate, initial_lr, rlr_1, rlr_2)
# if this is a new block, reset the algorithm's variables
else:
                    self.settled_layers_ceil = 0  # highest number of settled layers
self.algorithm_stage = 0 # start with ascension stage
self.patience_cntdwn = self.patience_param
if self.has_micro_algo:
                        self.useful_kernels_ceil = 0  # highest number of useful kernels
self.micro_stage = 0 # kernel-level stages (if needed)
self.m_patience_cntdwn = self.m_patience_param
self.accuracy_pre_pruning = 0
self.accuracy_last_layer = 0
# measure training time for this epoch
time_per_epoch = time.time() - start_time
seconds_left = int((self.max_n_ep - epoch) * time_per_epoch)
print("Time per epoch: %s, Est. complete (%d epochs) in: %s" % (
str(timedelta(seconds=time_per_epoch)),
self.max_n_ep,
str(timedelta(seconds=seconds_left))))
# increase epoch, break at max_n_ep if not self-constructing
epoch += 1
if not self.should_self_construct and epoch >= self.max_n_ep+1:
break
# measure total training time
total_training_time = time.time() - total_start_time
print("\nTOTAL TRAINING TIME: %s\n" % str(timedelta(
seconds=total_training_time)))
if self.should_save_ft_logs:
self.feature_writer.write("\nTOTAL TRAINING TIME: %s\n" % str(
timedelta(seconds=total_training_time)))
self._count_trainable_params_in_use(write_to_ft_logs=True)
| [
"tensorflow.get_variable",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"numpy.moveaxis",
"datetime.timedelta",
"tensorflow.cast",
"tensorflow.variables_initializer",
"numpy.mean",
"numpy.multiply",
"tensorflow.__version__.split",
... | [((28949, 28985), 'numpy.moveaxis', 'np.moveaxis', (['f_image', '[0, 1]', '[1, 0]'], {}), '(f_image, [0, 1], [1, 0])\n', (28960, 28985), True, 'import numpy as np\n'), ((33291, 33351), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'shape', 'name': '"""input_images"""'}), "(tf.float32, shape=shape, name='input_images')\n", (33305, 33351), True, 'import tensorflow as tf\n'), ((33411, 33482), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.n_classes]', 'name': '"""labels"""'}), "(tf.float32, shape=[None, self.n_classes], name='labels')\n", (33425, 33482), True, 'import tensorflow as tf\n'), ((33549, 33607), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]', 'name': '"""learning_rate"""'}), "(tf.float32, shape=[], name='learning_rate')\n", (33563, 33607), True, 'import tensorflow as tf\n'), ((33672, 33705), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]'}), '(tf.bool, shape=[])\n', (33686, 33705), True, 'import tensorflow as tf\n'), ((35087, 35134), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['_input', 'ksize', 'strides', 'padding'], {}), '(_input, ksize, strides, padding)\n', (35101, 35134), True, 'import tensorflow as tf\n'), ((35444, 35566), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['_input'], {'scale': '(True)', 'is_training': 'self.is_training', 'updates_collections': 'None', 'scope': 'scope'}), '(_input, scale=True, is_training=self.\n is_training, updates_collections=None, scope=scope)\n', (35472, 35566), True, 'import tensorflow as tf\n'), ((36532, 36582), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['_input', 'filter_ref', 'strides', 'padding'], {}), '(_input, filter_ref, strides, padding)\n', (36544, 36582), True, 'import tensorflow as tf\n'), ((37877, 37917), 'tensorflow.stack', 'tf.stack', (['kernels'], {'axis': '(3)', 'name': '"""filter"""'}), "(kernels, axis=3, name='filter')\n", (37885, 37917), True, 'import tensorflow as tf\n'), ((37991, 38041), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['_input', 'filter_ref', 'strides', 'padding'], {}), '(_input, filter_ref, strides, padding)\n', (38003, 38041), True, 'import tensorflow as tf\n'), ((38932, 38972), 'tensorflow.stack', 'tf.stack', (['kernels'], {'axis': '(3)', 'name': '"""filter"""'}), "(kernels, axis=3, name='filter')\n", (38940, 38972), True, 'import tensorflow as tf\n'), ((38990, 39040), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['_input', 'filter_ref', 'strides', 'padding'], {}), '(_input, filter_ref, strides, padding)\n', (39002, 39040), True, 'import tensorflow as tf\n'), ((40647, 40676), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': 'shape'}), '(0.0, shape=shape)\n', (40658, 40676), True, 'import tensorflow as tf\n'), ((40692, 40734), 'tensorflow.get_variable', 'tf.get_variable', (['name'], {'initializer': 'initial'}), '(name, initializer=initial)\n', (40707, 40734), True, 'import tensorflow as tf\n'), ((60848, 60875), 'tensorflow.stack', 'tf.stack', (['self.FC_W'], {'axis': '(0)'}), '(self.FC_W, axis=0)\n', (60856, 60875), True, 'import tensorflow as tf\n'), ((64331, 64358), 'tensorflow.stack', 'tf.stack', (['self.FC_W'], {'axis': '(0)'}), '(self.FC_W, axis=0)\n', (64339, 64358), True, 'import tensorflow as tf\n'), ((68537, 68564), 'tensorflow.stack', 'tf.stack', (['self.FC_W'], {'axis': '(0)'}), '(self.FC_W, axis=0)\n', (68545, 68564), True, 'import tensorflow as tf\n'), ((70495, 70516), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', 
(70508, 70516), True, 'import tensorflow as tf\n'), ((72380, 72473), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['self.learning_rate', 'self.nesterov_momentum'], {'use_nesterov': '(True)'}), '(self.learning_rate, self.nesterov_momentum,\n use_nesterov=True)\n', (72406, 72473), True, 'import tensorflow as tf\n'), ((82255, 82303), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (82272, 82303), True, 'import tensorflow as tf\n'), ((83169, 83185), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (83183, 83185), True, 'import tensorflow as tf\n'), ((83527, 83552), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (83537, 83552), True, 'import tensorflow as tf\n'), ((83686, 83702), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (83700, 83702), True, 'import tensorflow as tf\n'), ((84684, 84708), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (84706, 84708), True, 'import tensorflow as tf\n'), ((86353, 86377), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (86375, 86377), True, 'import tensorflow as tf\n'), ((89183, 89207), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (89205, 89207), True, 'import tensorflow as tf\n'), ((128977, 128996), 'numpy.mean', 'np.mean', (['total_loss'], {}), '(total_loss)\n', (128984, 128996), True, 'import numpy as np\n'), ((129021, 129044), 'numpy.mean', 'np.mean', (['total_accuracy'], {}), '(total_accuracy)\n', (129028, 129044), True, 'import numpy as np\n'), ((130327, 130350), 'numpy.mean', 'np.mean', (['total_accuracy'], {}), '(total_accuracy)\n', (130334, 130350), True, 'import numpy as np\n'), ((132643, 132654), 'time.time', 'time.time', ([], {}), '()\n', (132652, 132654), False, 'import time\n'), ((221, 246), 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (241, 246), True, 'import tensorflow as tf\n'), ((12884, 12921), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (12895, 12921), False, 'import os\n'), ((13209, 13246), 'os.makedirs', 'os.makedirs', (['logs_path'], {'exist_ok': '(True)'}), '(logs_path, exist_ok=True)\n', (13220, 13246), False, 'import os\n'), ((13391, 13429), 'os.makedirs', 'os.makedirs', (['"""ft_logs/"""'], {'exist_ok': '(True)'}), "('ft_logs/', exist_ok=True)\n", (13402, 13429), False, 'import os\n'), ((13577, 13616), 'os.makedirs', 'os.makedirs', (['images_path'], {'exist_ok': '(True)'}), '(images_path, exist_ok=True)\n', (13588, 13616), False, 'import os\n'), ((21862, 21896), 'numpy.split', 'np.split', (['f_image', 'splitting_guide'], {}), '(f_image, splitting_guide)\n', (21870, 21896), True, 'import numpy as np\n'), ((29234, 29270), 'numpy.moveaxis', 'np.moveaxis', (['f_image', '[1, 2]', '[0, 1]'], {}), '(f_image, [1, 2], [0, 1])\n', (29245, 29270), True, 'import numpy as np\n'), ((29293, 29347), 'numpy.resize', 'np.resize', (['f_image', '(f_d[1] * f_d[3], f_d[0] * f_d[2])'], {}), '(f_image, (f_d[1] * f_d[3], f_d[0] * f_d[2]))\n', (29302, 29347), True, 'import numpy as np\n'), ((29518, 29557), 'os.makedirs', 'os.makedirs', (['im_filepath'], {'exist_ok': '(True)'}), '(im_filepath, exist_ok=True)\n', (29529, 29557), False, 'import os\n'), ((30143, 30158), 'numpy.abs', 'np.abs', (['k_image'], {}), '(k_image)\n', (30149, 30158), True, 'import numpy as np\n'), ((41746, 41785), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""composite_function"""'], {}), "('composite_function')\n", (41763, 41785), True, 'import tensorflow as tf\n'), ((41924, 41941), 'tensorflow.nn.relu', 'tf.nn.relu', (['in_cv'], {}), '(in_cv)\n', (41934, 41941), True, 'import tensorflow as tf\n'), ((43660, 43691), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bottleneck"""'], {}), "('bottleneck')\n", (43677, 43691), True, 'import tensorflow as tf\n'), ((43832, 43850), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (43842, 43850), True, 'import tensorflow as tf\n'), ((45762, 45799), 'tensorflow.variable_scope', 'tf.variable_scope', (["('layer_%d' % layer)"], {}), "('layer_%d' % layer)\n", (45779, 45799), True, 'import tensorflow as tf\n'), ((50972, 51009), 'tensorflow.variable_scope', 'tf.variable_scope', (["('layer_%d' % layer)"], {}), "('layer_%d' % layer)\n", (50989, 51009), True, 'import tensorflow as tf\n'), ((53317, 53354), 'tensorflow.variable_scope', 'tf.variable_scope', (["('layer_%d' % layer)"], {}), "('layer_%d' % layer)\n", (53334, 53354), True, 'import tensorflow as tf\n'), ((56132, 56169), 'tensorflow.variable_scope', 'tf.variable_scope', (["('Block_%d' % block)"], {}), "('Block_%d' % block)\n", (56149, 56169), True, 'import tensorflow as tf\n'), ((58050, 58104), 'tensorflow.variable_scope', 'tf.variable_scope', (["('Transition_after_block_%d' % block)"], {}), "('Transition_after_block_%d' % block)\n", (58067, 58104), True, 'import tensorflow as tf\n'), ((59867, 59916), 'tensorflow.variable_scope', 'tf.variable_scope', (['var_scope'], {'reuse': 'tf.AUTO_REUSE'}), '(var_scope, reuse=tf.AUTO_REUSE)\n', (59884, 59916), True, 'import tensorflow as tf\n'), ((60165, 60183), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (60175, 60183), True, 'import tensorflow as tf\n'), ((60407, 60452), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, self.features_total]'], {}), '(output, [-1, self.features_total])\n', (60417, 60452), True, 'import tensorflow as tf\n'), ((60893, 60924), 'tensorflow.matmul', 'tf.matmul', (['output', 'stacked_FC_W'], {}), '(output, stacked_FC_W)\n', (60902, 60924), True, 'import tensorflow as tf\n'), ((61781, 61830), 'tensorflow.variable_scope', 'tf.variable_scope', (['var_scope'], {'reuse': 'tf.AUTO_REUSE'}), '(var_scope, reuse=tf.AUTO_REUSE)\n', (61798, 61830), True, 'import tensorflow as tf\n'), ((63730, 63748), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (63740, 63748), True, 'import tensorflow as tf\n'), ((63949, 63989), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, features_total]'], {}), '(output, [-1, features_total])\n', (63959, 63989), True, 'import tensorflow as tf\n'), ((64376, 64407), 'tensorflow.matmul', 'tf.matmul', (['output', 'stacked_FC_W'], {}), '(output, stacked_FC_W)\n', (64385, 64407), True, 'import tensorflow as tf\n'), ((65461, 65510), 'tensorflow.variable_scope', 'tf.variable_scope', (['var_scope'], {'reuse': 'tf.AUTO_REUSE'}), '(var_scope, reuse=tf.AUTO_REUSE)\n', (65478, 65510), True, 'import tensorflow as tf\n'), ((68018, 68036), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (68028, 68036), True, 'import tensorflow as tf\n'), ((68237, 68277), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, features_total]'], {}), '(output, [-1, features_total])\n', (68247, 68277), True, 'import tensorflow as tf\n'), ((68582, 68613), 'tensorflow.matmul', 'tf.matmul', (['output', 'stacked_FC_W'], {}), '(output, stacked_FC_W)\n', (68591, 
68613), True, 'import tensorflow as tf\n'), ((72704, 72728), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (72713, 72728), True, 'import tensorflow as tf\n'), ((72742, 72767), 'tensorflow.argmax', 'tf.argmax', (['self.labels', '(1)'], {}), '(self.labels, 1)\n', (72751, 72767), True, 'import tensorflow as tf\n'), ((72808, 72847), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (72815, 72847), True, 'import tensorflow as tf\n'), ((73710, 73775), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.current_block'], {'auxiliary_name_scope': '(False)'}), '(self.current_block, auxiliary_name_scope=False)\n', (73727, 73775), True, 'import tensorflow as tf\n'), ((75212, 75277), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.current_block'], {'auxiliary_name_scope': '(False)'}), '(self.current_block, auxiliary_name_scope=False)\n', (75229, 75277), True, 'import tensorflow as tf\n'), ((76975, 77040), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.current_block'], {'auxiliary_name_scope': '(False)'}), '(self.current_block, auxiliary_name_scope=False)\n', (76992, 77040), True, 'import tensorflow as tf\n'), ((80538, 80578), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Initial_convolution"""'], {}), "('Initial_convolution')\n", (80555, 80578), True, 'import tensorflow as tf\n'), ((82513, 82550), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['uninit_vars'], {}), '(uninit_vars)\n', (82537, 82550), True, 'import tensorflow as tf\n'), ((132999, 133010), 'time.time', 'time.time', ([], {}), '()\n', (133008, 133010), False, 'import time\n'), ((137025, 137036), 'time.time', 'time.time', ([], {}), '()\n', (137034, 137036), False, 'import time\n'), ((6706, 6720), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6718, 6720), False, 'from datetime import timedelta, datetime\n'), ((10650, 10679), 'collections.deque', 'deque', ([], {'maxlen': 'self.std_window'}), '(maxlen=self.std_window)\n', (10655, 10679), False, 'from collections import deque\n'), ((13152, 13196), 'shutil.rmtree', 'shutil.rmtree', (['logs_path'], {'ignore_errors': '(True)'}), '(logs_path, ignore_errors=True)\n', (13165, 13196), False, 'import shutil\n'), ((34635, 34683), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), '()\n', (34681, 34683), True, 'import tensorflow as tf\n'), ((40297, 40335), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (40333, 40335), True, 'import tensorflow as tf\n'), ((45818, 45857), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""composite_function"""'], {}), "('composite_function')\n", (45835, 45857), True, 'import tensorflow as tf\n'), ((49742, 49786), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '(_input, comp_out)'}), '(axis=3, values=(_input, comp_out))\n', (49751, 49786), True, 'import tensorflow as tf\n'), ((49830, 49862), 'tensorflow.concat', 'tf.concat', (['(3)', '(_input, comp_out)'], {}), '(3, (_input, comp_out))\n', (49839, 49862), True, 'import tensorflow as tf\n'), ((49942, 49975), 'collections.deque', 'deque', ([], {'maxlen': 'self.dkCS_softening'}), '(maxlen=self.dkCS_softening)\n', (49947, 49975), False, 'from collections import deque\n'), ((50049, 50083), 'collections.deque', 'deque', ([], {'maxlen': 'self.dkCS_std_window'}), '(maxlen=self.dkCS_std_window)\n', (50054, 50083), False, 
'from collections import deque\n'), ((51028, 51067), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""composite_function"""'], {}), "('composite_function')\n", (51045, 51067), True, 'import tensorflow as tf\n'), ((52326, 52370), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '(_input, comp_out)'}), '(axis=3, values=(_input, comp_out))\n', (52335, 52370), True, 'import tensorflow as tf\n'), ((52414, 52446), 'tensorflow.concat', 'tf.concat', (['(3)', '(_input, comp_out)'], {}), '(3, (_input, comp_out))\n', (52423, 52446), True, 'import tensorflow as tf\n'), ((54643, 54687), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '(_input, comp_out)'}), '(axis=3, values=(_input, comp_out))\n', (54652, 54687), True, 'import tensorflow as tf\n'), ((54731, 54763), 'tensorflow.concat', 'tf.concat', (['(3)', '(_input, comp_out)'], {}), '(3, (_input, comp_out))\n', (54740, 54763), True, 'import tensorflow as tf\n'), ((54916, 54949), 'collections.deque', 'deque', ([], {'maxlen': 'self.dkCS_softening'}), '(maxlen=self.dkCS_softening)\n', (54921, 54949), False, 'from collections import deque\n'), ((55026, 55060), 'collections.deque', 'deque', ([], {'maxlen': 'self.dkCS_std_window'}), '(maxlen=self.dkCS_std_window)\n', (55031, 55060), False, 'from collections import deque\n'), ((62845, 62892), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['[new_beta, new_gamma]'], {}), '([new_beta, new_gamma])\n', (62869, 62892), True, 'import tensorflow as tf\n'), ((70705, 70777), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'labels'}), '(logits=logits, labels=labels)\n', (70747, 70777), True, 'import tensorflow as tf\n'), ((70912, 70981), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'labels'}), '(logits=logits, labels=labels)\n', (70951, 70981), True, 'import tensorflow as tf\n'), ((72261, 72279), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (72274, 72279), True, 'import tensorflow as tf\n'), ((73841, 73888), 'tensorflow.name_scope', 'tf.name_scope', (['cblock_scope.original_name_scope'], {}), '(cblock_scope.original_name_scope)\n', (73854, 73888), True, 'import tensorflow as tf\n'), ((75343, 75390), 'tensorflow.name_scope', 'tf.name_scope', (['cblock_scope.original_name_scope'], {}), '(cblock_scope.original_name_scope)\n', (75356, 75390), True, 'import tensorflow as tf\n'), ((77106, 77153), 'tensorflow.name_scope', 'tf.name_scope', (['cblock_scope.original_name_scope'], {}), '(cblock_scope.original_name_scope)\n', (77119, 77153), True, 'import tensorflow as tf\n'), ((82813, 82846), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (82844, 82846), True, 'import tensorflow as tf\n'), ((82888, 82917), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (82915, 82917), True, 'import tensorflow as tf\n'), ((130283, 130301), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (130290, 130301), True, 'import numpy as np\n'), ((136444, 136455), 'time.time', 'time.time', ([], {}), '()\n', (136453, 136455), False, 'import time\n'), ((22121, 22149), 'numpy.abs', 'np.abs', (['f_split_image[split]'], {}), '(f_split_image[split])\n', (22127, 22149), True, 'import numpy as np\n'), ((39522, 39559), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['_input', 'self.keep_prob'], {}), '(_input, 
self.keep_prob)\n', (39535, 39559), True, 'import tensorflow as tf\n'), ((67043, 67098), 'numpy.append', 'np.append', (['new_beta_values', 'beta_values[k + difference]'], {}), '(new_beta_values, beta_values[k + difference])\n', (67052, 67098), True, 'import numpy as np\n'), ((67161, 67218), 'numpy.append', 'np.append', (['new_gamma_values', 'gamma_values[k + difference]'], {}), '(new_gamma_values, gamma_values[k + difference])\n', (67170, 67218), True, 'import numpy as np\n'), ((82096, 82131), 'tensorflow.report_uninitialized_variables', 'tf.report_uninitialized_variables', ([], {}), '()\n', (82129, 82131), True, 'import tensorflow as tf\n'), ((99830, 99856), 'numpy.std', 'np.std', (['self.accuracy_FIFO'], {}), '(self.accuracy_FIFO)\n', (99836, 99856), True, 'import numpy as np\n'), ((103017, 103043), 'numpy.std', 'np.std', (['self.accuracy_FIFO'], {}), '(self.accuracy_FIFO)\n', (103023, 103043), True, 'import numpy as np\n'), ((116282, 116307), 'numpy.mean', 'np.mean', (['self.kCS_FIFO[k]'], {}), '(self.kCS_FIFO[k])\n', (116289, 116307), True, 'import numpy as np\n'), ((116461, 116486), 'numpy.mean', 'np.mean', (['self.kCS_FIFO[k]'], {}), '(self.kCS_FIFO[k])\n', (116468, 116486), True, 'import numpy as np\n'), ((137106, 137144), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'total_training_time'}), '(seconds=total_training_time)\n', (137115, 137144), False, 'from datetime import timedelta, datetime\n'), ((46981, 47042), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['[self.kernels_ref_list[-1][-1][-1]]'], {}), '([self.kernels_ref_list[-1][-1][-1]])\n', (47005, 47042), True, 'import tensorflow as tf\n'), ((47485, 47509), 'numpy.absolute', 'np.absolute', (['new_k_image'], {}), '(new_k_image)\n', (47496, 47509), True, 'import numpy as np\n'), ((48946, 48987), 'numpy.multiply', 'np.multiply', (['new_k_image', 'new_k_signs_try'], {}), '(new_k_image, new_k_signs_try)\n', (48957, 48987), True, 'import numpy as np\n'), ((107407, 107432), 'numpy.mean', 'np.mean', (['self.kCS_FIFO[k]'], {}), '(self.kCS_FIFO[k])\n', (107414, 107432), True, 'import numpy as np\n'), ((107611, 107636), 'numpy.mean', 'np.mean', (['self.kCS_FIFO[k]'], {}), '(self.kCS_FIFO[k])\n', (107618, 107636), True, 'import numpy as np\n'), ((137288, 137326), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'total_training_time'}), '(seconds=total_training_time)\n', (137297, 137326), False, 'from datetime import timedelta, datetime\n'), ((107053, 107079), 'numpy.mean', 'np.mean', (['self.dkCS_FIFO[k]'], {}), '(self.dkCS_FIFO[k])\n', (107060, 107079), True, 'import numpy as np\n'), ((107174, 107199), 'numpy.std', 'np.std', (['self.dkCS_FIFO[k]'], {}), '(self.dkCS_FIFO[k])\n', (107180, 107199), True, 'import numpy as np\n'), ((115164, 115190), 'numpy.mean', 'np.mean', (['self.dkCS_FIFO[k]'], {}), '(self.dkCS_FIFO[k])\n', (115171, 115190), True, 'import numpy as np\n'), ((115285, 115310), 'numpy.std', 'np.std', (['self.dkCS_FIFO[k]'], {}), '(self.dkCS_FIFO[k])\n', (115291, 115310), True, 'import numpy as np\n'), ((136639, 136672), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'time_per_epoch'}), '(seconds=time_per_epoch)\n', (136648, 136672), False, 'from datetime import timedelta, datetime\n'), ((136726, 136757), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'seconds_left'}), '(seconds=seconds_left)\n', (136735, 136757), False, 'from datetime import timedelta, datetime\n'), ((48464, 48484), 'numpy.copy', 'np.copy', (['new_k_signs'], {}), '(new_k_signs)\n', (48471, 48484), True, 'import numpy as 
np\n'), ((48708, 48738), 'numpy.random.randint', 'np.random.randint', (['in_features'], {}), '(in_features)\n', (48725, 48738), True, 'import numpy as np\n'), ((48639, 48669), 'numpy.random.randint', 'np.random.randint', (['kernel_size'], {}), '(kernel_size)\n', (48656, 48669), True, 'import numpy as np\n'), ((48570, 48600), 'numpy.random.randint', 'np.random.randint', (['kernel_size'], {}), '(kernel_size)\n', (48587, 48600), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import seaborn as sns
import pandas as pd
import scipy as sc
from latex_size import set_size
"""
#=== Import SEB Anomalies ====
#from seasonal_SEB_components import *
#CMIP5
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/ACCESS_anomaly_JJA.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/HADGEM_anomaly_JJA.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/CSIRO_anomaly_JJA.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/IPSL_anomaly_JJA.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/MIROC5_anomaly_JJA.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/NORESM_anomaly_JJA.nc')
#CMIP6
CESM = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/CESM_anomaly_JJA.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/CNRM_CM6_anomaly_JJA.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/CNRM_ESM2_anomaly_JJA.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/MRI_anomaly_JJA.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/src/SEB_anomalies_seasonal/UKMO_anomaly_JJA.nc')
#=== CMIP5 component model mean ===
def model_mean(mod):
return sum(mod)/ len(mod)
CMIP5_models = [ACCESS, HADGEM, CSIRO, IPSL, MIROC5, NORESM]
TT_CMIP5 = []
SMB_CMIP5 = []
ME_CMIP5 = []
RU_CMIP5 = []
RF_CMIP5 = []
for i in range(len(CMIP5_models)):
TT_CM5 = CMIP5_models[i].TT.mean(dim=["X10_105","Y21_199"])
SMB_CM5 = CMIP5_models[i].SMB.mean(dim=["X10_105","Y21_199"])
ME_CM5 = CMIP5_models[i].ME.mean(dim=["X10_105","Y21_199"])
RU_CM5 = CMIP5_models[i].RU.mean(dim=["X10_105","Y21_199"])
RF_CM5 = CMIP5_models[i].RF.mean(dim=["X10_105","Y21_199"])
TT_CMIP5.append(TT_CM5)
SMB_CMIP5.append(SMB_CM5)
ME_CMIP5.append(ME_CM5)
RU_CMIP5.append(RU_CM5)
RF_CMIP5.append(RF_CM5)
TT_CMIP5 = model_mean(TT_CMIP5)
SMB_CMIP5 = model_mean(SMB_CMIP5)
ME_CMIP5 = model_mean(ME_CMIP5)
RU_CMIP5 = model_mean(RU_CMIP5)
RF_CMIP5 = model_mean(RF_CMIP5)
SEB_var_CMIP5 = [SMB_CMIP5, ME_CMIP5, RU_CMIP5, RF_CMIP5]
#=== CMIP6 component model mean ===
CMIP6_models = [CESM, CNRM_CM6, CNRM_ESM2, MRI, UKMO]
TT_CMIP6 = []
SMB_CMIP6 = []
ME_CMIP6 = []
RU_CMIP6 = []
RF_CMIP6 = []
for i in range(len(CMIP6_models)):
TT_CM6 = CMIP6_models[i].TT.mean(dim=["X10_105","Y21_199"])
SMB_CM6 = CMIP6_models[i].SMB.mean(dim=["X10_105","Y21_199"])
ME_CM6 = CMIP6_models[i].ME.mean(dim=["X10_105","Y21_199"])
RU_CM6 = CMIP6_models[i].RU.mean(dim=["X10_105","Y21_199"])
RF_CM6 = CMIP6_models[i].RF.mean(dim=["X10_105","Y21_199"])
TT_CMIP6.append(TT_CM6)
SMB_CMIP6.append(SMB_CM6)
RU_CMIP6.append(RU_CM6)
ME_CMIP6.append(ME_CM6)
RF_CMIP6.append(RF_CM6)
TT_CMIP6 = model_mean(TT_CMIP6)
SMB_CMIP6 = model_mean(SMB_CMIP6)
ME_CMIP6 = model_mean(ME_CMIP6)
RU_CMIP6 = model_mean(RU_CMIP6)
RF_CMIP6 = model_mean(RF_CMIP6)
SEB_var_CMIP6 = [SMB_CMIP6, ME_CMIP6, RU_CMIP6, RF_CMIP6]
SEB_var_label = ['SMB','ME','RU','RF']
# ==== REGRESSION =====
# CMIP5
TT_reg_CM5 = TT_CMIP5.to_dataframe()
SMB_reg_CM5 = SMB_CMIP5.to_dataframe()
ME_reg_CM5 = ME_CMIP5.to_dataframe()
RU_reg_CM5 = RU_CMIP5.to_dataframe()
RF_reg_CM5 = RF_CMIP5.to_dataframe()
#CMIP6
TT_reg_CM6 = TT_CMIP6.to_dataframe()
SMB_reg_CM6 = SMB_CMIP6.to_dataframe()
ME_reg_CM6 = ME_CMIP6.to_dataframe()
RU_reg_CM6 = RU_CMIP6.to_dataframe()
RF_reg_CM6 = RF_CMIP6.to_dataframe()
### CMIP5 ###
x_CM5 = TT_reg_CM5['TT']
y1_CM5 = SMB_reg_CM5['SMB']
y2_CM5 = ME_reg_CM5['ME']
y3_CM5 = RU_reg_CM5['RU']
y4_CM5 = RF_reg_CM5['RF']
coeff_CM5 = np.polyfit(x_CM5, y1_CM5,2)
poly1_CM5 = np.poly1d(coeff_CM5)
coeff2_CM5 = np.polyfit(x_CM5, y2_CM5, 2)
poly2_CM5 = np.poly1d(coeff2_CM5)
coeff3_CM5 = np.polyfit(x_CM5, y3_CM5, 2)
poly3_CM5 = np.poly1d(coeff3_CM5)
coeff4_CM5 = np.polyfit(x_CM5, y4_CM5, 2)
poly4_CM5 = np.poly1d(coeff4_CM5)
t = np.sort(TT_CMIP5)
curve_x_CM5 = np.linspace(t[0], t[-1])
curve_y1_CM5 = poly1_CM5(curve_x_CM5)
curve_y2_CM5 = poly2_CM5(curve_x_CM5)
curve_y3_CM5 = poly3_CM5(curve_x_CM5)
curve_y4_CM5 = poly4_CM5(curve_x_CM5)
### CMIP6 ###
x_CM6 = TT_reg_CM6['TT']
y1_CM6 = SMB_reg_CM6['SMB']
y2_CM6 = ME_reg_CM6['ME']
y3_CM6 = RU_reg_CM6['RU']
y4_CM6 = RF_reg_CM6['RF']
coeff_CM6 = np.polyfit(x_CM6, y1_CM6,2)
poly1_CM6 = np.poly1d(coeff_CM6)
coeff2_CM6 = np.polyfit(x_CM6, y2_CM6, 2)
poly2_CM6 = np.poly1d(coeff2_CM6)
coeff3_CM6 = np.polyfit(x_CM6, y3_CM6, 2)
poly3_CM6 = np.poly1d(coeff3_CM6)
coeff4_CM6 = np.polyfit(x_CM6, y4_CM6, 2)
poly4_CM6 = np.poly1d(coeff4_CM6)
t = np.sort(TT_CMIP6)
curve_x_CM6 = np.linspace(t[0], t[-1])
curve_y1_CM6 = poly1_CM6(curve_x_CM6)
curve_y2_CM6 = poly2_CM6(curve_x_CM6)
curve_y3_CM6 = poly3_CM6(curve_x_CM6)
curve_y4_CM6 = poly4_CM6(curve_x_CM6)
#==========================================================================================
#==========================================================================================
plt.rcParams.update({
"text.usetex": True,
"font.family": 'DejaVu Sans',
"font.serif": ["Computer Modern Roman"]})
#== JOINT PLOT CM5 & CM6 ==
plt.figure(figsize= (10,10))
plt.xlabel('Near-surface Temperature anomalies [$^\circ$C]', fontsize = 14)
plt.ylabel('SMB', fontsize = 14)
plt.title('Seasonal (JJA) SMB component anomalies \n Model Mean of CMIP5 vs. CMIP6 MAR simulations', fontsize=16)
#plt.title('CMIP5 & CMIP6 Model Mean - Seasonal (JJA) SEB Radiative flux component anomalies')
color_CM5 = ['darkolivegreen', 'firebrick','indigo','darkorange']
label_CM5 = ['SMB - CMIP5','ME - CMIP5', 'RU - CMIP5', 'RF - CMIP5']
for i in range(len(SEB_var_CMIP5)):
plt.scatter(TT_CMIP5, SEB_var_CMIP5[i], label= label_CM5[i], s=10, color = color_CM5[i])
#sns.set_palette('colorblind')
plt.plot(curve_x_CM5, curve_y1_CM5, color ='darkolivegreen') ### TEST
plt.plot(curve_x_CM5, curve_y2_CM5, color ='firebrick') ### TEST
plt.plot(curve_x_CM5, curve_y3_CM5, color ='indigo') ### TEST
plt.plot(curve_x_CM5, curve_y4_CM5, color ='darkorange') ### TEST
color_CM6 = ['yellowgreen','lightcoral','mediumpurple', 'sandybrown']
label_CM6 = ['SMB - CMIP6','ME - CMIP6', 'RU - CMIP6', 'RF - CMIP6']
for i in range(len(SEB_var_CMIP6)):
plt.scatter(TT_CMIP6, SEB_var_CMIP6[i] ,label = label_CM6[i], s=10, marker='x',color = color_CM6[i])
plt.plot(curve_x_CM6, curve_y1_CM6, '--', color ='yellowgreen') ### TEST
plt.plot(curve_x_CM6, curve_y2_CM6, '--',color ='lightcoral') ### TEST
plt.plot(curve_x_CM6, curve_y3_CM6, '--', color ='mediumpurple') ### TEST
plt.plot(curve_x_CM6, curve_y4_CM6, '--', color ='sandybrown') ### TEST
#Imports
import matplotlib.patches as mpatches
###sns.set_palette('colorblind')
sns.despine()
plt.legend(ncol=2)
plt.show()
#plt.savefig('/projects/NS9600K/idunnam/src/Figures/SMB_anomalies_jointCM5CM6_JJA.png')
"""
#######============== ANNUAL ============ #############
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import seaborn as sns
import pandas as pd
import scipy as sc
#=== Import SEB Anomalies ====
#from seasonal_SEB_components import *
#CMIP5
"""
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/ACCESS_anomaly_annual.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/HADGEM_anomaly_SMB_annual.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CSIRO_anomaly_annual.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/IPSL_anomaly_annual.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/MIROC5_anomaly_annual.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/NORESM_anomaly_annual.nc')
#CMIP6
CESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CESM_anomaly_annual.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CNRM_CM6_anomaly_annual.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CNRM_ESM2_anomaly_annual.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/MRI_anomaly_annual.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/UKMO_anomaly_annual.nc')
"""
season = input('Enter season [MAM,JJA,SON,DJF]: ').strip()
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/ACCESS_anomaly_'+season+'.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/HADGEM_anomaly_'+season+'_SMB.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CSIRO_anomaly_'+season+'.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/IPSL_anomaly_'+season+'.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MIROC5_anomaly_'+season+'.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/NORESM_anomaly_'+season+'.nc')
#CMIP6
CESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CESM_anomaly_'+season+'.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_CM6_anomaly_'+season+'.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_ESM2_anomaly_'+season+'.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MRI_anomaly_'+season+'.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/UKMO_anomaly_'+season+'.nc')
#=== CMIP5 component model mean ===
def model_mean(mod):
return sum(mod)/ len(mod)
CMIP5_models = [ACCESS, HADGEM, CSIRO, IPSL, MIROC5, NORESM]
TT_CMIP5 = []
SMB_CMIP5 = []
ME_CMIP5 = []
RU_CMIP5 = []
RF_CMIP5 = []
RZ_CMIP5 = []
for i in range(len(CMIP5_models)):
TT_CM5 = CMIP5_models[i].TT.mean(dim=["X10_105","Y21_199"])
SMB_CM5 = CMIP5_models[i].SMB.mean(dim=["X10_105","Y21_199"])
ME_CM5 = CMIP5_models[i].ME.mean(dim=["X10_105","Y21_199"])
RU_CM5 = CMIP5_models[i].RU.mean(dim=["X10_105","Y21_199"])
RF_CM5 = CMIP5_models[i].RF.mean(dim=["X10_105","Y21_199"])
RZ_CM5 = CMIP5_models[i].RZ.mean(dim=["X10_105","Y21_199"])
TT_CMIP5.append(TT_CM5)
SMB_CMIP5.append(SMB_CM5)
ME_CMIP5.append(ME_CM5)
RU_CMIP5.append(RU_CM5)
RF_CMIP5.append(RF_CM5)
RZ_CMIP5.append(RZ_CM5)
TT_CMIP5 = model_mean(TT_CMIP5)
SMB_CMIP5 = model_mean(SMB_CMIP5)
ME_CMIP5 = model_mean(ME_CMIP5)
RU_CMIP5 = model_mean(RU_CMIP5)
RF_CMIP5 = model_mean(RF_CMIP5)
RZ_CMIP5 = model_mean(RZ_CMIP5)
SEB_var_CMIP5 = [SMB_CMIP5, ME_CMIP5, RU_CMIP5]#, RZ_CMIP5, RF_CMIP5]
#=== CMIP6 component model mean ===
CMIP6_models = [CESM, CNRM_CM6, CNRM_ESM2, MRI, UKMO]
TT_CMIP6 = []
SMB_CMIP6 = []
ME_CMIP6 = []
RU_CMIP6 = []
RF_CMIP6 = []
RZ_CMIP6 = []
for i in range(len(CMIP6_models)):
TT_CM6 = CMIP6_models[i].TT.mean(dim=["X10_105","Y21_199"])
SMB_CM6 = CMIP6_models[i].SMB.mean(dim=["X10_105","Y21_199"])
ME_CM6 = CMIP6_models[i].ME.mean(dim=["X10_105","Y21_199"])
RU_CM6 = CMIP6_models[i].RU.mean(dim=["X10_105","Y21_199"])
RF_CM6 = CMIP6_models[i].RF.mean(dim=["X10_105","Y21_199"])
RZ_CM6 = CMIP6_models[i].RZ.mean(dim=["X10_105","Y21_199"])
TT_CMIP6.append(TT_CM6)
SMB_CMIP6.append(SMB_CM6)
RU_CMIP6.append(RU_CM6)
ME_CMIP6.append(ME_CM6)
RF_CMIP6.append(RF_CM6)
RZ_CMIP6.append(RZ_CM6)
TT_CMIP6 = model_mean(TT_CMIP6)
SMB_CMIP6 = model_mean(SMB_CMIP6)
ME_CMIP6 = model_mean(ME_CMIP6)
RU_CMIP6 = model_mean(RU_CMIP6)
RF_CMIP6 = model_mean(RF_CMIP6)
RZ_CMIP6 = model_mean(RZ_CMIP6)
SEB_var_CMIP6 = [SMB_CMIP6, ME_CMIP6, RU_CMIP6]#, RZ_CMIP6, RF_CMIP6]
SEB_var_label = ['SMB','ME','RU']#, 'RZ', 'RF']
# ==== REGRESSION =====
# CMIP5
TT_reg_CM5 = TT_CMIP5.to_dataframe()
SMB_reg_CM5 = SMB_CMIP5.to_dataframe()
ME_reg_CM5 = ME_CMIP5.to_dataframe()
RU_reg_CM5 = RU_CMIP5.to_dataframe()
RF_reg_CM5 = RF_CMIP5.to_dataframe()
RZ_reg_CM5 = RZ_CMIP5.to_dataframe()
#CMIP6
TT_reg_CM6 = TT_CMIP6.to_dataframe()
SMB_reg_CM6 = SMB_CMIP6.to_dataframe()
ME_reg_CM6 = ME_CMIP6.to_dataframe()
RU_reg_CM6 = RU_CMIP6.to_dataframe()
RF_reg_CM6 = RF_CMIP6.to_dataframe()
RZ_reg_CM6 = RZ_CMIP6.to_dataframe()
### CMIP5 ###
x_CM5 = TT_reg_CM5['TT']
y1_CM5 = SMB_reg_CM5['SMB']
y2_CM5 = ME_reg_CM5['ME']
y3_CM5 = RU_reg_CM5['RU']
y4_CM5 = RF_reg_CM5['RF']
y5_CM5 = RZ_reg_CM5['RZ']
coeff_CM5 = np.polyfit(x_CM5, y1_CM5, 2)
poly1_CM5 = np.poly1d(coeff_CM5)
coeff2_CM5 = np.polyfit(x_CM5, y2_CM5, 2)
poly2_CM5 = np.poly1d(coeff2_CM5)
coeff3_CM5 = np.polyfit(x_CM5, y3_CM5, 2)
poly3_CM5 = np.poly1d(coeff3_CM5)
coeff4_CM5 = np.polyfit(x_CM5, y4_CM5, 2)
poly4_CM5 = np.poly1d(coeff4_CM5)
coeff5_CM5 = np.polyfit(x_CM5, y5_CM5, 2)
poly5_CM5 = np.poly1d(coeff5_CM5)
t = np.sort(TT_CMIP5)
curve_x_CM5 = np.linspace(t[0], t[-1])
curve_y1_CM5 = poly1_CM5(curve_x_CM5)
curve_y2_CM5 = poly2_CM5(curve_x_CM5)
curve_y3_CM5 = poly3_CM5(curve_x_CM5)
curve_y4_CM5 = poly4_CM5(curve_x_CM5)
curve_y5_CM5 = poly5_CM5(curve_x_CM5)
### CMIP6 ###
x_CM6 = TT_reg_CM6['TT']
y1_CM6 = SMB_reg_CM6['SMB']
y2_CM6 = ME_reg_CM6['ME']
y3_CM6 = RU_reg_CM6['RU']
y4_CM6 = RF_reg_CM6['RF']
y5_CM6 = RZ_reg_CM6['RZ']
coeff_CM6 = np.polyfit(x_CM6, y1_CM6,2)
poly1_CM6 = np.poly1d(coeff_CM6)
coeff2_CM6 = np.polyfit(x_CM6, y2_CM6, 2)
poly2_CM6 = np.poly1d(coeff2_CM6)
coeff3_CM6 = np.polyfit(x_CM6, y3_CM6, 2)
poly3_CM6 = np.poly1d(coeff3_CM6)
coeff4_CM6 = np.polyfit(x_CM6, y4_CM6, 2)
poly4_CM6 = np.poly1d(coeff4_CM6)
coeff5_CM6 = np.polyfit(x_CM6, y5_CM6, 2)
poly5_CM6 = np.poly1d(coeff5_CM6)
t = np.sort(TT_CMIP6)
curve_x_CM6 = np.linspace(t[0], t[-1])
curve_y1_CM6 = poly1_CM6(curve_x_CM6)
curve_y2_CM6 = poly2_CM6(curve_x_CM6)
curve_y3_CM6 = poly3_CM6(curve_x_CM6)
curve_y4_CM6 = poly4_CM6(curve_x_CM6)
curve_y5_CM6 = poly5_CM6(curve_x_CM6)
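# Hedged aside (not in the original analysis): the local sensitivity
# dSMB/dT at a given warming level can be read straight off the quadratic
# fits via the polynomial derivative; the 3.0 degC below is an arbitrary
# illustrative value, not one used elsewhere in this script.
sens_SMB_CM5 = np.polyder(poly1_CM5)   # mmWE per degC, as a poly1d
sens_SMB_CM6 = np.polyder(poly1_CM6)
#print('dSMB/dT at 3 degC:', sens_SMB_CM5(3.0), sens_SMB_CM6(3.0))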
#==========================================================================================
#==========================================================================================
#plt.rcParams.update({
#"text.usetex": True,
#"font.family": 'DejaVu Sans',
#"font.serif": ["Computer Modern Roman"],
#"font.size": 20})
plt.rcParams.update({
"text.usetex": True,
"font.family": 'DejaVu Sans',
"font.serif": ["Computer Modern Roman"],
"axes.labelsize": 12,
"font.size": 12,
"legend.fontsize": 8,
"xtick.labelsize": 20,
"ytick.labelsize": 20,})
#== JOINT PLOT CM5 & CM6 ==
plt.figure(figsize=set_size(width=490)) # ANNUAL
#plt.figure(figsize=set_size(width=460)) #Seasonal
plt.xlabel('Near-surface Temperature anomalies [$^\circ$C]', fontsize = 22)
plt.ylabel('Annual SMB anomalies [mmWE]', fontsize = 22)
#plt.title('Annual SMB component anomalies \n Model Mean of MAR CMIP5 vs. MAR CMIP6 simulations', fontsize=24)
#plt.title('('+season+') SMB component anomalies \n Model Mean of CMIP5 vs. CMIP6 MAR simulations', fontsize=24)
color_CM5 = ['darkolivegreen', 'firebrick','indigo']#,'darkorange','black']
label_CM5 = ['SMB - CMIP5','ME - CMIP5', 'RU - CMIP5']#, 'RZ - CMIP5', 'RF - CMIP5']
for i in range(len(SEB_var_CMIP5)):
plt.scatter(TT_CMIP5, SEB_var_CMIP5[i], label= label_CM5[i], s = 22,color = color_CM5[i], alpha=0.9)#seasonal: s=1, Annual: s=2
sns.set_palette('colorblind')
plt.plot(curve_x_CM5, curve_y1_CM5, color ='darkolivegreen', linewidth=2.5)#, linewidth=0.7)
plt.plot(curve_x_CM5, curve_y2_CM5, color ='firebrick', linewidth=2.5)#, linewidth=0.7)
plt.plot(curve_x_CM5, curve_y3_CM5, color ='indigo', linewidth=2.5)#, linewidth=0.7)
#plt.plot(curve_x_CM5, curve_y4_CM5, color ='black')
#plt.plot(curve_x_CM5, curve_y5_CM5, color ='darkorange')
color_CM6 = ['yellowgreen','lightcoral','mediumpurple']#, 'sandybrown', 'black']
label_CM6 = ['SMB - CMIP6','ME - CMIP6', 'RU - CMIP6']#, 'RZ - CMIP6', 'RF - CMIP5']
for i in range(len(SEB_var_CMIP6)):
plt.scatter(TT_CMIP6, SEB_var_CMIP6[i] ,label = label_CM6[i], marker='+', linewidth=0.8,s= 80,color = color_CM6[i]) #seasnoal: s=5, Annual:s=20, linewidth=0.5,
plt.plot(curve_x_CM6, curve_y1_CM6, '--', color ='yellowgreen', linewidth=2.5)#, linewidth=0.7)#seasonal: linewidth=0.7
plt.plot(curve_x_CM6, curve_y2_CM6, '--',color ='lightcoral', linewidth=2.5)#, linewidth=0.7)
plt.plot(curve_x_CM6, curve_y3_CM6, '--', color ='mediumpurple', linewidth=2.5)#, linewidth=0.7)
#plt.plot(curve_x_CM6, curve_y4_CM6, '--', color ='black')
#plt.plot(curve_x_CM5, curve_y5_CM6, '--', color ='sandybrown')
#if season == 'JJA':
# plt.ylim(-400,400)
#else:
# plt.ylim(-75,75)
#plt.ylim(-150,150)
plt.xlim(-1,8.5)
#remove ticks from xaxis
#x = range(-1,8)
#plt.xticks(x, np.arange(0, 8, step=2)) #Annual and JJA
#Imports
import matplotlib.patches as mpatches
###sns.set_palette('colorblind')
sns.despine()
#-#-#- FANCY LEGEND -#-#-#
import matplotlib.lines as mlines
from matplotlib.legend_handler import HandlerBase
class AnyObjectHandler(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
SMB_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='darkolivegreen')
SMB_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', linewidth=1,color='yellowgreen')
return [SMB_cm5, SMB_cm6]
class AnyObjectHandler2(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
ME_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='firebrick')
ME_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--',linewidth=1, color='lightcoral')
return [ME_cm5, ME_cm6]
class AnyObjectHandler3(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
RU_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='indigo')
RU_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', linewidth=1,color='mediumpurple')
return [RU_cm5, RU_cm6]
class AnyObjectHandler4(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
cm5_dott = mlines.Line2D([11],[3], color='black', marker='o', markersize=7,label='MAR CMIP5')#markersize=7,
return [cm5_dott]
class AnyObjectHandler5(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
cm6_cross = mlines.Line2D([11],[3], color='black', marker='+', markersize=9, label='MAR CMIP6')#markersize=9,
return [cm6_cross]
object1 = HandlerBase()
object2 = HandlerBase()
object3 = HandlerBase()
object4 = HandlerBase()
object5 = HandlerBase()
#plt.legend([object1,object2, object3, object4, object5], ['SMB','ME', 'RU', 'MAR CMIP5','MAR CMIP6'],
# handler_map={object1: AnyObjectHandler(),
# object2:AnyObjectHandler2(),
# object3:AnyObjectHandler3(),
# object4:AnyObjectHandler4(),
# object5:AnyObjectHandler5()},
# fontsize=20,frameon=False,ncol=2, loc='upper left') #fontsize=16
#-#-#-#-#-#-#-#-#-#-#-#-#-#
#plt.legend(ncol=2)
plt.show()
#plt.savefig('/projects/NS9600K/idunnam/Thesis/src/Figures/SMB_anomalies_jointCM5CM6_annual.pdf',bbox_inches='tight',dpi=300)
#plt.savefig('/projects/NS9600K/idunnam/Thesis/src/Figures/SMB_anomalies_jointCM5CM6_'+season+'.pdf', bbox_inches='tight',dpi=300)
#print(season)
#print('TAS:',curve_x_CM5[-1])
#print('CMIP5', 'SMB:', np.round(poly1_CM5(curve_x_CM5[-1]),2))
#print('CMIP6', 'SMB:', np.round(poly1_CM6(curve_x_CM5[-1]),2))
#print('SMB (CMIP6 - CMIP5) =',np.round(poly1_CM6(curve_x_CM5[-1]) - poly1_CM5(curve_x_CM5[-1]),2))
#print('Standard deviation CMIP5:', np.std((poly1_CM5(curve_x_CM5[-1]),2)))
#print('Standard deviation CMIP6:', np.std((poly1_CM6(curve_x_CM5[-1]),2)))
print('TEMPERATURE:')
print('CMIP5',curve_x_CM5[-1])
print('CMIP6',curve_x_CM6[-1])
def R_square(x, y, coeff):
    """Coefficient of determination (R^2) of the polynomial fit `coeff`
    against the data (x, y), rounded to 3 decimals,
    e.g. R_square(x_CM6, y1_CM6, coeff_CM6).
    """
    yhat = np.polyval(coeff, x)       # fitted values
    ybar = np.sum(y) / len(y)        # mean of the observations
    ssres = np.sum((y - yhat)**2)      # residual sum of squares
    sstot = np.sum((y - ybar)**2)      # total sum of squares
    return np.round(1 - ssres / sstot, 3)
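# Quick self-check of R_square (an illustration, not part of the original
# script): data generated exactly by a quadratic should give R^2 = 1.0
# after rounding to 3 decimals.
_xchk = np.linspace(-1., 1., 50)
_ychk = 2.*_xchk**2 - 3.*_xchk + 1.
assert R_square(_xchk, _ychk, np.polyfit(_xchk, _ychk, 2)) == 1.0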
print('R2 - CMIP5')
print('SMB',R_square(x_CM5,y1_CM5,coeff_CM5))
print('ME',R_square(x_CM5,y2_CM5,coeff2_CM5))
print('RU',R_square(x_CM5,y3_CM5,coeff3_CM5))
print('RF',R_square(x_CM5,y4_CM5,coeff4_CM5))
print('RZ',R_square(x_CM5,y5_CM5,coeff5_CM5))
print('R2 - CMIP6')
print('SMB',R_square(x_CM6,y1_CM6,coeff_CM6))
print('ME',R_square(x_CM6,y2_CM6,coeff2_CM6))
print('RU',R_square(x_CM6,y3_CM6,coeff3_CM6))
print('RF',R_square(x_CM6,y4_CM6,coeff4_CM6))
print('RZ',R_square(x_CM6,y5_CM6,coeff5_CM6))
print('SMB: f(x)=',np.round(coeff_CM5[0],2),'x$^2$','+',np.round(coeff_CM5[1],2),'x','+',np.round(coeff_CM5[2],2))
print('SMB: f(x)=',np.round(coeff_CM6[0],3),'x$^2$','+',np.round(coeff_CM6[1],2),'x','+',np.round(coeff_CM6[2],2))
print('ME: f(x)=',np.round(coeff2_CM5[0],2),'x$^2$','+',np.round(coeff2_CM5[1],2),'x','+',np.round(coeff2_CM5[2],2))
print('ME: f(x)=',np.round(coeff2_CM6[0],3),'x$^2$','+',np.round(coeff2_CM6[1],2),'x','+',np.round(coeff2_CM6[2],2))
print('RU: f(x)=',np.round(coeff3_CM5[0],2),'x$^2$','+',np.round(coeff3_CM5[1],2),'x','+',np.round(coeff3_CM5[2],2))
print('RU: f(x)=',np.round(coeff3_CM6[0],3),'x$^2$','+',np.round(coeff3_CM6[1],2),'x','+',np.round(coeff3_CM6[2],2))
"""
ANNUAL:
R2 - CMIP5
SMB 0.96
ME 0.98
RU 0.99
RF 0.98
RZ 0.96
R2 - CMIP6
SMB 0.98
ME 0.99
RU 0.99
RF 0.97
RZ 0.96
JJA:
R2 - CMIP5
SMB 0.99
ME 1.0
RU 1.0
RF 0.96
RZ 0.96
R2 - CMIP6
SMB 0.99
ME 1.0
RU 0.99
RF 0.97
RZ 0.96
SON:
R2 - CMIP5
SMB 0.15
ME 0.94
RU 0.95
RF 0.94
RZ 0.37
R2 - CMIP6
SMB 0.78
ME 0.96
RU 0.97
RF 0.96
RZ 0.22
"""
def stand_dev(x, y, coeff):
    """Residuals of the quadratic fit `coeff` at the points (x, y),
    e.g. stand_dev(x_CM5, y1_CM5, coeff_CM5) or the CMIP6 equivalents."""
    return y - (coeff[0]*x**2 + coeff[1]*x + coeff[2])
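# Hedged reading of the "RANGE" lines in the results block further down
# (an assumption inferred from those numbers, not stated in the original):
# they appear to be fit(TAS) +/- one std of the residuals over the last
# 20 points, e.g. for CMIP5 SMB:
#   spread = np.std(stand_dev(x_CM5, y1_CM5, coeff_CM5)[-20:])
#   rng = (poly1_CM5(TAS) + spread, poly1_CM5(TAS) - spread)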
# TAS = near-surface warming level (degC) at which the fits are evaluated;
# only the seasons analysed so far have a value assigned.
TAS_by_season = {'JJA': 5.4, 'SON': 6.7}
if season not in TAS_by_season:
    raise ValueError('TAS is not defined for season %r' % season)
TAS = TAS_by_season[season]
#for TAS in range(1,6):
print('Season:',season)
print('TAS:', TAS)
print('MAR CMIP5', 'SMB:', np.round(poly1_CM5(TAS),2),'mmWE','std: $\pm$', np.round(np.std(stand_dev(x_CM5,y1_CM5,coeff_CM5)[-20:]),2))
print('MAR CMIP6', 'SMB:', np.round(poly1_CM6(TAS),2),'mmWE','std: $\pm$', np.round(np.std(stand_dev(x_CM6,y1_CM6,coeff_CM6)[-20:]),2))
print('MAR CMIP5', 'ME:', np.round(poly2_CM5(TAS),2),'mmWE','std: $\pm$', np.round(np.std(stand_dev(x_CM5,y2_CM5,coeff2_CM5)[-20:]),2))
print('MAR CMIP6', 'ME:', np.round(poly2_CM6(TAS),2),'mmWE','std: $\pm$', np.round(np.std(stand_dev(x_CM6,y2_CM6,coeff2_CM6)[-20:]),2))
print('MAR CMIP5', 'RU:', np.round(poly3_CM5(TAS),2),'mmWE','std: $\pm$', np.round(np.std(stand_dev(x_CM5,y3_CM5,coeff3_CM5)[-20:]),2))
print('MAR CMIP6', 'RU:', np.round(poly3_CM6(TAS),2),'mmWE','std: $\pm$', np.round(np.std(stand_dev(x_CM6,y3_CM6,coeff3_CM6)[-20:]),2))
"""
TAS: 1
MAR CMIP5 SMB: -3.39 mmWE std: $\pm$ 2.69
RANGE CMIP5: [ -0.69 , -6.08 ]
MAR CMIP6 SMB: -3.68 mmWE std: $\pm$ 2.84
RANGE CMIP6: [ -0.99 , -6.53 ]
ANNUAL
TAS: 2
MAR CMIP5 SMB: -8.05 mmWE std: $\pm$ 5.02
RANGE CMIP5: [ -3.02 , -13.07 ]
MAR CMIP6 SMB: -9.33 mmWE std: $\pm$ 5.67
RANGE CMIP6: [ -4.31 , -15.0 ]
ANNUAL
TAS: 3
MAR CMIP5 SMB: -14.22 mmWE std: $\pm$ 8.11
RANGE CMIP5: [ -6.11 , -22.32 ]
MAR CMIP6 SMB: -17.67 mmWE std: $\pm$ 9.84
RANGE CMIP6: [ -9.56 , -27.51 ]
ANNUAL
TAS: 4
MAR CMIP5 SMB: -21.9 mmWE std: $\pm$ 11.95
RANGE CMIP5: [ -9.95 , -33.85 ]
MAR CMIP6 SMB: -28.69 mmWE std: $\pm$ 15.35
RANGE CMIP6: [ -16.74 , -44.04 ]
ANNUAL
TAS: 5
MAR CMIP5 SMB: -31.09 mmWE std: $\pm$ 16.55
RANGE CMIP5: [ -14.55 , -47.64 ]
MAR CMIP6 SMB: -42.4 mmWE std: $\pm$ 22.2
RANGE CMIP6: [ -25.85 , -64.6 ]
ANNUAL
TAS: 6
MAR CMIP5 SMB: -41.8 mmWE std: $\pm$ 21.9
RANGE CMIP5: [ -19.9 , -63.7 ]
MAR CMIP6 SMB: -58.79 mmWE std: $\pm$ 30.4
RANGE CMIP6: [ -36.89 , -89.19 ]
Season: JJA
TAS: 1
MAR CMIP5 SMB: -14.22 mmWE std: $\pm$ 8.11
RANGE CMIP5: [ -6.11 , -22.34 ]
MAR CMIP6 SMB: -12.13 mmWE std: $\pm$ 7.07
RANGE CMIP6: [ -4.02 , -19.2 ]
Season: JJA
TAS: 2
MAR CMIP5 SMB: -36.29 mmWE std: $\pm$ 19.15
RANGE CMIP5: [ -17.15 , -55.44 ]
MAR CMIP6 SMB: -32.69 mmWE std: $\pm$ 17.34
RANGE CMIP6: [ -13.54 , -50.03 ]
TAS: 3
MAR CMIP5 SMB: -66.58 mmWE std: $\pm$ 34.29
RANGE CMIP5: [ -32.29 , -100.88 ]
MAR CMIP6 SMB: -63.99 mmWE std: $\pm$ 33.0
RANGE CMIP6: [ -29.7 , -96.99 ]
TAS: 4
MAR CMIP5 SMB: -105.1 mmWE std: $\pm$ 53.55
RANGE CMIP5: [ -51.55 , -158.65 ]
MAR CMIP6 SMB: -106.05 mmWE std: $\pm$ 54.03
RANGE CMIP6: [ -52.5 , -160.08 ]
Season: JJA
TAS: 5
MAR CMIP5 SMB: -151.84 mmWE std: $\pm$ 76.92
RANGE CMIP5: [ -74.92 , -228.77 ]
MAR CMIP6 SMB: -158.86 mmWE std: $\pm$ 80.43
RANGE CMIP6: [ -81.94 , -239.3 ]
"""
| [
"numpy.polyfit",
"matplotlib.pyplot.ylabel",
"latex_size.set_size",
"matplotlib.pyplot.Line2D",
"numpy.poly1d",
"matplotlib.lines.Line2D",
"seaborn.despine",
"numpy.sort",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.polyval",
"matplotlib.pyplot.scatter",
... | [((8821, 8942), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/ACCESS_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/ACCESS_anomaly_'\n + season + '.nc')\n", (8836, 8942), True, 'import xarray as xr\n'), ((8938, 9063), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/HADGEM_anomaly_'\n + season + '_SMB.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/HADGEM_anomaly_'\n + season + '_SMB.nc')\n", (8953, 9063), True, 'import xarray as xr\n'), ((9059, 9179), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CSIRO_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CSIRO_anomaly_'\n + season + '.nc')\n", (9074, 9179), True, 'import xarray as xr\n'), ((9176, 9295), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/IPSL_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/IPSL_anomaly_'\n + season + '.nc')\n", (9191, 9295), True, 'import xarray as xr\n'), ((9293, 9414), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MIROC5_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MIROC5_anomaly_'\n + season + '.nc')\n", (9308, 9414), True, 'import xarray as xr\n'), ((9411, 9532), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/NORESM_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/NORESM_anomaly_'\n + season + '.nc')\n", (9426, 9532), True, 'import xarray as xr\n'), ((9541, 9660), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CESM_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CESM_anomaly_'\n + season + '.nc')\n", (9556, 9660), True, 'import xarray as xr\n'), ((9659, 9782), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_CM6_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_CM6_anomaly_'\n + season + '.nc')\n", (9674, 9782), True, 'import xarray as xr\n'), ((9781, 9905), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_ESM2_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/CNRM_ESM2_anomaly_'\n + season + '.nc')\n", (9796, 9905), True, 'import xarray as xr\n'), ((9904, 10022), 'xarray.open_dataset', 'xr.open_dataset', (["('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MRI_anomaly_' +\n season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/MRI_anomaly_'\n + season + '.nc')\n", (9919, 10022), True, 'import xarray as xr\n'), ((10028, 10147), 'xarray.open_dataset', 'xr.open_dataset', (["(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/UKMO_anomaly_'\n + season + '.nc')"], {}), "(\n '/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_seasonal/UKMO_anomaly_'\n + season + '.nc')\n", (10043, 10147), True, 'import xarray as 
xr\n'), ((13144, 13172), 'numpy.polyfit', 'np.polyfit', (['x_CM5', 'y1_CM5', '(2)'], {}), '(x_CM5, y1_CM5, 2)\n', (13154, 13172), True, 'import numpy as np\n'), ((13185, 13205), 'numpy.poly1d', 'np.poly1d', (['coeff_CM5'], {}), '(coeff_CM5)\n', (13194, 13205), True, 'import numpy as np\n'), ((13220, 13248), 'numpy.polyfit', 'np.polyfit', (['x_CM5', 'y2_CM5', '(2)'], {}), '(x_CM5, y2_CM5, 2)\n', (13230, 13248), True, 'import numpy as np\n'), ((13261, 13282), 'numpy.poly1d', 'np.poly1d', (['coeff2_CM5'], {}), '(coeff2_CM5)\n', (13270, 13282), True, 'import numpy as np\n'), ((13297, 13325), 'numpy.polyfit', 'np.polyfit', (['x_CM5', 'y3_CM5', '(2)'], {}), '(x_CM5, y3_CM5, 2)\n', (13307, 13325), True, 'import numpy as np\n'), ((13338, 13359), 'numpy.poly1d', 'np.poly1d', (['coeff3_CM5'], {}), '(coeff3_CM5)\n', (13347, 13359), True, 'import numpy as np\n'), ((13374, 13402), 'numpy.polyfit', 'np.polyfit', (['x_CM5', 'y4_CM5', '(2)'], {}), '(x_CM5, y4_CM5, 2)\n', (13384, 13402), True, 'import numpy as np\n'), ((13415, 13436), 'numpy.poly1d', 'np.poly1d', (['coeff4_CM5'], {}), '(coeff4_CM5)\n', (13424, 13436), True, 'import numpy as np\n'), ((13451, 13479), 'numpy.polyfit', 'np.polyfit', (['x_CM5', 'y5_CM5', '(2)'], {}), '(x_CM5, y5_CM5, 2)\n', (13461, 13479), True, 'import numpy as np\n'), ((13492, 13513), 'numpy.poly1d', 'np.poly1d', (['coeff5_CM5'], {}), '(coeff5_CM5)\n', (13501, 13513), True, 'import numpy as np\n'), ((13521, 13538), 'numpy.sort', 'np.sort', (['TT_CMIP5'], {}), '(TT_CMIP5)\n', (13528, 13538), True, 'import numpy as np\n'), ((13553, 13577), 'numpy.linspace', 'np.linspace', (['t[0]', 't[-1]'], {}), '(t[0], t[-1])\n', (13564, 13577), True, 'import numpy as np\n'), ((13955, 13983), 'numpy.polyfit', 'np.polyfit', (['x_CM6', 'y1_CM6', '(2)'], {}), '(x_CM6, y1_CM6, 2)\n', (13965, 13983), True, 'import numpy as np\n'), ((13995, 14015), 'numpy.poly1d', 'np.poly1d', (['coeff_CM6'], {}), '(coeff_CM6)\n', (14004, 14015), True, 'import numpy as np\n'), ((14030, 14058), 'numpy.polyfit', 'np.polyfit', (['x_CM6', 'y2_CM6', '(2)'], {}), '(x_CM6, y2_CM6, 2)\n', (14040, 14058), True, 'import numpy as np\n'), ((14071, 14092), 'numpy.poly1d', 'np.poly1d', (['coeff2_CM6'], {}), '(coeff2_CM6)\n', (14080, 14092), True, 'import numpy as np\n'), ((14107, 14135), 'numpy.polyfit', 'np.polyfit', (['x_CM6', 'y3_CM6', '(2)'], {}), '(x_CM6, y3_CM6, 2)\n', (14117, 14135), True, 'import numpy as np\n'), ((14148, 14169), 'numpy.poly1d', 'np.poly1d', (['coeff3_CM6'], {}), '(coeff3_CM6)\n', (14157, 14169), True, 'import numpy as np\n'), ((14184, 14212), 'numpy.polyfit', 'np.polyfit', (['x_CM6', 'y4_CM6', '(2)'], {}), '(x_CM6, y4_CM6, 2)\n', (14194, 14212), True, 'import numpy as np\n'), ((14225, 14246), 'numpy.poly1d', 'np.poly1d', (['coeff4_CM6'], {}), '(coeff4_CM6)\n', (14234, 14246), True, 'import numpy as np\n'), ((14261, 14289), 'numpy.polyfit', 'np.polyfit', (['x_CM6', 'y5_CM6', '(2)'], {}), '(x_CM6, y5_CM6, 2)\n', (14271, 14289), True, 'import numpy as np\n'), ((14302, 14323), 'numpy.poly1d', 'np.poly1d', (['coeff5_CM6'], {}), '(coeff5_CM6)\n', (14311, 14323), True, 'import numpy as np\n'), ((14330, 14347), 'numpy.sort', 'np.sort', (['TT_CMIP6'], {}), '(TT_CMIP6)\n', (14337, 14347), True, 'import numpy as np\n'), ((14362, 14386), 'numpy.linspace', 'np.linspace', (['t[0]', 't[-1]'], {}), '(t[0], t[-1])\n', (14373, 14386), True, 'import numpy as np\n'), ((14902, 15134), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.family': 'DejaVu Sans', 'font.serif': [\n 
'Computer Modern Roman'], 'axes.labelsize': 12, 'font.size': 12,\n 'legend.fontsize': 8, 'xtick.labelsize': 20, 'ytick.labelsize': 20}"], {}), "({'text.usetex': True, 'font.family': 'DejaVu Sans',\n 'font.serif': ['Computer Modern Roman'], 'axes.labelsize': 12,\n 'font.size': 12, 'legend.fontsize': 8, 'xtick.labelsize': 20,\n 'ytick.labelsize': 20})\n", (14921, 15134), True, 'import matplotlib.pyplot as plt\n'), ((15254, 15328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Near-surface Temperature anomalies [$^\\\\circ$C]"""'], {'fontsize': '(22)'}), "('Near-surface Temperature anomalies [$^\\\\circ$C]', fontsize=22)\n", (15264, 15328), True, 'import matplotlib.pyplot as plt\n'), ((15330, 15384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Annual SMB anomalies [mmWE]"""'], {'fontsize': '(22)'}), "('Annual SMB anomalies [mmWE]', fontsize=22)\n", (15340, 15384), True, 'import matplotlib.pyplot as plt\n'), ((15982, 16056), 'matplotlib.pyplot.plot', 'plt.plot', (['curve_x_CM5', 'curve_y1_CM5'], {'color': '"""darkolivegreen"""', 'linewidth': '(2.5)'}), "(curve_x_CM5, curve_y1_CM5, color='darkolivegreen', linewidth=2.5)\n", (15990, 16056), True, 'import matplotlib.pyplot as plt\n'), ((16077, 16146), 'matplotlib.pyplot.plot', 'plt.plot', (['curve_x_CM5', 'curve_y2_CM5'], {'color': '"""firebrick"""', 'linewidth': '(2.5)'}), "(curve_x_CM5, curve_y2_CM5, color='firebrick', linewidth=2.5)\n", (16085, 16146), True, 'import matplotlib.pyplot as plt\n'), ((16167, 16233), 'matplotlib.pyplot.plot', 'plt.plot', (['curve_x_CM5', 'curve_y3_CM5'], {'color': '"""indigo"""', 'linewidth': '(2.5)'}), "(curve_x_CM5, curve_y3_CM5, color='indigo', linewidth=2.5)\n", (16175, 16233), True, 'import matplotlib.pyplot as plt\n'), ((16751, 16828), 'matplotlib.pyplot.plot', 'plt.plot', (['curve_x_CM6', 'curve_y1_CM6', '"""--"""'], {'color': '"""yellowgreen"""', 'linewidth': '(2.5)'}), "(curve_x_CM6, curve_y1_CM6, '--', color='yellowgreen', linewidth=2.5)\n", (16759, 16828), True, 'import matplotlib.pyplot as plt\n'), ((16873, 16949), 'matplotlib.pyplot.plot', 'plt.plot', (['curve_x_CM6', 'curve_y2_CM6', '"""--"""'], {'color': '"""lightcoral"""', 'linewidth': '(2.5)'}), "(curve_x_CM6, curve_y2_CM6, '--', color='lightcoral', linewidth=2.5)\n", (16881, 16949), True, 'import matplotlib.pyplot as plt\n'), ((16969, 17047), 'matplotlib.pyplot.plot', 'plt.plot', (['curve_x_CM6', 'curve_y3_CM6', '"""--"""'], {'color': '"""mediumpurple"""', 'linewidth': '(2.5)'}), "(curve_x_CM6, curve_y3_CM6, '--', color='mediumpurple', linewidth=2.5)\n", (16977, 17047), True, 'import matplotlib.pyplot as plt\n'), ((17290, 17307), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(8.5)'], {}), '(-1, 8.5)\n', (17298, 17307), True, 'import matplotlib.pyplot as plt\n'), ((17493, 17506), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (17504, 17506), True, 'import seaborn as sns\n'), ((19570, 19583), 'matplotlib.legend_handler.HandlerBase', 'HandlerBase', ([], {}), '()\n', (19581, 19583), False, 'from matplotlib.legend_handler import HandlerBase\n'), ((19594, 19607), 'matplotlib.legend_handler.HandlerBase', 'HandlerBase', ([], {}), '()\n', (19605, 19607), False, 'from matplotlib.legend_handler import HandlerBase\n'), ((19618, 19631), 'matplotlib.legend_handler.HandlerBase', 'HandlerBase', ([], {}), '()\n', (19629, 19631), False, 'from matplotlib.legend_handler import HandlerBase\n'), ((19642, 19655), 'matplotlib.legend_handler.HandlerBase', 'HandlerBase', ([], {}), '()\n', (19653, 19655), False, 'from matplotlib.legend_handler import 
HandlerBase\n'), ((19666, 19679), 'matplotlib.legend_handler.HandlerBase', 'HandlerBase', ([], {}), '()\n', (19677, 19679), False, 'from matplotlib.legend_handler import HandlerBase\n'), ((20186, 20196), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20194, 20196), True, 'import matplotlib.pyplot as plt\n'), ((15815, 15916), 'matplotlib.pyplot.scatter', 'plt.scatter', (['TT_CMIP5', 'SEB_var_CMIP5[i]'], {'label': 'label_CM5[i]', 's': '(22)', 'color': 'color_CM5[i]', 'alpha': '(0.9)'}), '(TT_CMIP5, SEB_var_CMIP5[i], label=label_CM5[i], s=22, color=\n color_CM5[i], alpha=0.9)\n', (15826, 15916), True, 'import matplotlib.pyplot as plt\n'), ((15947, 15976), 'seaborn.set_palette', 'sns.set_palette', (['"""colorblind"""'], {}), "('colorblind')\n", (15962, 15976), True, 'import seaborn as sns\n'), ((16577, 16693), 'matplotlib.pyplot.scatter', 'plt.scatter', (['TT_CMIP6', 'SEB_var_CMIP6[i]'], {'label': 'label_CM6[i]', 'marker': '"""+"""', 'linewidth': '(0.8)', 's': '(80)', 'color': 'color_CM6[i]'}), "(TT_CMIP6, SEB_var_CMIP6[i], label=label_CM6[i], marker='+',\n linewidth=0.8, s=80, color=color_CM6[i])\n", (16588, 16693), True, 'import matplotlib.pyplot as plt\n'), ((21097, 21113), 'numpy.poly1d', 'np.poly1d', (['coeff'], {}), '(coeff)\n', (21106, 21113), True, 'import numpy as np\n'), ((21144, 21164), 'numpy.polyval', 'np.polyval', (['coeff', 'x'], {}), '(coeff, x)\n', (21154, 21164), True, 'import numpy as np\n'), ((21296, 21320), 'numpy.sum', 'np.sum', (['((y - curve) ** 2)'], {}), '((y - curve) ** 2)\n', (21302, 21320), True, 'import numpy as np\n'), ((21381, 21404), 'numpy.sum', 'np.sum', (['((y - ybar) ** 2)'], {}), '((y - ybar) ** 2)\n', (21387, 21404), True, 'import numpy as np\n'), ((21490, 21511), 'numpy.round', 'np.round', (['R_square', '(3)'], {}), '(R_square, 3)\n', (21498, 21511), True, 'import numpy as np\n'), ((22036, 22061), 'numpy.round', 'np.round', (['coeff_CM5[0]', '(2)'], {}), '(coeff_CM5[0], 2)\n', (22044, 22061), True, 'import numpy as np\n'), ((22073, 22098), 'numpy.round', 'np.round', (['coeff_CM5[1]', '(2)'], {}), '(coeff_CM5[1], 2)\n', (22081, 22098), True, 'import numpy as np\n'), ((22106, 22131), 'numpy.round', 'np.round', (['coeff_CM5[2]', '(2)'], {}), '(coeff_CM5[2], 2)\n', (22114, 22131), True, 'import numpy as np\n'), ((22151, 22176), 'numpy.round', 'np.round', (['coeff_CM6[0]', '(3)'], {}), '(coeff_CM6[0], 3)\n', (22159, 22176), True, 'import numpy as np\n'), ((22188, 22213), 'numpy.round', 'np.round', (['coeff_CM6[1]', '(2)'], {}), '(coeff_CM6[1], 2)\n', (22196, 22213), True, 'import numpy as np\n'), ((22221, 22246), 'numpy.round', 'np.round', (['coeff_CM6[2]', '(2)'], {}), '(coeff_CM6[2], 2)\n', (22229, 22246), True, 'import numpy as np\n'), ((22266, 22292), 'numpy.round', 'np.round', (['coeff2_CM5[0]', '(2)'], {}), '(coeff2_CM5[0], 2)\n', (22274, 22292), True, 'import numpy as np\n'), ((22304, 22330), 'numpy.round', 'np.round', (['coeff2_CM5[1]', '(2)'], {}), '(coeff2_CM5[1], 2)\n', (22312, 22330), True, 'import numpy as np\n'), ((22338, 22364), 'numpy.round', 'np.round', (['coeff2_CM5[2]', '(2)'], {}), '(coeff2_CM5[2], 2)\n', (22346, 22364), True, 'import numpy as np\n'), ((22383, 22409), 'numpy.round', 'np.round', (['coeff2_CM6[0]', '(3)'], {}), '(coeff2_CM6[0], 3)\n', (22391, 22409), True, 'import numpy as np\n'), ((22421, 22447), 'numpy.round', 'np.round', (['coeff2_CM6[1]', '(2)'], {}), '(coeff2_CM6[1], 2)\n', (22429, 22447), True, 'import numpy as np\n'), ((22455, 22481), 'numpy.round', 'np.round', (['coeff2_CM6[2]', '(2)'], {}), '(coeff2_CM6[2], 2)\n', (22463, 22481), True, 'import numpy as np\n'), ((22501, 22527), 'numpy.round', 'np.round', (['coeff3_CM5[0]', '(2)'], {}), '(coeff3_CM5[0], 2)\n', (22509, 22527), True, 'import numpy as np\n'), ((22539, 22565), 'numpy.round', 'np.round', (['coeff3_CM5[1]', '(2)'], {}), '(coeff3_CM5[1], 2)\n', (22547, 22565), True, 'import numpy as np\n'), ((22573, 22599), 'numpy.round', 'np.round', (['coeff3_CM5[2]', '(2)'], {}), '(coeff3_CM5[2], 2)\n', (22581, 22599), True, 'import numpy as np\n'), ((22618, 22644), 'numpy.round', 'np.round', (['coeff3_CM6[0]', '(3)'], {}), '(coeff3_CM6[0], 3)\n', (22626, 22644), True, 'import numpy as np\n'), ((22656, 22682), 'numpy.round', 'np.round', (['coeff3_CM6[1]', '(2)'], {}), '(coeff3_CM6[1], 2)\n', (22664, 22682), True, 'import numpy as np\n'), ((22690, 22716), 'numpy.round', 'np.round', (['coeff3_CM6[2]', '(2)'], {}), '(coeff3_CM6[2], 2)\n', (22698, 22716), True, 'import numpy as np\n'), ((15174, 15193), 'latex_size.set_size', 'set_size', ([], {'width': '(490)'}), '(width=490)\n', (15182, 15193), False, 'from latex_size import set_size\n'), ((17789, 17876), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[x0, y0 + width]', '[0.7 * height, 0.7 * height]'], {'color': '"""darkolivegreen"""'}), "([x0, y0 + width], [0.7 * height, 0.7 * height], color=\n 'darkolivegreen')\n", (17799, 17876), True, 'import matplotlib.pyplot as plt\n'), ((17932, 18044), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[x0, y0 + width]', '[0.3 * height, 0.3 * height]'], {'linestyle': '"""--"""', 'linewidth': '(1)', 'color': '"""yellowgreen"""'}), "([x0, y0 + width], [0.3 * height, 0.3 * height], linestyle='--',\n linewidth=1, color='yellowgreen')\n", (17942, 18044), True, 'import matplotlib.pyplot as plt\n'), ((18242, 18319), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[x0, y0 + width]', '[0.7 * height, 0.7 * height]'], {'color': '"""firebrick"""'}), "([x0, y0 + width], [0.7 * height, 0.7 * height], color='firebrick')\n", (18252, 18319), True, 'import matplotlib.pyplot as plt\n'), ((18379, 18490), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[x0, y0 + width]', '[0.3 * height, 0.3 * height]'], {'linestyle': '"""--"""', 'linewidth': '(1)', 'color': '"""lightcoral"""'}), "([x0, y0 + width], [0.3 * height, 0.3 * height], linestyle='--',\n linewidth=1, color='lightcoral')\n", (18389, 18490), True, 'import matplotlib.pyplot as plt\n'), ((18686, 18760), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[x0, y0 + width]', '[0.7 * height, 0.7 * height]'], {'color': '"""indigo"""'}), "([x0, y0 + width], [0.7 * height, 0.7 * height], color='indigo')\n", (18696, 18760), True, 'import matplotlib.pyplot as plt\n'), ((18820, 18933), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['[x0, y0 + width]', '[0.3 * height, 0.3 * height]'], {'linestyle': '"""--"""', 'linewidth': '(1)', 'color': '"""mediumpurple"""'}), "([x0, y0 + width], [0.3 * height, 0.3 * height], linestyle='--',\n linewidth=1, color='mediumpurple')\n", (18830, 18933), True, 'import matplotlib.pyplot as plt\n'), ((19131, 19220), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[11]', '[3]'], {'color': '"""black"""', 'marker': '"""o"""', 'markersize': '(7)', 'label': '"""MAR CMIP5"""'}), "([11], [3], color='black', marker='o', markersize=7, label=\n 'MAR CMIP5')\n", (19144, 19220), True, 'import matplotlib.lines as mlines\n'), ((19434, 19523), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[11]', '[3]'], {'color': '"""black"""', 'marker': '"""+"""', 'markersize': '(9)', 'label': '"""MAR CMIP6"""'}), "([11], [3], color='black', marker='+', markersize=9, label=\n 'MAR CMIP6')\n", (19447, 19523), True, 'import matplotlib.lines as mlines\n'), ((21239, 21248), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (21245, 21248), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import numpy as np
import numpy.ma as ma
from collections import namedtuple
from mra_sympy import MRA
TWO_PI = 2.*math.pi
INTEGRAL_FILE = "wavelet_data/wavelet_base%d_power%d_i%d_%s.dat"
class Point(namedtuple('Point', ['x', 'y', 'z'])):
__slots__ = ()
def __new__(cls, x, y, z):
x, y, z = [int(round(i)) for i in (x, y, z)]
return super().__new__(cls, x, y, z)
def __add__(self, other):
if not isinstance(other, Point):
other = Point._make(other)
return Point(self.x+other.x, self.y+other.y, self.z+other.z)
def __mul__(self, scalar):
return Point(scalar*self.x, scalar*self.y, scalar*self.z)
def __rmul__(self, scalar):
return self.__mul__(scalar)
def __truediv__(self, scalar):
return Point(self.x/scalar, self.y/scalar, self.z/scalar)
def __rtruediv__(self, scalar):
return Point(scalar/self.x, scalar/self.y, scalar/self.z)
class Field(namedtuple('Field', ['x'])):
__slots__ = ()
@property
def y(self):
return np.moveaxis(self.x, [1, 2, 3], [2, 3, 1])
@property
def z(self):
return np.moveaxis(self.x, [1, 2, 3], [3, 1, 2])
def read_wavelet_integrals(base, exp, q):
assert base in (2, 3)
assert exp in (2, 3, 4)
assert q == 4
a, b = MRA._compute_or_read_cached_results(q)
n = 2*base**exp
s = slice(-2, 2, 1j*n)
x, y, z = np.ogrid[s,s,s]
mask = (x**2 + y**2 + z**2 <= 4)
I = np.zeros((2, q, n, n, n))
for p in range(q):
I[0,p][mask] = np.genfromtxt(INTEGRAL_FILE % (base, exp, p, "lower"))
I[1,p][mask] = np.genfromtxt(INTEGRAL_FILE % (base, exp, p, "upper"))
# force symmetry
# XXX: with base==2 and exp==4 there appear to be numerical errors near the
# edge of the spherical domain
I[...,n//2:,:,:] = -I[...,:n//2,:,:][...,::-1,:,:]
# I[...,:,n//2:,:] = I[...,:,:n//2,:][...,:,::-1,:]
# I[...,:,:,n//2:] = I[...,:,:,:n//2][...,:,:,::-1]
# symmetrize last axes
I = (I + I.swapaxes(-1, -2)) / 2.
# I_{lower/upper}.shape == (q, n, n, n)
I_lower = np.sum(a[:,:,None,None,None] * I[0][None,...], axis=1)
I_upper = np.sum(b[:,:,None,None,None] * I[1][None,...], axis=1)
return Field(ma.array(
I_lower + I_upper,
mask=np.broadcast_to(~mask, (q, n, n, n)),
hard_mask=True
))
def _vdiff(v, a):
s1 = [slice(None, -1)]*3
s2 = s1.copy()
s1[a] = slice(1, None)
return v[tuple(s1)] - v[tuple(s2)]
def vorticity(v, dx):
o = np.zeros([s-1 for s in v.shape[:3]]+[3])
o[...,0] = _vdiff(v[...,2], 1) - _vdiff(v[...,1], 2)
o[...,1] = _vdiff(v[...,0], 2) - _vdiff(v[...,2], 0)
o[...,2] = _vdiff(v[...,1], 0) - _vdiff(v[...,0], 1)
return o / dx
def div(v, dx):
return sum([_vdiff(v[...,i], i) for i in range(3)]) / dx
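# Illustrative check of the finite-difference operators above (added
# example; the grid size is an arbitrary assumption, not from the original):
if __name__ == "__main__":
    v = np.random.rand(8, 8, 8, 3)      # toy velocity field on an 8^3 grid
    dx = 0.1
    print(vorticity(v, dx).shape)      # (7, 7, 7, 3) -- one cell lost per axis
    print(div(v, dx).shape)            # (7, 7, 7)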
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
| [
"collections.namedtuple",
"numpy.broadcast_to",
"numpy.sum",
"numpy.zeros",
"mra_sympy.MRA._compute_or_read_cached_results",
"numpy.moveaxis",
"numpy.genfromtxt"
] | [((265, 301), 'collections.namedtuple', 'namedtuple', (['"""Point"""', "['x', 'y', 'z']"], {}), "('Point', ['x', 'y', 'z'])\n", (275, 301), False, 'from collections import namedtuple\n'), ((1013, 1039), 'collections.namedtuple', 'namedtuple', (['"""Field"""', "['x']"], {}), "('Field', ['x'])\n", (1023, 1039), False, 'from collections import namedtuple\n'), ((1365, 1403), 'mra_sympy.MRA._compute_or_read_cached_results', 'MRA._compute_or_read_cached_results', (['q'], {}), '(q)\n', (1400, 1403), False, 'from mra_sympy import MRA\n'), ((1526, 1551), 'numpy.zeros', 'np.zeros', (['(2, q, n, n, n)'], {}), '((2, q, n, n, n))\n', (1534, 1551), True, 'import numpy as np\n'), ((2163, 2222), 'numpy.sum', 'np.sum', (['(a[:, :, None, None, None] * I[0][None, ...])'], {'axis': '(1)'}), '(a[:, :, None, None, None] * I[0][None, ...], axis=1)\n', (2169, 2222), True, 'import numpy as np\n'), ((2232, 2291), 'numpy.sum', 'np.sum', (['(b[:, :, None, None, None] * I[1][None, ...])'], {'axis': '(1)'}), '(b[:, :, None, None, None] * I[1][None, ...], axis=1)\n', (2238, 2291), True, 'import numpy as np\n'), ((2589, 2635), 'numpy.zeros', 'np.zeros', (['([(s - 1) for s in v.shape[:3]] + [3])'], {}), '([(s - 1) for s in v.shape[:3]] + [3])\n', (2597, 2635), True, 'import numpy as np\n'), ((1107, 1148), 'numpy.moveaxis', 'np.moveaxis', (['self.x', '[1, 2, 3]', '[2, 3, 1]'], {}), '(self.x, [1, 2, 3], [2, 3, 1])\n', (1118, 1148), True, 'import numpy as np\n'), ((1195, 1236), 'numpy.moveaxis', 'np.moveaxis', (['self.x', '[1, 2, 3]', '[3, 1, 2]'], {}), '(self.x, [1, 2, 3], [3, 1, 2])\n', (1206, 1236), True, 'import numpy as np\n'), ((1599, 1653), 'numpy.genfromtxt', 'np.genfromtxt', (["(INTEGRAL_FILE % (base, exp, p, 'lower'))"], {}), "(INTEGRAL_FILE % (base, exp, p, 'lower'))\n", (1612, 1653), True, 'import numpy as np\n'), ((1677, 1731), 'numpy.genfromtxt', 'np.genfromtxt', (["(INTEGRAL_FILE % (base, exp, p, 'upper'))"], {}), "(INTEGRAL_FILE % (base, exp, p, 'upper'))\n", (1690, 1731), True, 'import numpy as np\n'), ((2355, 2391), 'numpy.broadcast_to', 'np.broadcast_to', (['(~mask)', '(q, n, n, n)'], {}), '(~mask, (q, n, n, n))\n', (2370, 2391), True, 'import numpy as np\n')] |
import numpy as np
import torch
from .base_model import BaseModel
from . import networks
from .nce import PatchNCELoss
import util.util as util
class CUTModel(BaseModel):
""" This class implements CUT and FastCUT model
The code borrows heavily from the PyTorch implementation of CycleGAN
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
""" Configures options specific for CUT model
"""
parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)')
parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))')
parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
parser.add_argument('--netF', type=str, default='mlp_sample', help='downsample the feature map: sample | reshape | mlp_sample')
parser.add_argument('--netF_nc', type=int, default=256)
parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
parser.add_argument('--flip_equivariance',
type=util.str2bool, nargs='?', const=True, default=False,
help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
parser.set_defaults(pool_size=0) # no image pooling
opt, _ = parser.parse_known_args()
# Set default parameters for CUT and FastCUT
if opt.CUT_mode.lower() == "cut":
parser.set_defaults(nce_idt=True, lambda_NCE=1.0)
elif opt.CUT_mode.lower() == "fastcut":
parser.set_defaults(
nce_idt=False, lambda_NCE=10.0, flip_equivariance=True,
n_epochs=150, n_epochs_decay=50
)
else:
raise ValueError(opt.CUT_mode)
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
# specify the training losses you want to print out.
# The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
self.visual_names = ['real_A', 'fake_B', 'real_B']
self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
if opt.nce_idt and self.isTrain:
self.loss_names += ['NCE_Y']
self.visual_names += ['idt_B']
if self.isTrain:
self.model_names = ['G', 'F', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
if self.isTrain:
self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionNCE = []
for nce_layer in self.nce_layers:
self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
self.criterionIdt = torch.nn.L1Loss().to(self.device)
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def data_dependent_initialize(self):
"""
The feature network netF is defined in terms of the shape of the intermediate, extracted
features of the encoder portion of netG. Because of this, the weights of netF are
initialized at the first feedforward pass with some input images.
Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
"""
bs_per_gpu = self.real_A.size(0) // len(self.opt.gpu_ids)
self.real_A = self.real_A[:bs_per_gpu]
self.real_B = self.real_B[:bs_per_gpu]
self.forward() # compute fake images: G(A)
if self.opt.isTrain:
self.backward_D() # calculate gradients for D
            self.backward_G()                   # calculate gradients for G
if self.opt.lambda_NCE > 0.0:
self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
self.optimizers.append(self.optimizer_F)
def optimize_parameters(self):
# forward
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
if self.opt.netF == 'mlp_sample':
self.optimizer_F.zero_grad()
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
if self.opt.netF == 'mlp_sample':
self.optimizer_F.step()
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt else self.real_A
if self.opt.flip_equivariance:
self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
if self.flipped_for_equivariance:
self.real = torch.flip(self.real, [3])
self.fake = self.netG(self.real)
self.fake_B = self.fake[:self.real_A.size(0)]
if self.opt.nce_idt:
self.idt_B = self.fake[self.real_A.size(0):]
def backward_D(self):
if self.opt.lambda_GAN > 0.0:
"""Calculate GAN loss for the discriminator"""
fake = self.fake_B.detach()
# Fake; stop backprop to the generator by detaching fake_B
pred_fake = self.netD(fake)
self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
# Real
pred_real = self.netD(self.real_B)
loss_D_real_unweighted = self.criterionGAN(pred_real, True)
self.loss_D_real = loss_D_real_unweighted.mean()
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
else:
self.loss_D_real, self.loss_D_fake, self.loss_D = 0.0, 0.0, 0.0
def backward_G(self):
"""Calculate GAN and NCE loss for the generator"""
fake = self.fake_B
# First, G(A) should fake the discriminator
if self.opt.lambda_GAN > 0.0:
pred_fake = self.netD(fake)
self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
else:
self.loss_G_GAN = 0.0
if self.opt.lambda_NCE > 0.0:
self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
else:
self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0
if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B)
loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5
else:
loss_NCE_both = self.loss_NCE
self.loss_G = self.loss_G_GAN + loss_NCE_both
self.loss_G.backward()
def calculate_NCE_loss(self, src, tgt):
n_layers = len(self.nce_layers)
feat_q = self.netG(tgt, self.nce_layers, encode_only=True)
if self.opt.flip_equivariance and self.flipped_for_equivariance:
feat_q = [torch.flip(fq, [3]) for fq in feat_q]
feat_k = self.netG(src, self.nce_layers, encode_only=True)
feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None)
feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids)
total_nce_loss = 0.0
for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers):
loss = crit(f_q, f_k) * self.opt.lambda_NCE
total_nce_loss += loss.mean()
return total_nce_loss / n_layers
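# Illustrative sanity check of the flip-equivariance trick above (added
# example, not part of the original model): flipping a batch along the
# width axis twice is the identity, which is why the encoder features can
# simply be un-flipped inside calculate_NCE_loss.
if __name__ == '__main__':
    x = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4)
    assert torch.equal(torch.flip(torch.flip(x, [3]), [3]), x)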
| [
"numpy.random.random",
"torch.nn.L1Loss",
"torch.flip",
"torch.cat"
] | [((6976, 7020), 'torch.cat', 'torch.cat', (['(self.real_A, self.real_B)'], {'dim': '(0)'}), '((self.real_A, self.real_B), dim=0)\n', (6985, 7020), False, 'import torch\n'), ((7263, 7289), 'torch.flip', 'torch.flip', (['self.real', '[3]'], {}), '(self.real, [3])\n', (7273, 7289), False, 'import torch\n'), ((9436, 9455), 'torch.flip', 'torch.flip', (['fq', '[3]'], {}), '(fq, [3])\n', (9446, 9455), False, 'import torch\n'), ((3957, 3974), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (3972, 3974), False, 'import torch\n'), ((7163, 7181), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7179, 7181), True, 'import numpy as np\n')] |
import numpy as np
class binning_container(object):
"""
Define the binning_container data type used for conditional averaging
Example: (See also the example program at the end of the file)
blob_bc = binning_container(num_bins, bin_length, bin_edges,
bin_function, mode)
num_bins: Number of bins, equal to the number of conditional events
bin_length: Length of each bin (each bin is a np.ndarray)
bin_function: Function operating on each conditional window that
determines the bin in which the conditional window data
is put.
mode: Either add or append. If add, the instances adds the
argument interval to the interval in each bin
If mode=='append', the instance appends the argument
interval to the list for each bin
    When calling blob_bc.bin(array), bin_function(array) is evaluated to
    determine which bin the array is added to.
For example
We want to bin 42 conditional events, each 20 samples long in bins where
2.5 < max(event) < 3.5
3.5 < max(event) < 4.5
4.5 < max(event) < 5.5
5.5 < max(event) < 100.0
num_peaks = 42
burst_length = 20
First create the binning container object:
    blob_bc = binning_container(num_peaks, burst_length,
                                np.array([2.5, 3.5, 4.5, 5.5, 100.0]),
                                lambda x: x.max())
Assume that we know the center of the conditional events, f.ex. by
peak_arr = peaks_ne = detect_peaks_1d(ts, burst_separation, burst_threshold)
peak_arr is a np.array, containing the peaks in ts
assert(peak_arr.shape[0] == num_peaks)
Iterate over the peaks and put each conditional event the appropriate bin:
for peak_idx, peak_tidx in enumerate(peak_arr):
# Get a view on the current peak in ts
ts_cut = ts[peak_tidx - 10:peak_tidx + 10]
# This puts ts_cut into the correct bin
blob_bc.bin(ts_cut)
Get binned bursts with 3.5 < max(burst) < 4.5
# blob_bc[1]
This returns a np.array with dimension [n, burst_length],
where n is the number of bursts within this bin.
"""
def __init__(self, num_bins, bin_length, bin_edges, bin_function, mode="add"):
"""
num_bins:......
bin_length:....
bin_edges:.....
bin_function:..
"""
assert mode in ["add", "append"]
self.num_bins = num_bins
self.bin_length = bin_length
self.bin_edges = list(zip(bin_edges[:-1], bin_edges[1:]))
self.bin_function = bin_function
self.mode = mode
self.bin_max = bin_edges.max()
self.bin_min = bin_edges.min()
# Create list of bins
self.bins = []
# Fill the bins
for ibin in np.arange(num_bins):
# If we add to the bins, insert an intervall we keep adding to
if self.mode == "add":
self.bins.append(np.zeros(bin_length, dtype="float64"))
elif self.mode == "append":
self.bins.append([])
self.count = np.zeros(num_bins, dtype="int")
def max(self, array):
return array.max()
def bin(self, array, feval_array=None):
# Bin the data in array into the according bin
# If supplied, use feval_array to determine the bin
# array is binned into.
# If feval_array == None, use array to determine the
# bin used
assert array.size == self.bin_length
# Find the bin we bin ''array'' in
if feval_array is None:
# If feval_array is unspecified, pass ''array'' to bin_function
rv = self.bin_function(array)
else:
# if feval_array is specified, pass ''feval_array'' to bin_function
rv = self.bin_function(feval_array)
# Perform boundary checks of rv against the upper and lower bin
# boundary
if rv > self.bin_max:
raise ValueError("Could not bin array: %f > max(bin_edges)" % rv)
if rv < self.bin_min:
# raise ValueError('Could not bin array: %f < min(bin_edges)' % rv)
raise ValueError("Could not bin array: %f < %f" % (rv, self.bin_min))
idx = np.argwhere(
np.array([(rv > t1) & (rv <= t2) for t1, t2 in self.bin_edges])
)[0][0]
# Add to the appropriate bin
if self.mode == "add":
(self.bins[idx])[:] = (self.bins[idx])[:] + array
elif self.mode == "append":
self.bins[idx].append(array)
# Increase bin counter
self.count[idx] = self.count[idx] + 1
    def get_count(self, bin_idx=None):
        # Return the count of each bin. Renamed from ''count'' because the
        # instance attribute self.count (set in __init__) shadows a method
        # of the same name and would make it uncallable.
        if bin_idx is None:
            return self.count
        else:
            return self.count[bin_idx]
    def get_num_bins(self):
        # self.num_bins is an int, so len(self.num_bins) would raise
        return self.num_bins
def __getitem__(self, idx):
if self.mode == "add":
return self.bins[idx]
elif self.mode == "append":
return np.array(self.bins[idx])
def condvar(self, idx):
"""
Compute the conditional variance of the data stored in bin idx
Input:
======
idx.......int, gives the index to the bin we compute the cond. var from
Output:
=======
cvar.....ndarray(float), the conditional variance at each sampling point
"""
all_bursts = np.array(self.bins[idx])
ca = all_bursts.mean(axis=0)
tmp = all_bursts - ca[np.newaxis, :].repeat(all_bursts.shape[0], axis=0)
return 1.0 - (tmp ** 2.0).mean(axis=0) / (all_bursts ** 2.0).mean(axis=0)
# Exemplary use of binning_container to find conditional averages
if __name__ == "__main__()":
# define the conditional averaging threshold
burst_threshold = 2.5
# define the separation between neighbouring bursts, in samples
burst_separation = 250
# define the length of a burst event, in samples
burst_length = 250
# define the bin boundaries in which we bin the bursts. This corresponds to
# bursts 2.0 <= A < 4.0
# 4.0 <= A < 6.0
# 6.0 <= A 100.0 (100.0 is a maximum and may be the maximum of the time
# series at hand
burst_boundaries = np.array([2.0, 4.0, 6.0, 100.0])
# Now, a get a time series
ts = np.random.uniform(0.0, 5.0, 10000)
# Normalize the time series. This will be our reference time series
    ts_norm = (ts - ts.mean()) / ts.std(ddof=1)
# Lets say we also have another time series, for cross-conditional
# averaging
    ts = np.random.uniform(0.0, 5.0, 10000)  # same length as the reference series
    ts_x = (ts - ts.mean()) / ts.std(ddof=1)
# Get the indices in the time series where a burst is detected
ref_burst_idx = detect_peaks_1d(
ts_norm, burst_separation, burst_threshold, peak_width=5
)
ref_num_bursts = ref_burst_idx.size
print("Detected %d bursts in the signal" % (ref_num_bursts))
# Now we run conditional averaging with the binning container
# For this, we create a binning container that will store all
# sub-intervals around the peaks that were detected in the time series
burst_bc = binning_container(
ref_num_bursts,
2 * burst_length,
burst_boundaries,
lambda x: x.max(),
mode="append",
)
binned_bursts = 0
    # Iterate over the bursts and put each one into its bin
for b_idx, b_tidx in enumerate(ref_burst_idx):
# b_idx is the index of the current burst in the array of all detected bursts
# b_tidx is the index of the current burst in the time series at hand
# First, we create a view on the intervall around the bin in the reference
# signal
ts_ref_cut = ts_norm[b_tidx - burst_length : b_tidx + burst_length]
# Assume we have a another time series for cross-conditional averaging.
# Let's create a view on this time series in the same intervall
ts_x_cut = ts_x[b_tidx - burst_length : b_tidx + burst_length]
        # Now try to bin the burst
try:
# This will add the waveform ts_x_cut into the bin determined by
# the max of the reference waveform
burst_bc.bin(ts_x_cut, feval_array=ts_ref_cut)
binned_bursts += 1
        except (ValueError, AssertionError):
            # The burst fell outside the bin edges, or the cut was
            # truncated at the edge of the time series; skip it.
            continue
    # Now we can compute the conditionally averaged waveform in one
    # amplitude range, e.g. bin 1: 4.0 <= A < 6.0
all_bursts = burst_bc[1]
num_bursts = len(burst_bc[1])
# This is the conditionally averaged waveform
ca = all_bursts.mean(axis=0)
# Compute the conditional variance
tmp = all_bursts - ca[np.newaxis, :].repeat(num_bursts, axis=0)
cvar = 1.0 - (tmp ** 2.0).mean(axis=0) / (all_bursts ** 2.0).mean(axis=0)
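    # Quick inspection of the result (illustrative addition):
    print("binned %d of %d detected bursts" % (binned_bursts, ref_num_bursts))
    print("conditional average shape:", ca.shape,
          "conditional variance shape:", cvar.shape)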
| [
"numpy.array",
"numpy.zeros",
"numpy.arange",
"numpy.random.uniform"
] | [((6263, 6295), 'numpy.array', 'np.array', (['[2.0, 4.0, 6.0, 100.0]'], {}), '([2.0, 4.0, 6.0, 100.0])\n', (6271, 6295), True, 'import numpy as np\n'), ((6337, 6371), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(5.0)', '(10000)'], {}), '(0.0, 5.0, 10000)\n', (6354, 6371), True, 'import numpy as np\n'), ((6589, 6622), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(5.0)', '(1000)'], {}), '(0.0, 5.0, 1000)\n', (6606, 6622), True, 'import numpy as np\n'), ((2801, 2820), 'numpy.arange', 'np.arange', (['num_bins'], {}), '(num_bins)\n', (2810, 2820), True, 'import numpy as np\n'), ((3103, 3134), 'numpy.zeros', 'np.zeros', (['num_bins'], {'dtype': '"""int"""'}), "(num_bins, dtype='int')\n", (3111, 3134), True, 'import numpy as np\n'), ((5438, 5462), 'numpy.array', 'np.array', (['self.bins[idx]'], {}), '(self.bins[idx])\n', (5446, 5462), True, 'import numpy as np\n'), ((5042, 5066), 'numpy.array', 'np.array', (['self.bins[idx]'], {}), '(self.bins[idx])\n', (5050, 5066), True, 'import numpy as np\n'), ((2965, 3002), 'numpy.zeros', 'np.zeros', (['bin_length'], {'dtype': '"""float64"""'}), "(bin_length, dtype='float64')\n", (2973, 3002), True, 'import numpy as np\n'), ((4276, 4341), 'numpy.array', 'np.array', (['[((rv > t1) & (rv <= t2)) for t1, t2 in self.bin_edges]'], {}), '([((rv > t1) & (rv <= t2)) for t1, t2 in self.bin_edges])\n', (4284, 4341), True, 'import numpy as np\n')] |
import numpy as np
from utils import getInitialPoint, setup_logger
from utils.common import ResultManager
logger = setup_logger(__name__)
def minimize(dimension, objective, eps=1e-10, *args, **kwargs):
x = getInitialPoint((dimension,), objective)
try:
objective.grad(x)
except NotImplementedError:
raise AttributeError(
f"Gradient of {objective} is not defined.")
try:
objective.hesse(x)
except NotImplementedError:
raise AttributeError(
f"Hesse matrix of {objective} is not defined.")
nab = objective.grad(x)
try:
H_inv = np.linalg.inv(objective.hesse(x))
except np.linalg.LinAlgError:
logger.critical("Use pseudo inverse matrix.")
H_inv = np.linalg.pinv(objective.hesse(x))
lam = nab.T@H_inv@nab
d = -H_inv@nab
result = ResultManager(objective, __name__, logger, *args, **kwargs)
result.post_process_per_iter(x, x, -1, lam=lam)
if (np.isnan(nab)).any():
logger.critical("gradient is nan.")
t = 0
while lam > eps:
# eig, _ = np.linalg.eig(H_inv)
# assert (eig >= 0).all()
x = x + d
nab = objective.grad(x)
try:
H_inv = np.linalg.inv(objective.hesse(x))
except np.linalg.LinAlgError:
logger.critical("Use pseudo inverse matrix.")
H_inv = np.linalg.pinv(objective.hesse(x))
lam = nab.T@H_inv@nab
d = -H_inv@nab
if result.post_process_per_iter(x, x, t, lam=lam, grad=nab):
break
t += 1
return result
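# Stand-alone illustration of the Newton step used above (added example,
# independent of the utils package): for f(x) = 0.5*x.T@A@x - b.T@x the
# gradient is A@x - b and the Hessian is A, so a single step x - H_inv@grad
# lands exactly on the minimizer.
if __name__ == "__main__":
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([1.0, 1.0])
    x = np.zeros(2)
    x = x - np.linalg.inv(A) @ (A @ x - b)
    print(x, np.linalg.norm(A @ x - b))  # gradient norm is ~0 after one step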
| [
"utils.common.ResultManager",
"utils.getInitialPoint",
"utils.setup_logger",
"numpy.isnan"
] | [((116, 138), 'utils.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (128, 138), False, 'from utils import getInitialPoint, setup_logger\n'), ((213, 253), 'utils.getInitialPoint', 'getInitialPoint', (['(dimension,)', 'objective'], {}), '((dimension,), objective)\n', (228, 253), False, 'from utils import getInitialPoint, setup_logger\n'), ((851, 910), 'utils.common.ResultManager', 'ResultManager', (['objective', '__name__', 'logger', '*args'], {}), '(objective, __name__, logger, *args, **kwargs)\n', (864, 910), False, 'from utils.common import ResultManager\n'), ((971, 984), 'numpy.isnan', 'np.isnan', (['nab'], {}), '(nab)\n', (979, 984), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import cPickle as pickle
import copy
import json
from tqdm import tqdm
from utils.nn import NN
from utils.coco.coco import COCO
from utils.coco.pycocoevalcap.eval import COCOEvalCap
from utils.misc import ImageLoader, CaptionData, TopN
class BaseModel(object):
def __init__(self, config):
self.config = config
self.is_train = True if config.phase == 'train' else False
self.train_cnn = self.is_train and config.train_cnn
self.image_loader = ImageLoader('./DeepRNN/utils/ilsvrc_2012_mean.npy')
self.image_shape = [224, 224, 3]
self.nn = NN(config)
self.global_step = tf.Variable(0,
name = 'global_step',
trainable = False)
self.build()
def build(self):
raise NotImplementedError()
def test(self, sess, test_data, vocabulary):
""" Test the model using any given images. """
config = self.config
# Generate the captions for the images
for k in tqdm(list(range(test_data.num_batches)), desc='path'):
batch = test_data.next_batch()
caption_data = self.beam_search(sess, batch, vocabulary)
fake_cnt = 0 if k<test_data.num_batches-1 \
else test_data.fake_count
for l in range(test_data.batch_size-fake_cnt):
word_idxs = caption_data[l][0].sentence
score = caption_data[l][0].score
caption = vocabulary.get_sentence(word_idxs)
print('**'+caption+'**')
def beam_search(self, sess, image_files, vocabulary):
"""Use beam search to generate the captions for a batch of images."""
# Feed in the images to get the contexts and the initial LSTM states
config = self.config
images = self.image_loader.load_images(image_files)
contexts, initial_memory, initial_output = sess.run(
[self.conv_feats, self.initial_memory, self.initial_output],
feed_dict = {self.images: images})
partial_caption_data = []
complete_caption_data = []
for k in range(config.batch_size):
initial_beam = CaptionData(sentence = [],
memory = initial_memory[k],
output = initial_output[k],
score = 1.0)
partial_caption_data.append(TopN(config.beam_size))
partial_caption_data[-1].push(initial_beam)
complete_caption_data.append(TopN(config.beam_size))
# Run beam search
for idx in range(config.max_caption_length):
partial_caption_data_lists = []
for k in range(config.batch_size):
data = partial_caption_data[k].extract()
partial_caption_data_lists.append(data)
partial_caption_data[k].reset()
num_steps = 1 if idx == 0 else config.beam_size
for b in range(num_steps):
if idx == 0:
last_word = np.zeros((config.batch_size), np.int32)
else:
last_word = np.array([pcl[b].sentence[-1]
for pcl in partial_caption_data_lists],
np.int32)
last_memory = np.array([pcl[b].memory
for pcl in partial_caption_data_lists],
np.float32)
last_output = np.array([pcl[b].output
for pcl in partial_caption_data_lists],
np.float32)
memory, output, scores = sess.run(
[self.memory, self.output, self.probs],
feed_dict = {self.contexts: contexts,
self.last_word: last_word,
self.last_memory: last_memory,
self.last_output: last_output})
# Find the beam_size most probable next words
for k in range(config.batch_size):
caption_data = partial_caption_data_lists[k][b]
words_and_scores = list(enumerate(scores[k]))
words_and_scores.sort(key=lambda x: -x[1])
words_and_scores = words_and_scores[0:config.beam_size+1]
# Append each of these words to the current partial caption
for w, s in words_and_scores:
sentence = caption_data.sentence + [w]
score = caption_data.score * s
beam = CaptionData(sentence,
memory[k],
output[k],
score)
if vocabulary.words[w] == '.':
complete_caption_data[k].push(beam)
else:
partial_caption_data[k].push(beam)
results = []
for k in range(config.batch_size):
if complete_caption_data[k].size() == 0:
complete_caption_data[k] = partial_caption_data[k]
results.append(complete_caption_data[k].extract(sort=True))
return results
def load(self, sess, model_file=None):
""" Load the model. """
config = self.config
if model_file is not None:
save_path = model_file
else:
info_path = os.path.join(config.save_dir, "config.pickle")
info_file = open(info_path, "rb")
config = pickle.load(info_file)
global_step = config.global_step
info_file.close()
save_path = os.path.join(config.save_dir,
str(global_step)+".npy")
data_dict = np.load(save_path).item()
count = 0
for v in tqdm(tf.global_variables()):
if v.name in data_dict.keys():
sess.run(v.assign(data_dict[v.name]))
count += 1
def load_cnn(self, session, data_path, ignore_missing=True):
""" Load a pretrained CNN model. """
data_dict = np.load(data_path).item()
count = 0
for op_name in tqdm(data_dict):
with tf.variable_scope(op_name, reuse = True):
for param_name, data in data_dict[op_name].iteritems():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
count += 1
except ValueError:
pass
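# Minimal illustration of the per-step word selection inside beam_search
# (added example, not part of the original class): keep the beam_size
# most probable next words, exactly as done with words_and_scores above.
if __name__ == '__main__':
    beam_size = 3
    scores = np.array([0.10, 0.40, 0.05, 0.30, 0.15])
    words_and_scores = list(enumerate(scores))
    words_and_scores.sort(key=lambda x: -x[1])
    print(words_and_scores[0:beam_size + 1])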
| [
"tensorflow.variable_scope",
"utils.misc.ImageLoader",
"tensorflow.Variable",
"tensorflow.get_variable",
"tqdm.tqdm",
"os.path.join",
"utils.misc.TopN",
"tensorflow.global_variables",
"utils.misc.CaptionData",
"numpy.array",
"numpy.zeros",
"numpy.load",
"cPickle.load",
"utils.nn.NN"
] | [((584, 635), 'utils.misc.ImageLoader', 'ImageLoader', (['"""./DeepRNN/utils/ilsvrc_2012_mean.npy"""'], {}), "('./DeepRNN/utils/ilsvrc_2012_mean.npy')\n", (595, 635), False, 'from utils.misc import ImageLoader, CaptionData, TopN\n'), ((695, 705), 'utils.nn.NN', 'NN', (['config'], {}), '(config)\n', (697, 705), False, 'from utils.nn import NN\n'), ((733, 784), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (744, 784), True, 'import tensorflow as tf\n'), ((6503, 6518), 'tqdm.tqdm', 'tqdm', (['data_dict'], {}), '(data_dict)\n', (6507, 6518), False, 'from tqdm import tqdm\n'), ((2310, 2401), 'utils.misc.CaptionData', 'CaptionData', ([], {'sentence': '[]', 'memory': 'initial_memory[k]', 'output': 'initial_output[k]', 'score': '(1.0)'}), '(sentence=[], memory=initial_memory[k], output=initial_output[k],\n score=1.0)\n', (2321, 2401), False, 'from utils.misc import ImageLoader, CaptionData, TopN\n'), ((5742, 5788), 'os.path.join', 'os.path.join', (['config.save_dir', '"""config.pickle"""'], {}), "(config.save_dir, 'config.pickle')\n", (5754, 5788), False, 'import os\n'), ((5856, 5878), 'cPickle.load', 'pickle.load', (['info_file'], {}), '(info_file)\n', (5867, 5878), True, 'import cPickle as pickle\n'), ((6157, 6178), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6176, 6178), True, 'import tensorflow as tf\n'), ((2563, 2585), 'utils.misc.TopN', 'TopN', (['config.beam_size'], {}), '(config.beam_size)\n', (2567, 2585), False, 'from utils.misc import ImageLoader, CaptionData, TopN\n'), ((2684, 2706), 'utils.misc.TopN', 'TopN', (['config.beam_size'], {}), '(config.beam_size)\n', (2688, 2706), False, 'from utils.misc import ImageLoader, CaptionData, TopN\n'), ((3486, 3561), 'numpy.array', 'np.array', (['[pcl[b].memory for pcl in partial_caption_data_lists]', 'np.float32'], {}), '([pcl[b].memory for pcl in partial_caption_data_lists], np.float32)\n', (3494, 3561), True, 'import numpy as np\n'), ((3672, 3747), 'numpy.array', 'np.array', (['[pcl[b].output for pcl in partial_caption_data_lists]', 'np.float32'], {}), '([pcl[b].output for pcl in partial_caption_data_lists], np.float32)\n', (3680, 3747), True, 'import numpy as np\n'), ((6091, 6109), 'numpy.load', 'np.load', (['save_path'], {}), '(save_path)\n', (6098, 6109), True, 'import numpy as np\n'), ((6436, 6454), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (6443, 6454), True, 'import numpy as np\n'), ((6537, 6575), 'tensorflow.variable_scope', 'tf.variable_scope', (['op_name'], {'reuse': '(True)'}), '(op_name, reuse=True)\n', (6554, 6575), True, 'import tensorflow as tf\n'), ((3201, 3238), 'numpy.zeros', 'np.zeros', (['config.batch_size', 'np.int32'], {}), '(config.batch_size, np.int32)\n', (3209, 3238), True, 'import numpy as np\n'), ((3295, 3374), 'numpy.array', 'np.array', (['[pcl[b].sentence[-1] for pcl in partial_caption_data_lists]', 'np.int32'], {}), '([pcl[b].sentence[-1] for pcl in partial_caption_data_lists], np.int32)\n', (3303, 3374), True, 'import numpy as np\n'), ((4856, 4906), 'utils.misc.CaptionData', 'CaptionData', (['sentence', 'memory[k]', 'output[k]', 'score'], {}), '(sentence, memory[k], output[k], score)\n', (4867, 4906), False, 'from utils.misc import ImageLoader, CaptionData, TopN\n'), ((6706, 6733), 'tensorflow.get_variable', 'tf.get_variable', (['param_name'], {}), '(param_name)\n', (6721, 6733), True, 'import tensorflow as tf\n')] |
import nbformat as nbf
from glob import glob
import numpy as np
# Collect a list of all notebooks in the content folder
loc = "gmot/docs/**/*.ipynb"
print(f"Looking for notebooks in: {loc}\n")
notebooks = glob(loc, recursive=True)
# Text to look for in adding tags
text_search_dict = {
"# HIDDEN": "remove-cell", # Remove the whole cell
"# NO CODE": "remove-input", # Remove only the input
"# HIDE CODE": "hide-input", # Hide the input w/ a button to show
}
print("Modifying:")
# Search through each notebook and look for the text, add a tag if necessary
for ipath in notebooks:
ntbk = nbf.read(ipath, nbf.NO_CONVERT)
print(ipath)
for cell in ntbk.cells:
if cell.get("cell_type") == "code":
cell_tags = cell.get("metadata", {}).get("tags", [])
cell_tags = list(np.unique(cell_tags))
if "hide-input" not in cell_tags:
cell_tags.append("hide-input")
# for key, val in text_search_dict.items():
# if key in cell["source"]:
# if val not in cell_tags:
# cell_tags.append(val)
if len(cell_tags) > 0:
cell["metadata"]["tags"] = cell_tags
    nbf.write(ntbk, ipath)
| [
"nbformat.read",
"numpy.unique",
"nbformat.write",
"glob.glob"
] | [((206, 231), 'glob.glob', 'glob', (['loc'], {'recursive': '(True)'}), '(loc, recursive=True)\n', (210, 231), False, 'from glob import glob\n'), ((608, 639), 'nbformat.read', 'nbf.read', (['ipath', 'nbf.NO_CONVERT'], {}), '(ipath, nbf.NO_CONVERT)\n', (616, 639), True, 'import nbformat as nbf\n'), ((1226, 1248), 'nbformat.write', 'nbf.write', (['ntbk', 'ipath'], {}), '(ntbk, ipath)\n', (1235, 1248), True, 'import nbformat as nbf\n'), ((823, 843), 'numpy.unique', 'np.unique', (['cell_tags'], {}), '(cell_tags)\n', (832, 843), True, 'import numpy as np\n')] |
import functools
import operator
import sys
import warnings
import numbers
from collections import namedtuple
import inspect
import math
import numpy as np
try:
from numpy.random import Generator as Generator
except ImportError:
class Generator(): # type: ignore[no-redef]
pass
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
Examples
--------
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
... return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice, it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
cond = np.asarray(cond)
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
args = np.broadcast_arrays(cond, *arrays)
cond, arrays = args[0], args[1:]
temp = tuple(np.extract(cond, arr) for arr in arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = np.full(np.shape(arrays[0]), fill_value=fillvalue, dtype=tcode)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
def _lazyselect(condlist, choicelist, arrays, default=0):
"""
Mimic `np.select(condlist, choicelist)`.
Notice, it assumes that all `arrays` are of the same shape or can be
broadcasted together.
All functions in `choicelist` must accept array arguments in the order
given in `arrays` and must return an array of the same shape as broadcasted
`arrays`.
Examples
--------
>>> x = np.arange(6)
>>> np.select([x <3, x > 3], [x**2, x**3], default=0)
array([ 0, 1, 4, 0, 64, 125])
>>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
array([ 0., 1., 4., 0., 64., 125.])
>>> a = -np.ones_like(x)
>>> _lazyselect([x < 3, x > 3],
... [lambda x, a: x**2, lambda x, a: a * x**3],
... (x, a), default=np.nan)
array([ 0., 1., 4., nan, -64., -125.])
"""
arrays = np.broadcast_arrays(*arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
for index in range(len(condlist)):
func, cond = choicelist[index], condlist[index]
if np.all(cond is False):
continue
cond, _ = np.broadcast_arrays(cond, arrays[0])
temp = tuple(np.extract(cond, arr) for arr in arrays)
np.place(out, cond, func(*temp))
return out
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory.
Primary use case for this currently is working around a f2py issue
in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
not necessarily create arrays aligned up to it.
"""
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
def _prune_array(array):
"""Return an array equivalent to the input array. If the input
array is a view of a much larger array, copy its contents to a
newly allocated array. Otherwise, return the input unchanged.
"""
if array.base is not None and array.size < array.base.size // 2:
return array.copy()
return array
def prod(iterable):
"""
Product of a sequence of numbers.
Faster than np.prod for short lists like array shapes, and does
not overflow if using Python integers.
"""
product = 1
for x in iterable:
product *= x
return product
def float_factorial(n: int) -> float:
"""Compute the factorial and return as a float
Returns infinity when result is too large for a double
"""
return float(math.factorial(n)) if n < 171 else np.inf
class DeprecatedImport:
"""
Deprecated import with redirection and warning.
Examples
--------
Suppose you previously had in some module::
from foo import spam
If this has to be deprecated, do::
spam = DeprecatedImport("foo.spam", "baz")
to redirect users to use "baz" module instead.
"""
def __init__(self, old_module_name, new_module_name):
self._old_name = old_module_name
self._new_name = new_module_name
__import__(self._new_name)
self._mod = sys.modules[self._new_name]
def __dir__(self):
return dir(self._mod)
def __getattr__(self, name):
warnings.warn("Module %s is deprecated, use %s instead"
% (self._old_name, self._new_name),
DeprecationWarning)
return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
# change this to scipy.stats._qmc.check_random_state once numpy 1.16 is dropped
def check_random_state(seed):
"""Turn `seed` into a `np.random.RandomState` instance.
Parameters
----------
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
Random number generator.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
try:
# Generator is only available in numpy >= 1.17
if isinstance(seed, np.random.Generator):
return seed
except AttributeError:
pass
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def _asarray_validated(a, check_finite=True,
sparse_ok=False, objects_ok=False, mask_ok=False,
as_inexact=False):
"""
Helper function for SciPy argument validation.
Many SciPy linear algebra functions do support arbitrary array-like
input arguments. Examples of commonly unsupported inputs include
matrices containing inf/nan, sparse matrix representations, and
matrices with complicated elements.
Parameters
----------
a : array_like
The array-like input.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
sparse_ok : bool, optional
True if scipy sparse matrices are allowed.
objects_ok : bool, optional
        True if arrays with dtype('O') are allowed.
mask_ok : bool, optional
True if masked arrays are allowed.
as_inexact : bool, optional
True to convert the input array to a np.inexact dtype.
Returns
-------
ret : ndarray
The converted validated array.
"""
if not sparse_ok:
import scipy.sparse
if scipy.sparse.issparse(a):
msg = ('Sparse matrices are not supported by this function. '
'Perhaps one of the scipy.sparse.linalg functions '
'would work instead.')
raise ValueError(msg)
if not mask_ok:
if np.ma.isMaskedArray(a):
raise ValueError('masked arrays are not supported')
toarray = np.asarray_chkfinite if check_finite else np.asarray
a = toarray(a)
if not objects_ok:
if a.dtype is np.dtype('O'):
raise ValueError('object arrays are not supported')
if as_inexact:
if not np.issubdtype(a.dtype, np.inexact):
a = toarray(a, dtype=np.float_)
return a
def _validate_int(k, name, minimum=None):
"""
Validate a scalar integer.
This functon can be used to validate an argument to a function
that expects the value to be an integer. It uses `operator.index`
to validate the value (so, for example, k=2.0 results in a
TypeError).
Parameters
----------
k : int
The value to be validated.
name : str
The name of the parameter.
minimum : int, optional
An optional lower bound.
"""
try:
k = operator.index(k)
except TypeError:
raise TypeError(f'{name} must be an integer.') from None
if minimum is not None and k < minimum:
raise ValueError(f'{name} must be an integer not less '
f'than {minimum}') from None
return k
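# Illustrative behaviour of _validate_int (added example): integral-valued
# floats are still rejected because operator.index() refuses them.
if __name__ == "__main__":
    print(_validate_int(3, 'n', minimum=0))  # 3
    try:
        _validate_int(2.0, 'n')
    except TypeError as exc:
        print(exc)  # n must be an integer.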
# Add a replacement for inspect.getfullargspec()/
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846.
# Note an inconsistency between inspect.getfullargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
# mimics `inspect.getfullargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getfullargspec or a bright and shiny .signature.
FullArgSpec = namedtuple('FullArgSpec',
['args', 'varargs', 'varkw', 'defaults',
'kwonlyargs', 'kwonlydefaults', 'annotations'])
def getfullargspec_no_self(func):
"""inspect.getfullargspec replacement using inspect.signature.
If func is a bound method, do not list the 'self' parameter.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
kwonlydefaults, annotations)
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not*, included in fullargspec.args.
This is done for consistency between inspect.getargspec() under
Python 2.x, and inspect.signature() under Python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY]
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = tuple(
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
) or None
kwonlyargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
]
kwdefaults = {p.name: p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY and
p.default is not p.empty}
annotations = {p.name: p.annotation for p in sig.parameters.values()
if p.annotation is not p.empty}
return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
kwdefaults or None, annotations)
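# Illustrative check (added example): for a bound method, `self` does not
# appear in the reported argument list.
if __name__ == "__main__":
    class _Demo:
        def method(self, a, b=1):
            return a + b
    print(getfullargspec_no_self(_Demo().method).args)  # ['a', 'b']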
class MapWrapper:
"""
Parallelisation wrapper for working with map-like callables, such as
`multiprocessing.Pool.map`.
Parameters
----------
pool : int or map-like callable
If `pool` is an integer, then it specifies the number of threads to
use for parallelization. If ``int(pool) == 1``, then no parallel
processing is used and the map builtin is used.
If ``pool == -1``, then the pool will utilize all available CPUs.
If `pool` is a map-like callable that follows the same
calling sequence as the built-in map function, then this callable is
used for parallelization.
"""
def __init__(self, pool=1):
self.pool = None
self._mapfunc = map
self._own_pool = False
if callable(pool):
self.pool = pool
self._mapfunc = self.pool
else:
from multiprocessing import Pool
# user supplies a number
if int(pool) == -1:
# use as many processors as possible
self.pool = Pool()
self._mapfunc = self.pool.map
self._own_pool = True
elif int(pool) == 1:
pass
elif int(pool) > 1:
# use the number of processors requested
self.pool = Pool(processes=int(pool))
self._mapfunc = self.pool.map
self._own_pool = True
else:
raise RuntimeError("Number of workers specified must be -1,"
" an int >= 1, or an object with a 'map' "
"method")
def __enter__(self):
return self
def terminate(self):
if self._own_pool:
self.pool.terminate()
def join(self):
if self._own_pool:
self.pool.join()
def close(self):
if self._own_pool:
self.pool.close()
def __exit__(self, exc_type, exc_value, traceback):
if self._own_pool:
self.pool.close()
self.pool.terminate()
def __call__(self, func, iterable):
# only accept one iterable because that's all Pool.map accepts
try:
return self._mapfunc(func, iterable)
except TypeError as e:
# wrong number of arguments
raise TypeError("The map-like callable must be of the"
" form f(func, iterable)") from e
def rng_integers(gen, low, high=None, size=None, dtype='int64',
endpoint=False):
"""
Return random integers from low (inclusive) to high (exclusive), or if
endpoint=True, low (inclusive) to high (inclusive). Replaces
`RandomState.randint` (with endpoint=False) and
`RandomState.random_integers` (with endpoint=True).
Return random integers from the "discrete uniform" distribution of the
specified dtype. If high is None (the default), then results are from
0 to low.
Parameters
----------
gen : {None, np.random.RandomState, np.random.Generator}
Random number generator. If None, then the np.random.RandomState
singleton is used.
low : int or array-like of ints
Lowest (signed) integers to be drawn from the distribution (unless
high=None, in which case this parameter is 0 and this value is used
for high).
high : int or array-like of ints
If provided, one above the largest (signed) integer to be drawn from
the distribution (see above for behavior if high=None). If array-like,
must contain integer values.
    size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
samples are drawn. Default is None, in which case a single value is
returned.
dtype : {str, dtype}, optional
Desired dtype of the result. All dtypes are determined by their name,
i.e., 'int64', 'int', etc, so byteorder is not available and a specific
precision may have different C types depending on the platform.
The default value is np.int_.
endpoint : bool, optional
If True, sample from the interval [low, high] instead of the default
[low, high) Defaults to False.
Returns
-------
out: int or ndarray of ints
size-shaped array of random integers from the appropriate distribution,
or a single such random int if size not provided.
"""
if isinstance(gen, Generator):
return gen.integers(low, high=high, size=size, dtype=dtype,
endpoint=endpoint)
else:
if gen is None:
# default is RandomState singleton used by np.random.
gen = np.random.mtrand._rand
if endpoint:
# inclusive of endpoint
# remember that low and high can be arrays, so don't modify in
# place
if high is None:
return gen.randint(low + 1, size=size, dtype=dtype)
if high is not None:
return gen.randint(low, high=high + 1, size=size, dtype=dtype)
# exclusive
return gen.randint(low, high=high, size=size, dtype=dtype)
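# Illustrative use of rng_integers (added example): with endpoint=True the
# upper bound becomes inclusive, matching the docstring above.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    print(rng_integers(rng, 1, 6, size=8, endpoint=True))  # values in [1, 6]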
| [
"numpy.mintypecode",
"inspect.signature",
"numpy.ma.isMaskedArray",
"numpy.random.RandomState",
"numpy.asarray",
"numpy.issubdtype",
"numpy.empty",
"warnings.warn",
"numpy.dtype",
"collections.namedtuple",
"operator.index",
"functools.reduce",
"math.factorial",
"numpy.shape",
"numpy.extr... | [((10402, 10520), 'collections.namedtuple', 'namedtuple', (['"""FullArgSpec"""', "['args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',\n 'annotations']"], {}), "('FullArgSpec', ['args', 'varargs', 'varkw', 'defaults',\n 'kwonlyargs', 'kwonlydefaults', 'annotations'])\n", (10412, 10520), False, 'from collections import namedtuple\n'), ((838, 854), 'numpy.asarray', 'np.asarray', (['cond'], {}), '(cond)\n', (848, 854), True, 'import numpy as np\n'), ((1142, 1176), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['cond', '*arrays'], {}), '(cond, *arrays)\n', (1161, 1176), True, 'import numpy as np\n'), ((1285, 1331), 'numpy.mintypecode', 'np.mintypecode', (['[a.dtype.char for a in arrays]'], {}), '([a.dtype.char for a in arrays])\n', (1299, 1331), True, 'import numpy as np\n'), ((2497, 2525), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*arrays'], {}), '(*arrays)\n', (2516, 2525), True, 'import numpy as np\n'), ((2538, 2584), 'numpy.mintypecode', 'np.mintypecode', (['[a.dtype.char for a in arrays]'], {}), '([a.dtype.char for a in arrays])\n', (2552, 2584), True, 'import numpy as np\n'), ((3312, 3327), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (3320, 3327), True, 'import numpy as np\n'), ((3521, 3557), 'numpy.empty', 'np.empty', (['(size + align + 1)', 'np.uint8'], {}), '(size + align + 1, np.uint8)\n', (3529, 3557), True, 'import numpy as np\n'), ((3840, 3882), 'numpy.ndarray', 'np.ndarray', (['shape', 'dtype', 'buf'], {'order': 'order'}), '(shape, dtype, buf, order=order)\n', (3850, 3882), True, 'import numpy as np\n'), ((11252, 11275), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (11269, 11275), False, 'import inspect\n'), ((1350, 1369), 'numpy.shape', 'np.shape', (['arrays[0]'], {}), '(arrays[0])\n', (1358, 1369), True, 'import numpy as np\n'), ((2603, 2622), 'numpy.shape', 'np.shape', (['arrays[0]'], {}), '(arrays[0])\n', (2611, 2622), True, 'import numpy as np\n'), ((2763, 2784), 'numpy.all', 'np.all', (['(cond is False)'], {}), '(cond is False)\n', (2769, 2784), True, 'import numpy as np\n'), ((2825, 2861), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['cond', 'arrays[0]'], {}), '(cond, arrays[0])\n', (2844, 2861), True, 'import numpy as np\n'), ((3456, 3493), 'functools.reduce', 'functools.reduce', (['operator.mul', 'shape'], {}), '(operator.mul, shape)\n', (3472, 3493), False, 'import functools\n'), ((5414, 5529), 'warnings.warn', 'warnings.warn', (["('Module %s is deprecated, use %s instead' % (self._old_name, self._new_name))", 'DeprecationWarning'], {}), "('Module %s is deprecated, use %s instead' % (self._old_name,\n self._new_name), DeprecationWarning)\n", (5427, 5529), False, 'import warnings\n'), ((6558, 6585), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (6579, 6585), True, 'import numpy as np\n'), ((8550, 8572), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['a'], {}), '(a)\n', (8569, 8572), True, 'import numpy as np\n'), ((9494, 9511), 'operator.index', 'operator.index', (['k'], {}), '(k)\n', (9508, 9511), False, 'import operator\n'), ((1232, 1253), 'numpy.extract', 'np.extract', (['cond', 'arr'], {}), '(cond, arr)\n', (1242, 1253), True, 'import numpy as np\n'), ((4708, 4725), 'math.factorial', 'math.factorial', (['n'], {}), '(n)\n', (4722, 4725), False, 'import math\n'), ((8769, 8782), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (8777, 8782), True, 'import numpy as np\n'), ((8882, 8916), 'numpy.issubdtype', 'np.issubdtype', 
(['a.dtype', 'np.inexact'], {}), '(a.dtype, np.inexact)\n', (8895, 8916), True, 'import numpy as np\n'), ((1484, 1506), 'numpy.extract', 'np.extract', (['(~cond)', 'arr'], {}), '(~cond, arr)\n', (1494, 1506), True, 'import numpy as np\n'), ((2883, 2904), 'numpy.extract', 'np.extract', (['cond', 'arr'], {}), '(cond, arr)\n', (2893, 2904), True, 'import numpy as np\n'), ((13615, 13621), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (13619, 13621), False, 'from multiprocessing import Pool\n')] |
import argparse
from selenium import webdriver
import pandas as pd
import os
from legiscrapor.legiskenya import legisKenya
import numpy as np
####### LET'S RUN A KENYA WEB CRAWL #######
####### SETUP #######
## ARGPARSE: args for this script.
parser = argparse.ArgumentParser(description='Extract PDFs of legislation from the Kenyan Parliament website.')
parser.add_argument('input',help='Path to input file')
args = parser.parse_args()
####### THE GOOD STUFF #######
new_kenya = legisKenya()
new_kenya.read_inputs(args.input)
new_kenya.checkers()
pd.options.display.max_colwidth = 1000
#keywords =[ 'judicial assistance']
all_hrefs = []
for k in new_kenya.keywords:
print(k)
hrefs = new_kenya.search_laws(k)
all_hrefs.append(hrefs)
all_hrefs = [item.split('&term=')[0] for sublist in all_hrefs for item in sublist]
## for Kenya, the hyperlinks have '&term=KEYWORD' at the end, which doesn't impact the final destination
## of the webpage. Removing it helps us find the unique documents, since sometimes the same document
## is found for two different keywords.
all_hrefs = np.unique(all_hrefs)
print(len(all_hrefs))
#print(all_hrefs)
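## Illustrative check of the '&term=' de-duplication above (added example;
## the URLs are made up): stripping the search-term suffix collapses hits
## found under different keywords onto the same document.
_demo_hrefs = ['http://example.org/doc1&term=judicial',
               'http://example.org/doc1&term=assistance']
print(np.unique([h.split('&term=')[0] for h in _demo_hrefs]))  # one unique URL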
new_kenya.get_pdfs(all_hrefs,path=new_kenya.downloadPath+'final',anotherLink=True)
specs = 'all-Kenya-laws'
matches_files = new_kenya.scan_pdfs(new_kenya.downloadPath+'final',new_kenya.keywords)
if len(matches_files) > 0:
print(matches_files)
new_kenya.print_matches(matches_files,specs)
## let's delete any files not moved into the final destination folder (which means they're duplicates):
new_kenya.delete_unneeded_files('duplicates-'+specs,[],path=new_kenya.downloadPath,moveNotDelete=True)
new_kenya.delete_no_matches(specs,path=new_kenya.downloadPath+'final',moveFiles=True)
new_kenya.teardown()
| [
"legiscrapor.legiskenya.legisKenya",
"numpy.unique",
"argparse.ArgumentParser"
] | [((258, 368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract PDFs of legislation from South African Parliament website."""'}), "(description=\n 'Extract PDFs of legislation from South African Parliament website.')\n", (281, 368), False, 'import argparse\n'), ((493, 505), 'legiscrapor.legiskenya.legisKenya', 'legisKenya', ([], {}), '()\n', (503, 505), False, 'from legiscrapor.legiskenya import legisKenya\n'), ((1103, 1123), 'numpy.unique', 'np.unique', (['all_hrefs'], {}), '(all_hrefs)\n', (1112, 1123), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# plotband.py
#
# Simple script to visualize phonon dispersion relations
#
# Copyright (c) 2014 <NAME>
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
import numpy as np
import optparse
import matplotlib as mpl
import mpl_toolkits
from matplotlib.gridspec import GridSpec
try:
mpl.use("Qt5agg")
except:
pass
import matplotlib.pyplot as plt
# parser options
usage = "usage: %prog [options] file1.bands file2.bands ... "
parser = optparse.OptionParser(usage=usage)
parser.add_option("--nokey", action="store_false", dest="print_key", default=True,
help="don't print the key in the figure")
parser.add_option("-u", "--unit", action="store", type="string", dest="unitname", default="kayser",
help="print the band dispersion in units of UNIT. Available options are kayser, meV, and THz", metavar="UNIT")
parser.add_option("--emin", action="store", type="float", dest="emin",
help="minimum value of the energy axis")
parser.add_option("--emax", action="store", type="float", dest="emax",
help="maximum value of the energy axis")
parser.add_option("--normalize", action="store_true", dest="normalize_xaxis", default=False,
help="normalize the x axis to unity.")
# font styles
mpl.rc('font', **{'family': 'Times New Roman', 'sans-serif': ['Helvetica']})
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=16)
mpl.rc('axes', labelsize=16)
mpl.rc('lines', linewidth=1.5)
mpl.rc('legend', fontsize='small')
# line colors and styles
color = ['b', 'g', 'r', 'm', 'k', 'c', 'y', 'r']
lsty = ['-', '-', '-', '-', '--', '--', '--', '--']
bn = [0, 300, 400]  # color boundaries for the commented-out colorbar experiment in run_plot()
def get_kpath_and_kval(file_in):
ftmp = open(file_in, 'r')
kpath = ftmp.readline().rstrip('\n').split()
kval = ftmp.readline().rstrip('\n').split()
ftmp.close()
if kpath[0] == '#' and kval[0] == '#':
kval_float = [float(val) for val in kval[1:]]
kpath_list = []
for i in range(len(kpath[1:])):
            if kpath[i + 1] == 'G':
                kpath_list.append(r'$\Gamma$')
            else:
                kpath_list.append(r"$\mathrm{%s}$" % kpath[i + 1])
return kpath_list, kval_float
else:
return [], []
def change_scale(array, str_scale):
str_tmp = str_scale.lower()
if str_tmp == 'kayser':
print("Band structure will be shown in units of cm^{-1}")
return array
elif str_tmp == 'mev':
print("Band structure will be shown in units of meV")
kayser_to_mev = 0.0299792458 * 1.0e+12 * \
6.62606896e-34 / 1.602176565e-19 * 1000
for i in range(len(array)):
for j in range(len(array[i])):
for k in range(1, len(array[i][j])):
array[i][j][k] *= kayser_to_mev
return array
elif str_tmp == 'thz':
print("Band structure will be shown in units of THz")
kayser_to_thz = 0.0299792458
for i in range(len(array)):
for j in range(len(array[i])):
for k in range(1, len(array[i][j])):
array[i][j][k] *= kayser_to_thz
return array
else:
print("Unrecognizable option for --unit %s" % str_scale)
print("Band structure will be shown in units of cm^{-1}")
return array
def normalize_to_unity(array, array_axis):
for i in range(len(array)):
max_val = array[i][-1][0]
factor_normalize = 1.0 / max_val
for j in range(len(array[i])):
array[i][j][0] *= factor_normalize
max_val = array_axis[-1]
factor_normalize = 1.0 / max_val
for i in range(len(array_axis)):
array_axis[i] *= factor_normalize
return array, array_axis
def get_xy_minmax(array):
xmin, xmax, ymin, ymax = [0, 0, 0, 0]
for i in range(len(array)):
xtmp = array[i][-1][0]
xmax = max(xmax, xtmp)
for i in range(len(array)):
for j in range(len(array[i])):
for k in range(1, len(array[i][j])):
ytmp = array[i][j][k]
ymin = min(ymin, ytmp)
ymax = max(ymax, ytmp)
return xmin, xmax, ymin, ymax
def gridspec_setup(data_merged, xtickslabels, xticksvars):
xmaxs = []
xmins = []
xticks_grids = []
xticklabels_grids = []
xticklabels_tmp = []
xticks_tmp = []
for i in range(len(xtickslabels)):
if i == 0:
xmins.append(xticksvars[0])
else:
if xticksvars[i] == xticksvars[i-1]:
xmaxs.append(xticksvars[i - 1])
xmins.append(xticksvars[i])
xticks_grids.append(xticks_tmp)
xticklabels_grids.append(xticklabels_tmp)
xticklabels_tmp = []
xticks_tmp = []
xticklabels_tmp.append(xtickslabels[i])
xticks_tmp.append(xticksvars[i])
xticks_grids.append(xticks_tmp)
xticklabels_grids.append(xticklabels_tmp)
xmaxs.append(xticksvars[-1])
naxes = len(xticks_grids)
nfiles = len(data_merged)
data_all_axes = []
for i in range(naxes):
data_ax = []
xmin_ax = xmins[i]
xmax_ax = xmaxs[i]
for j in range(nfiles):
kval = np.array(data_merged[j][0:, 0])
ix_xmin_arr = np.where(kval <= xmin_ax)
ix_xmax_arr = np.where(kval >= xmax_ax)
if len(ix_xmin_arr[0]) > 0:
ix_xmin = int(ix_xmin_arr[0][-1])
else:
ix_xmin = 0
if len(ix_xmax_arr[0]) > 0:
ix_xmax = int(ix_xmax_arr[0][0])
else:
ix_xmax = -2
data_ax.append(data_merged[j][ix_xmin:(ix_xmax+1), :])
data_all_axes.append(data_ax)
return naxes, xticks_grids, xticklabels_grids, xmins, xmaxs, data_all_axes
def preprocess_data(files, unitname, normalize_xaxis):
xtickslabels, xticksvars = get_kpath_and_kval(files[0])
data_merged = []
for file in files:
data_tmp = np.loadtxt(file, dtype=float)
data_merged.append(data_tmp)
data_merged = change_scale(data_merged, unitname)
if normalize_xaxis:
data_merged, xticksvars = normalize_to_unity(data_merged, xticksvars)
xmin, xmax, ymin, ymax = get_xy_minmax(data_merged)
if options.emin is None and options.emax is None:
factor = 1.05
ymin *= factor
ymax *= factor
else:
if options.emin is not None:
ymin = options.emin
if options.emax is not None:
ymax = options.emax
if ymin > ymax:
print("Warning: emin > emax")
naxes, xticks_grids, xticklabels_grids, xmins, xmaxs, data_merged_grids \
= gridspec_setup(data_merged, xtickslabels, xticksvars)
return naxes, xticks_grids, xticklabels_grids, \
xmins, xmaxs, ymin, ymax, data_merged_grids
def run_plot(nax, xticks_ax, xticklabels_ax, xmin_ax, xmax_ax, ymin, ymax, data_merged_ax):
fig = plt.figure()
width_ratios = []
used_colors = []
for xmin, xmax in zip(xmin_ax, xmax_ax):
width_ratios.append(xmax - xmin)
gs = GridSpec(nrows=1, ncols=nax, width_ratios=width_ratios)
gs.update(wspace=0.1)
for iax in range(nax):
ax = plt.subplot(gs[iax])
for i in range(len(data_merged_ax[iax])):
if len(data_merged_ax[iax][i]) > 0:
ax.plot(data_merged_ax[iax][i][0:, 0], data_merged_ax[iax][i][0:, 1],
linestyle=lsty[i], color=color[i], label=files[i])
used_colors.append(color[i])
for j in range(2, len(data_merged_ax[iax][i][0][0:])):
ax.plot(data_merged_ax[iax][i][0:, 0], data_merged_ax[iax][i][0:, j],
linestyle=lsty[i], color=color[i])
# # ax_0 = plt.subplot(gs[0,:])
# # ax_0.set_axis_off()
# cmp = mpl.colors.ListedColormap(used_colors)
# norm = mpl.colors.BoundaryNorm(boundaries=bn, ncolors=cmp.N)
# #fig.subplots_adjust(top=0.9)
#
# p0 = ax.get_position().get_points().flatten()
# # l, b, w = [0.15, 0.92, 0.7]
# # cax = fig.add_axes([0.15, 0.9, 0.2, 0.05])
# # cax = fig.add_axes([p0[0], 1, p0[2]-p0[0], 0.05])
# # cb_ax = mpl_toolkits.axes_grid1.inset_locator.inset_axes(ax, loc=3)
# cb =fig.colorbar(mappable=mpl.cm.ScalarMappable(norm=norm, cmap=cmp), use_gridspec=False,
# ax=ax, orientation='horizontal',shrink=1, fraction=0.1, pad=0.1)
if iax == 0:
if options.unitname.lower() == "mev":
ax.set_ylabel("Frequency (meV)", labelpad=20)
elif options.unitname.lower() == "thz":
ax.set_ylabel("Frequency (THz)", labelpad=20)
else:
ax.set_ylabel("Frequency (cm${}^{-1}$)", labelpad=10)
else:
ax.set_yticklabels([])
ax.set_yticks([])
plt.axis([xmin_ax[iax], xmax_ax[iax], ymin, ymax])
ax.set_xticks(xticks_ax[iax])
ax.set_xticklabels(xticklabels_ax[iax])
ax.xaxis.grid(True, linestyle='-')
if options.print_key and iax == 0:
ax.legend(loc='best', prop={'size': 10})
plt.show()
if __name__ == '__main__':
'''
Simple script for visualizing phonon dispersion relations.
Usage:
$ python plot_band.py [options] file1.bands file2.bands ...
For details of available options, please type
$ python plot_band.py -h
'''
"""
Test arguments:
./BrCsSn/scfph/300/pc_disp.bands
./BrCsSn/scfph/400/pc_disp.bands
"""
    # NOTE: fakeArgs overrides the real command line for testing; call
    # parser.parse_args() with no argument to honor sys.argv instead.
    fakeArgs = ['-u', 'Thz', './BrCsSn/phonons/pc_disp.bands', './Br3Ca1Cs1/phonons/pc_disp.bands']
    options, args = parser.parse_args(fakeArgs)
files = args[0:]
nfiles = len(files)
if nfiles == 0:
print("Usage: plotband.py [options] file1.bands file2.bands ...")
print("For details of available options, please type\n$ python plotband.py -h")
exit(1)
else:
print("Number of files = %d" % nfiles)
nax, xticks_ax, xticklabels_ax, xmin_ax, xmax_ax, ymin, ymax, \
data_merged_ax = preprocess_data(
files, options.unitname, options.normalize_xaxis)
run_plot(nax, xticks_ax, xticklabels_ax,
xmin_ax, xmax_ax, ymin, ymax, data_merged_ax) | [
"matplotlib.use",
"numpy.where",
"optparse.OptionParser",
"numpy.array",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"matplotlib.pyplot.axis",
"numpy.loadtxt",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((634, 668), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (655, 668), False, 'import optparse\n'), ((1483, 1559), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **{'family': 'Times New Roman', 'sans-serif': ['Helvetica']})\n", (1489, 1559), True, 'import matplotlib as mpl\n'), ((1561, 1590), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {'labelsize': '(12)'}), "('xtick', labelsize=12)\n", (1567, 1590), True, 'import matplotlib as mpl\n'), ((1592, 1621), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (1598, 1621), True, 'import matplotlib as mpl\n'), ((1623, 1651), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {'labelsize': '(16)'}), "('axes', labelsize=16)\n", (1629, 1651), True, 'import matplotlib as mpl\n'), ((1653, 1683), 'matplotlib.rc', 'mpl.rc', (['"""lines"""'], {'linewidth': '(1.5)'}), "('lines', linewidth=1.5)\n", (1659, 1683), True, 'import matplotlib as mpl\n'), ((1685, 1719), 'matplotlib.rc', 'mpl.rc', (['"""legend"""'], {'fontsize': '"""small"""'}), "('legend', fontsize='small')\n", (1691, 1719), True, 'import matplotlib as mpl\n'), ((471, 488), 'matplotlib.use', 'mpl.use', (['"""Qt5agg"""'], {}), "('Qt5agg')\n", (478, 488), True, 'import matplotlib as mpl\n'), ((7406, 7418), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7416, 7418), True, 'import matplotlib.pyplot as plt\n'), ((7566, 7621), 'matplotlib.gridspec.GridSpec', 'GridSpec', ([], {'nrows': '(1)', 'ncols': 'nax', 'width_ratios': 'width_ratios'}), '(nrows=1, ncols=nax, width_ratios=width_ratios)\n', (7574, 7621), False, 'from matplotlib.gridspec import GridSpec\n'), ((9733, 9743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9741, 9743), True, 'import matplotlib.pyplot as plt\n'), ((6402, 6431), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'dtype': 'float'}), '(file, dtype=float)\n', (6412, 6431), True, 'import numpy as np\n'), ((7695, 7715), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[iax]'], {}), '(gs[iax])\n', (7706, 7715), True, 'import matplotlib.pyplot as plt\n'), ((9443, 9493), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin_ax[iax], xmax_ax[iax], ymin, ymax]'], {}), '([xmin_ax[iax], xmax_ax[iax], ymin, ymax])\n', (9451, 9493), True, 'import matplotlib.pyplot as plt\n'), ((5594, 5625), 'numpy.array', 'np.array', (['data_merged[j][0:, 0]'], {}), '(data_merged[j][0:, 0])\n', (5602, 5625), True, 'import numpy as np\n'), ((5653, 5678), 'numpy.where', 'np.where', (['(kval <= xmin_ax)'], {}), '(kval <= xmin_ax)\n', (5661, 5678), True, 'import numpy as np\n'), ((5706, 5731), 'numpy.where', 'np.where', (['(kval >= xmax_ax)'], {}), '(kval >= xmax_ax)\n', (5714, 5731), True, 'import numpy as np\n')] |
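The conversion factors hard-coded in change_scale() above are easy to mistype, so here is a standalone sanity check using the same constants (c = 2.99792458e10 cm/s, h = 6.62606896e-34 J s, e = 1.602176565e-19 C):

import numpy as np

kayser = np.array([100.0, 350.0])  # phonon frequencies in cm^-1
thz = kayser * 0.0299792458   # 1 cm^-1 = c [cm/s] * 1e-12 THz
mev = kayser * (0.0299792458 * 1.0e+12 * 6.62606896e-34 / 1.602176565e-19 * 1000)
print(thz)  # [ 2.99792458 10.49273603]
print(mev)  # approx. [12.398 43.394]; 1 cm^-1 is about 0.124 meV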
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def build_data():
train = np.load('./data/train.npy')
test = np.load('./data/test.npy')
return train, test
def distance(v1, v2):
"""
:param v1: array
:param v2: array
:return: dist
"""
dist = np.sqrt(np.sum(np.power((v1 - v2), 2)))
return dist
def knn_owns(train, test, k):
true_num = 0
for i in range(test.shape[0]):
arr_dist = np.zeros(shape=(train.shape[0], 2))
for j in range(train.shape[0]):
dist = distance(test[i, :1024], train[j, :1024])
arr_dist[j, :] = dist, train[j, -1]
df = pd.DataFrame(data=arr_dist, columns=['dist', 'target'])
mode = df.sort_values(by='dist')['target'].head(k).mode()[0]
if mode == test[i, -1]:
true_num += 1
score = true_num / test.shape[0]
return score
def show_res(k_list, score_list):
fig = plt.figure()
    # Tweak RC params so matplotlib can render the Chinese labels below
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
plt.plot(k_list, score_list, color='r', linestyle='-.', linewidth=1.2, marker="o", markersize=7,
markerfacecolor='r', markeredgecolor='r')
    plt.title('手写字knn算法预测准确率走势图')  # "kNN accuracy vs. k for handwritten digits"
    plt.xlabel('k值')  # "value of k"
    plt.ylabel('准确率')  # "accuracy"
plt.xticks(k_list)
for i, j in zip(k_list, score_list):
plt.text(i, j, "%.3f" % j, horizontalalignment='center')
# plt.savefig('手写字knn算法预测准确率走势图.png')
plt.show()
def main():
    # 1. Load the data
train, test = build_data()
print(train)
print(train.shape)
print(test)
print(test.shape)
    # 2. Predict with our own kNN implementation (knn_owns)
k_list = list(range(5, 15))
score_list = []
for k in k_list:
score = knn_owns(train, test, k)
score_list.append(score)
    # 3. Show the results
print(score_list)
show_res(k_list, score_list)
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.zeros",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show"
] | [((103, 130), 'numpy.load', 'np.load', (['"""./data/train.npy"""'], {}), "('./data/train.npy')\n", (110, 130), True, 'import numpy as np\n'), ((142, 168), 'numpy.load', 'np.load', (['"""./data/test.npy"""'], {}), "('./data/test.npy')\n", (149, 168), True, 'import numpy as np\n'), ((943, 955), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (953, 955), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1217), 'matplotlib.pyplot.plot', 'plt.plot', (['k_list', 'score_list'], {'color': '"""r"""', 'linestyle': '"""-."""', 'linewidth': '(1.2)', 'marker': '"""o"""', 'markersize': '(7)', 'markerfacecolor': '"""r"""', 'markeredgecolor': '"""r"""'}), "(k_list, score_list, color='r', linestyle='-.', linewidth=1.2,\n marker='o', markersize=7, markerfacecolor='r', markeredgecolor='r')\n", (1083, 1217), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1260), 'matplotlib.pyplot.title', 'plt.title', (['"""手写字knn算法预测准确率走势图"""'], {}), "('手写字knn算法预测准确率走势图')\n", (1240, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k值"""'], {}), "('k值')\n", (1275, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1303), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""准确率"""'], {}), "('准确率')\n", (1296, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1326), 'matplotlib.pyplot.xticks', 'plt.xticks', (['k_list'], {}), '(k_list)\n', (1318, 1326), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1487, 1489), True, 'import matplotlib.pyplot as plt\n'), ((462, 497), 'numpy.zeros', 'np.zeros', ([], {'shape': '(train.shape[0], 2)'}), '(shape=(train.shape[0], 2))\n', (470, 497), True, 'import numpy as np\n'), ((660, 715), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'arr_dist', 'columns': "['dist', 'target']"}), "(data=arr_dist, columns=['dist', 'target'])\n", (672, 715), True, 'import pandas as pd\n'), ((1376, 1432), 'matplotlib.pyplot.text', 'plt.text', (['i', 'j', "('%.3f' % j)"], {'horizontalalignment': '"""center"""'}), "(i, j, '%.3f' % j, horizontalalignment='center')\n", (1384, 1432), True, 'import matplotlib.pyplot as plt\n'), ((318, 338), 'numpy.power', 'np.power', (['(v1 - v2)', '(2)'], {}), '(v1 - v2, 2)\n', (326, 338), True, 'import numpy as np\n')] |
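The inner loop of knn_owns() above rebuilds a pandas DataFrame per test sample, which dominates the runtime. A broadcasting-based sketch of the same scoring (same data layout: 1024 pixel features plus a trailing label column) is shown below. It is an illustrative alternative, not part of the original script, and it assumes the labels are small non-negative integers and that the n_test x n_train x 1024 intermediate fits in memory.

import numpy as np

def knn_owns_vectorized(train, test, k):
    X_tr, y_tr = train[:, :1024], train[:, -1].astype(int)
    X_te, y_te = test[:, :1024], test[:, -1].astype(int)
    # (n_test, n_train) Euclidean distance matrix via broadcasting
    dist = np.sqrt(((X_te[:, None, :] - X_tr[None, :, :]) ** 2).sum(axis=2))
    # labels of the k nearest training points for each test point
    votes = y_tr[np.argsort(dist, axis=1)[:, :k]]
    # majority vote per row (np.bincount needs non-negative ints)
    preds = np.array([np.bincount(v).argmax() for v in votes])
    return (preds == y_te).mean()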
import numpy as np
import h5py
import time
import logging
from utilities import calculate_scalar, scale
import config
class DataGenerator(object):
def __init__(self, hdf5_path, batch_size, holdout_fold, seed=1234):
"""
Inputs:
hdf5_path: str
batch_size: int
holdout_fold: int
seed: int, random seed
"""
self.batch_size = batch_size
self.holdout_fold = holdout_fold
self.random_state = np.random.RandomState(seed)
self.validate_random_state = np.random.RandomState(0)
# Load data
load_time = time.time()
hf = h5py.File(hdf5_path, 'r')
self.audio_names = np.array([s.decode() for s in hf['audio_name'][:]])
self.x = hf['mixture_logmel'][:]
self.y = hf['target'][:]
self.folds = hf['fold'][:]
hf.close()
logging.info('Loading data time: {:.3f} s'.format(
time.time() - load_time))
# Split data to training and validation
self.train_audio_indexes, self.validate_audio_indexes = \
self.get_train_validate_audio_indexes()
# Calculate scalar
(self.mean, self.std) = calculate_scalar(
self.x[self.train_audio_indexes])
def get_train_validate_audio_indexes(self):
audio_indexes = np.arange(len(self.audio_names))
train_audio_indexes = audio_indexes[self.folds != self.holdout_fold]
validate_audio_indexes = audio_indexes[self.folds == self.holdout_fold]
return train_audio_indexes, validate_audio_indexes
def generate_train(self):
"""Generate mini-batch data for training.
Returns:
batch_x: (batch_size, seq_len, freq_bins)
batch_y: (batch_size,)
"""
batch_size = self.batch_size
audio_indexes = np.array(self.train_audio_indexes)
audios_num = len(audio_indexes)
self.random_state.shuffle(audio_indexes)
iteration = 0
pointer = 0
while True:
# Reset pointer
if pointer >= audios_num:
pointer = 0
self.random_state.shuffle(audio_indexes)
# Get batch indexes
batch_audio_indexes = audio_indexes[pointer: pointer + batch_size]
pointer += batch_size
iteration += 1
batch_x = self.x[batch_audio_indexes]
batch_y = self.y[batch_audio_indexes]
# Transform data
batch_x = self.transform(batch_x)
batch_y = batch_y.astype(np.float32)
yield batch_x, batch_y
def generate_validate(self, data_type, shuffle, max_iteration=None):
"""Generate mini-batch data for evaluation.
Args:
data_type: 'train' | 'validate'
max_iteration: int, maximum iteration for validation
shuffle: bool
Returns:
batch_x: (batch_size, seq_len, freq_bins)
batch_y: (batch_size,)
batch_audio_names: (batch_size,)
"""
batch_size = self.batch_size
if data_type == 'train':
audio_indexes = np.array(self.train_audio_indexes)
elif data_type == 'validate':
audio_indexes = np.array(self.validate_audio_indexes)
else:
raise Exception('Invalid data_type!')
if shuffle:
self.validate_random_state.shuffle(audio_indexes)
audios_num = len(audio_indexes)
iteration = 0
pointer = 0
while True:
if iteration == max_iteration:
break
# Reset pointer
if pointer >= audios_num:
break
# Get batch indexes
batch_audio_indexes = audio_indexes[
pointer: pointer + batch_size]
pointer += batch_size
iteration += 1
batch_x = self.x[batch_audio_indexes]
batch_y = self.y[batch_audio_indexes]
batch_audio_names = self.audio_names[batch_audio_indexes]
# Transform data
batch_x = self.transform(batch_x)
batch_y = batch_y.astype(np.float32)
yield batch_x, batch_y, batch_audio_names
def transform(self, x):
"""Transform data.
Args:
x: (batch_x, seq_len, freq_bins) | (seq_len, freq_bins)
Returns:
Transformed data.
"""
return scale(x, self.mean, self.std)
class InferenceDataGenerator(DataGenerator):
def __init__(self, hdf5_path, batch_size, holdout_fold):
"""Data generator for test data.
Inputs:
dev_hdf5_path: str
test_hdf5_path: str
batch_size: int
"""
super(InferenceDataGenerator, self).__init__(
hdf5_path=hdf5_path,
batch_size=batch_size,
holdout_fold=holdout_fold)
# Load stft data
load_time = time.time()
hf = h5py.File(hdf5_path, 'r')
self.hf = hf
logging.info('Loading data time: {:.3f} s'.format(
time.time() - load_time))
    def generate_test(self):
        # NOTE: self.test_x and self.test_audio_names are never assigned in
        # __init__; the caller must set them before using this generator.
        audios_num = len(self.test_x)
audio_indexes = np.arange(audios_num)
batch_size = self.batch_size
pointer = 0
while True:
# Reset pointer
if pointer >= audios_num:
break
# Get batch indexes
batch_audio_indexes = audio_indexes[pointer: pointer + batch_size]
pointer += batch_size
batch_x = self.test_x[batch_audio_indexes]
batch_audio_names = self.test_audio_names[batch_audio_indexes]
# Transform data
batch_x = self.transform(batch_x)
yield batch_x, batch_audio_names
def get_events_scene_mixture_stft(self, audio_name):
index = np.where(self.audio_names == audio_name)[0][0]
events_stft = self.hf['events_stft'][index]
scene_stft = self.hf['scene_stft'][index]
mixture_stft = self.hf['mixture_stft'][index]
return events_stft, scene_stft, mixture_stft | [
"numpy.arange",
"numpy.where",
"utilities.calculate_scalar",
"h5py.File",
"numpy.array",
"utilities.scale",
"time.time",
"numpy.random.RandomState"
] | [((483, 510), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (504, 510), True, 'import numpy as np\n'), ((548, 572), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (569, 572), True, 'import numpy as np\n'), ((614, 625), 'time.time', 'time.time', ([], {}), '()\n', (623, 625), False, 'import time\n'), ((639, 664), 'h5py.File', 'h5py.File', (['hdf5_path', '"""r"""'], {}), "(hdf5_path, 'r')\n", (648, 664), False, 'import h5py\n'), ((1207, 1257), 'utilities.calculate_scalar', 'calculate_scalar', (['self.x[self.train_audio_indexes]'], {}), '(self.x[self.train_audio_indexes])\n', (1223, 1257), False, 'from utilities import calculate_scalar, scale\n'), ((1903, 1937), 'numpy.array', 'np.array', (['self.train_audio_indexes'], {}), '(self.train_audio_indexes)\n', (1911, 1937), True, 'import numpy as np\n'), ((4577, 4606), 'utilities.scale', 'scale', (['x', 'self.mean', 'self.std'], {}), '(x, self.mean, self.std)\n', (4582, 4606), False, 'from utilities import calculate_scalar, scale\n'), ((5126, 5137), 'time.time', 'time.time', ([], {}), '()\n', (5135, 5137), False, 'import time\n'), ((5151, 5176), 'h5py.File', 'h5py.File', (['hdf5_path', '"""r"""'], {}), "(hdf5_path, 'r')\n", (5160, 5176), False, 'import h5py\n'), ((5414, 5435), 'numpy.arange', 'np.arange', (['audios_num'], {}), '(audios_num)\n', (5423, 5435), True, 'import numpy as np\n'), ((3227, 3261), 'numpy.array', 'np.array', (['self.train_audio_indexes'], {}), '(self.train_audio_indexes)\n', (3235, 3261), True, 'import numpy as np\n'), ((3329, 3366), 'numpy.array', 'np.array', (['self.validate_audio_indexes'], {}), '(self.validate_audio_indexes)\n', (3337, 3366), True, 'import numpy as np\n'), ((6131, 6171), 'numpy.where', 'np.where', (['(self.audio_names == audio_name)'], {}), '(self.audio_names == audio_name)\n', (6139, 6171), True, 'import numpy as np\n'), ((954, 965), 'time.time', 'time.time', ([], {}), '()\n', (963, 965), False, 'import time\n'), ((5279, 5290), 'time.time', 'time.time', ([], {}), '()\n', (5288, 5290), False, 'import time\n')] |
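A minimal usage sketch of the two generators above; the HDF5 path, batch size, and fold number are hypothetical. generate_train() is an infinite generator (the caller decides how many iterations to consume), whereas generate_validate() stops after one pass over the selected fold.

gen = DataGenerator(hdf5_path='features/logmel.h5', batch_size=32, holdout_fold=1)

train_gen = gen.generate_train()
for _ in range(100):                 # e.g. 100 training iterations
    batch_x, batch_y = next(train_gen)
    # ... run one training step on (batch_x, batch_y) ...

for batch_x, batch_y, names in gen.generate_validate('validate', shuffle=False):
    pass                             # ... accumulate predictions for scoring ...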
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.tests.helper import pytest
import os.path as op
import itertools
import numpy as np
from astropy.table import Table
import warnings
from astropy.utils.exceptions import AstropyUserWarning
from numpy.testing import assert_allclose
from ..findstars import daofind, irafstarfind
from photutils.datasets import make_100gaussians_image
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import skimage
HAS_SKIMAGE = True
except ImportError:
HAS_SKIMAGE = False
DATA = make_100gaussians_image()
THRESHOLDS = [8.0, 10.0]
FWHMS = [1.0, 1.5, 2.0]
warnings.simplefilter('always', AstropyUserWarning)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestDAOFind(object):
@pytest.mark.parametrize(('threshold', 'fwhm'),
list(itertools.product(THRESHOLDS, FWHMS)))
def test_daofind(self, threshold, fwhm):
t = daofind(DATA, threshold, fwhm, sigma_radius=1.5)
datafn = ('daofind_test_thresh{0:04.1f}_fwhm{1:04.1f}'
'.txt'.format(threshold, fwhm))
datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn)
t_ref = Table.read(datafn, format='ascii')
        assert_allclose(np.array(t).astype(float),
                        np.array(t_ref).astype(float))
def test_daofind_include_border(self):
t = daofind(DATA, threshold=10, fwhm=2, sigma_radius=1.5,
exclude_border=False)
assert len(t) == 20
def test_daofind_exclude_border(self):
t = daofind(DATA, threshold=10, fwhm=2, sigma_radius=1.5,
exclude_border=True)
assert len(t) == 19
def test_daofind_nosources(self):
data = np.ones((3, 3))
t = daofind(data, threshold=10, fwhm=1)
assert len(t) == 0
def test_daofind_sharpness(self):
"""Sources found, but none pass the sharpness criteria."""
t = daofind(DATA, threshold=50, fwhm=1.0, sharplo=1.)
assert len(t) == 0
def test_daofind_roundness(self):
"""Sources found, but none pass the roundness criteria."""
t = daofind(DATA, threshold=50, fwhm=1.0, roundlo=1.)
assert len(t) == 0
def test_daofind_flux_negative(self):
"""Test handling of negative flux (here created by large sky)."""
data = np.ones((5, 5))
data[2, 2] = 10.
t = daofind(data, threshold=0.1, fwhm=1.0, sky=10)
assert not np.isfinite(t['mag'])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestIRAFStarFind(object):
@pytest.mark.parametrize(('threshold', 'fwhm'),
list(itertools.product(THRESHOLDS, FWHMS)))
def test_irafstarfind(self, threshold, fwhm):
t = irafstarfind(DATA, threshold, fwhm, sigma_radius=1.5)
datafn = ('irafstarfind_test_thresh{0:04.1f}_fwhm{1:04.1f}'
'.txt'.format(threshold, fwhm))
datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn)
t_ref = Table.read(datafn, format='ascii')
        assert_allclose(np.array(t).astype(float),
                        np.array(t_ref).astype(float))
def test_irafstarfind_nosources(self):
data = np.ones((3, 3))
t = irafstarfind(data, threshold=10, fwhm=1)
assert len(t) == 0
def test_irafstarfind_sharpness(self):
"""Sources found, but none pass the sharpness criteria."""
t = irafstarfind(DATA, threshold=50, fwhm=1.0, sharplo=2.)
assert len(t) == 0
def test_irafstarfind_roundness(self):
"""Sources found, but none pass the roundness criteria."""
t = irafstarfind(DATA, threshold=50, fwhm=1.0, roundlo=1.)
assert len(t) == 0
def test_irafstarfind_sky(self):
t = irafstarfind(DATA, threshold=25.0, fwhm=2.0, sky=10.)
assert len(t) == 4
def test_irafstarfind_largesky(self):
t = irafstarfind(DATA, threshold=25.0, fwhm=2.0, sky=100.)
assert len(t) == 0
| [
"numpy.ones",
"astropy.tests.helper.pytest.mark.skipif",
"itertools.product",
"photutils.datasets.make_100gaussians_image",
"numpy.array",
"numpy.isfinite",
"warnings.simplefilter",
"os.path.abspath",
"astropy.table.Table.read"
] | [((704, 729), 'photutils.datasets.make_100gaussians_image', 'make_100gaussians_image', ([], {}), '()\n', (727, 729), False, 'from photutils.datasets import make_100gaussians_image\n'), ((779, 830), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'AstropyUserWarning'], {}), "('always', AstropyUserWarning)\n", (800, 830), False, 'import warnings\n'), ((834, 869), 'astropy.tests.helper.pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SCIPY"""'], {}), "('not HAS_SCIPY')\n", (852, 869), False, 'from astropy.tests.helper import pytest\n'), ((871, 908), 'astropy.tests.helper.pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SKIMAGE"""'], {}), "('not HAS_SKIMAGE')\n", (889, 908), False, 'from astropy.tests.helper import pytest\n'), ((2688, 2723), 'astropy.tests.helper.pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SCIPY"""'], {}), "('not HAS_SCIPY')\n", (2706, 2723), False, 'from astropy.tests.helper import pytest\n'), ((2725, 2762), 'astropy.tests.helper.pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SKIMAGE"""'], {}), "('not HAS_SKIMAGE')\n", (2743, 2762), False, 'from astropy.tests.helper import pytest\n'), ((1371, 1405), 'astropy.table.Table.read', 'Table.read', (['datafn'], {'format': '"""ascii"""'}), "(datafn, format='ascii')\n", (1381, 1405), False, 'from astropy.table import Table\n'), ((1931, 1946), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1938, 1946), True, 'import numpy as np\n'), ((2544, 2559), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (2551, 2559), True, 'import numpy as np\n'), ((3245, 3279), 'astropy.table.Table.read', 'Table.read', (['datafn'], {'format': '"""ascii"""'}), "(datafn, format='ascii')\n", (3255, 3279), False, 'from astropy.table import Table\n'), ((3451, 3466), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3458, 3466), True, 'import numpy as np\n'), ((1022, 1058), 'itertools.product', 'itertools.product', (['THRESHOLDS', 'FWHMS'], {}), '(THRESHOLDS, FWHMS)\n', (1039, 1058), False, 'import itertools\n'), ((2663, 2684), 'numpy.isfinite', 'np.isfinite', (["t['mag']"], {}), "(t['mag'])\n", (2674, 2684), True, 'import numpy as np\n'), ((2881, 2917), 'itertools.product', 'itertools.product', (['THRESHOLDS', 'FWHMS'], {}), '(THRESHOLDS, FWHMS)\n', (2898, 2917), False, 'import itertools\n'), ((1316, 1336), 'os.path.abspath', 'op.abspath', (['__file__'], {}), '(__file__)\n', (1326, 1336), True, 'import os.path as op\n'), ((3190, 3210), 'os.path.abspath', 'op.abspath', (['__file__'], {}), '(__file__)\n', (3200, 3210), True, 'import os.path as op\n'), ((1430, 1441), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1438, 1441), True, 'import numpy as np\n'), ((1484, 1499), 'numpy.array', 'np.array', (['t_ref'], {}), '(t_ref)\n', (1492, 1499), True, 'import numpy as np\n'), ((3304, 3315), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (3312, 3315), True, 'import numpy as np\n'), ((3358, 3373), 'numpy.array', 'np.array', (['t_ref'], {}), '(t_ref)\n', (3366, 3373), True, 'import numpy as np\n')] |
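For reference, a quick interactive sketch of the API these tests exercise, on the same synthetic image. It assumes daofind is importable exactly as at the top of this test module (the old photutils find-stars API); the expected detection count comes from test_daofind_exclude_border above.

data = make_100gaussians_image()
tbl = daofind(data, threshold=10, fwhm=2, sigma_radius=1.5, exclude_border=True)
print(len(tbl))      # 19, matching test_daofind_exclude_border
print(tbl.colnames)  # table columns (centroids, sharpness, roundness, flux, ...)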
# playgui.py
# Source: https://github.com/DrGFreeman/rps-cv
#
# MIT License
#
# Copyright (c) 2017-2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file is the main program to run to play the Rock-Paper-Scissors game
# with the pygame graphical user interface (GUI).
import pickle
import random
import sys
import time
import pygame as pg
import pygame.locals
import numpy as np
import cv2
from rpscv import utils
from rpscv import imgproc as imp
from rpscv.gui import RPSGUI
def saveImage(img, gesture, notify=False):
# Define image path and filename
folder = utils.imgPathsRaw[gesture]
name = utils.gestureTxt[gesture] + '-' + time.strftime('%Y%m%d-%H%M%S')
extension = '.png'
if notify:
print('Saving {}'.format(folder + name + extension))
# Save image
cv2.imwrite(folder + name + extension, img)
if __name__ == '__main__':
"""Launches the Rock-Paper-Scissors game with a graphical interface
Command line arguments:
privacy: will display the privacy notice at beginning of game
loop: will launch a new game once current game is over."""
try:
# Initialize game mode variables
privacy = False
loop = False
# Read command line arguments
argv = sys.argv
argv.pop(0)
if len(sys.argv) > 0:
for arg in argv:
if arg == 'privacy':
privacy = True
elif arg == 'loop':
loop = True
else:
print('{} is not a recognized argument'.format(arg))
# Load classifier from pickle file
filename = 'clf.pkl'
with open(filename, 'rb') as f:
clf = pickle.load(f)
# Create camera object with pre-defined settings
cam = utils.cameraSetup()
# Initialize last gesture value
lastGesture = -1
# Define score at which game ends
endScore = 5
# Initialize GUI
gui = RPSGUI(privacy=privacy, loop=loop)
# Load static images for computer gestures
coImgs = {}
img = cv2.imread('img/gui/rock.png', cv2.IMREAD_COLOR)
coImgs[utils.ROCK] = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.imread('img/gui/paper.png', cv2.IMREAD_COLOR)
coImgs[utils.PAPER] = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.imread('img/gui/scissors.png',
cv2.IMREAD_COLOR)
coImgs[utils.SCISSORS] = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Load green image
greenImg = cv2.imread('img/gui/green.png', cv2.IMREAD_COLOR)
greenImg = cv2.cvtColor(greenImg, cv2.COLOR_BGR2RGB)
notify = False
while True:
# Get image from camera
img = imp.crop(cam.getOpenCVImage())
# Convert image to RGB (from BGR)
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Set player image to imgRGB
gui.setPlImg(imgRGB)
# Get grayscale image
gray = imp.getGray(imgRGB, threshold=17)
# Count non-background pixels
nonZero = np.count_nonzero(gray)
# Define waiting time
waitTime = 0
# Parameters for saving new images
gesture = None
            # Check if player hand is present (enough non-background pixels)
            if nonZero > 9000:
# Predict gesture
predGesture = clf.predict([gray])[0]
if predGesture == lastGesture:
successive += 1
else:
successive = 0
if successive == 2:
print('Player: {}'.format(utils.gestureTxt[predGesture]))
waitTime = 3000
gesture = predGesture
# Computer gesture
computerGesture = random.randint(0,2)
print('Computer: {}'.format(utils.gestureTxt[computerGesture]))
# Set computer image to computer gesture
gui.setCoImg(coImgs[computerGesture])
diff = computerGesture - predGesture
if diff in [-2, 1]:
print('Computer wins!')
gui.setWinner('computer')
elif diff in [-1, 2]:
print('Player wins!')
gui.setWinner('player')
else:
print('Tie')
gui.setWinner('tie')
print('Score: player {}, computer {}\n'.format(gui.plScore,
gui.coScore))
lastGesture = predGesture
else:
lastGesture = -1
# Set computer image to green
gui.setCoImg(greenImg)
gui.setWinner()
# Draw GUI
gui.draw()
# Flip pygame display
pg.display.flip()
# Wait
pg.time.wait(waitTime)
if gesture is not None:
# Save new image
saveImage(img, gesture, notify)
# Check pygame events
for event in pg.event.get():
if event.type == pg.locals.QUIT:
gui.quit()
# Check if scores reach endScore (end of game)
if gui.plScore == endScore or gui.coScore == endScore:
if gui.coScore > gui.plScore:
print('Game over, computer wins...\n')
else:
print('Game over, player wins!!!\n')
gui.gameOver()
    finally:
        # f is already closed by the 'with' block above; only the camera
        # needs explicit cleanup here.
        cam.close()
| [
"cv2.imwrite",
"pygame.event.get",
"pygame.time.wait",
"pygame.display.flip",
"time.strftime",
"pickle.load",
"numpy.count_nonzero",
"rpscv.imgproc.getGray",
"cv2.cvtColor",
"rpscv.utils.cameraSetup",
"cv2.imread",
"random.randint",
"rpscv.gui.RPSGUI"
] | [((1837, 1880), 'cv2.imwrite', 'cv2.imwrite', (['(folder + name + extension)', 'img'], {}), '(folder + name + extension, img)\n', (1848, 1880), False, 'import cv2\n'), ((1684, 1714), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (1697, 1714), False, 'import time\n'), ((2838, 2857), 'rpscv.utils.cameraSetup', 'utils.cameraSetup', ([], {}), '()\n', (2855, 2857), False, 'from rpscv import utils\n'), ((3028, 3062), 'rpscv.gui.RPSGUI', 'RPSGUI', ([], {'privacy': 'privacy', 'loop': 'loop'}), '(privacy=privacy, loop=loop)\n', (3034, 3062), False, 'from rpscv.gui import RPSGUI\n'), ((3149, 3197), 'cv2.imread', 'cv2.imread', (['"""img/gui/rock.png"""', 'cv2.IMREAD_COLOR'], {}), "('img/gui/rock.png', cv2.IMREAD_COLOR)\n", (3159, 3197), False, 'import cv2\n'), ((3227, 3263), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3239, 3263), False, 'import cv2\n'), ((3278, 3327), 'cv2.imread', 'cv2.imread', (['"""img/gui/paper.png"""', 'cv2.IMREAD_COLOR'], {}), "('img/gui/paper.png', cv2.IMREAD_COLOR)\n", (3288, 3327), False, 'import cv2\n'), ((3358, 3394), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3370, 3394), False, 'import cv2\n'), ((3409, 3461), 'cv2.imread', 'cv2.imread', (['"""img/gui/scissors.png"""', 'cv2.IMREAD_COLOR'], {}), "('img/gui/scissors.png', cv2.IMREAD_COLOR)\n", (3419, 3461), False, 'import cv2\n'), ((3520, 3556), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3532, 3556), False, 'import cv2\n'), ((3604, 3653), 'cv2.imread', 'cv2.imread', (['"""img/gui/green.png"""', 'cv2.IMREAD_COLOR'], {}), "('img/gui/green.png', cv2.IMREAD_COLOR)\n", (3614, 3653), False, 'import cv2\n'), ((3673, 3714), 'cv2.cvtColor', 'cv2.cvtColor', (['greenImg', 'cv2.COLOR_BGR2RGB'], {}), '(greenImg, cv2.COLOR_BGR2RGB)\n', (3685, 3714), False, 'import cv2\n'), ((2751, 2765), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2762, 2765), False, 'import pickle\n'), ((3914, 3950), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3926, 3950), False, 'import cv2\n'), ((4080, 4113), 'rpscv.imgproc.getGray', 'imp.getGray', (['imgRGB'], {'threshold': '(17)'}), '(imgRGB, threshold=17)\n', (4091, 4113), True, 'from rpscv import imgproc as imp\n'), ((4179, 4201), 'numpy.count_nonzero', 'np.count_nonzero', (['gray'], {}), '(gray)\n', (4195, 4201), True, 'import numpy as np\n'), ((6045, 6062), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (6060, 6062), True, 'import pygame as pg\n'), ((6095, 6117), 'pygame.time.wait', 'pg.time.wait', (['waitTime'], {}), '(waitTime)\n', (6107, 6117), True, 'import pygame as pg\n'), ((6296, 6310), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (6308, 6310), True, 'import pygame as pg\n'), ((4914, 4934), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4928, 4934), False, 'import random\n')] |
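The 'successive' counter in the main loop above implements a small debouncing pattern: a gesture is only accepted after three identical predictions in a row, which filters out single-frame classifier noise. The same logic factored into a reusable helper (an illustrative sketch, not part of the original file):

class GestureDebouncer:
    """Report a gesture only once it is predicted on n_frames successive frames."""

    def __init__(self, n_frames=3):
        self.n_frames = n_frames
        self.last = None
        self.count = 0

    def update(self, gesture):
        if gesture == self.last:
            self.count += 1
        else:
            self.last = gesture
            self.count = 1
        # Return the stable gesture exactly once, on the n_frames-th repeat
        return gesture if self.count == self.n_frames else None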
import numpy as np
from mayavi import mlab
def sectional2nodal(x):
return np.r_[x[0], np.convolve(x, [0.5, 0.5], "valid"), x[-1]]
def nodal2sectional(x):
return 0.5 * (x[:-1] + x[1:])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
class Visualize(object):
def __init__(self, prob):
prob.run_model()
self.prob = prob
self.fig = None
def draw_spar(self, fname="spar.png"):
self.init_figure()
self.draw_ocean()
self.draw_mooring(self.prob["mooring_plot_matrix"])
zcut = 1.0 + self.prob["main_freeboard"]
self.draw_pontoons(self.prob["plot_matrix"], 0.5 * self.prob["fairlead_support_outer_diameter"], zcut)
self.draw_column(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"],
self.prob["main.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["main.wall_thickness"])
self.draw_ballast(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"] - t_full,
self.prob["main.permanent_ballast_height"],
self.prob["variable_ballast_height"],
)
self.draw_column(
[0.0, 0.0],
self.prob["hub_height"],
self.prob["tow.tower_section_height"],
0.5 * self.prob["tow.tower_outer_diameter"],
None,
(0.9,) * 3,
)
if self.prob["main.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
self.prob["main.buoyancy_tank_location"],
0.5 * self.prob["main.buoyancy_tank_diameter"],
self.prob["main.buoyancy_tank_height"],
)
self.set_figure(fname)
def draw_semi(self, fname="semi.png"):
self.init_figure()
self.draw_ocean()
self.draw_mooring(self.prob["mooring_plot_matrix"])
pontoonMat = self.prob["plot_matrix"]
zcut = 1.0 + np.maximum(self.prob["main_freeboard"], self.prob["offset_freeboard"])
self.draw_pontoons(pontoonMat, 0.5 * self.prob["pontoon_outer_diameter"], zcut)
self.draw_column(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"],
self.prob["main.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["main.wall_thickness"])
self.draw_ballast(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"] - t_full,
self.prob["main.permanent_ballast_height"],
self.prob["variable_ballast_height"],
)
if self.prob["main.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
self.prob["main.buoyancy_tank_location"],
0.5 * self.prob["main.buoyancy_tank_diameter"],
self.prob["main.buoyancy_tank_height"],
)
R_semi = self.prob["radius_to_offset_column"]
ncolumn = int(self.prob["number_of_offset_columns"])
angles = np.linspace(0, 2 * np.pi, ncolumn + 1)
x = R_semi * np.cos(angles)
y = R_semi * np.sin(angles)
for k in range(ncolumn):
self.draw_column(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
0.5 * self.prob["off.outer_diameter"],
self.prob["off.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["off.wall_thickness"])
self.draw_ballast(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
0.5 * self.prob["off.outer_diameter"] - t_full,
self.prob["off.permanent_ballast_height"],
0.0,
)
if self.prob["off.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
self.prob["off.buoyancy_tank_location"],
0.5 * self.prob["off.buoyancy_tank_diameter"],
self.prob["off.buoyancy_tank_height"],
)
self.draw_column(
[0.0, 0.0],
self.prob["hub_height"],
self.prob["tow.tower_section_height"],
0.5 * self.prob["tow.tower_outer_diameter"],
None,
(0.9,) * 3,
)
self.set_figure(fname)
def init_figure(self):
mysky = np.array([135, 206, 250]) / 255.0
mysky = tuple(mysky.tolist())
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# fig = mlab.figure(bgcolor=(1,)*3, size=(1600,1100))
# fig = mlab.figure(bgcolor=mysky, size=(1600,1100))
self.fig = mlab.figure(bgcolor=(0,) * 3, size=(1600, 1100))
def draw_ocean(self):
if self.fig is None:
self.init_figure()
npts = 100
# mybrown = np.array([244, 170, 66]) / 255.0
# mybrown = tuple(mybrown.tolist())
mywater = np.array([95, 158, 160]) / 255.0 # (0.0, 0.0, 0.8) [143, 188, 143]
mywater = tuple(mywater.tolist())
alpha = 0.3
# Waterplane box
x = y = 100 * np.linspace(-1, 1, npts)
X, Y = np.meshgrid(x, y)
Z = np.sin(100 * X * Y) # np.zeros(X.shape)
# ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
mlab.mesh(X, Y, Z, opacity=alpha, color=mywater, figure=self.fig)
# Sea floor
Z = -self.prob["water_depth"] * np.ones(X.shape)
# ax.plot_surface(10*X, 10*Y, Z, alpha=1.0, color=mybrown)
# mlab.mesh(10*X,10*Y,Z, opacity=1.0, color=mybrown, figure=self.fig)
# Sides
# x = 500 * np.linspace(-1, 1, npts)
# z = self.prob['water_depth'] * np.linspace(-1, 0, npts)
# X,Z = np.meshgrid(x,z)
# Y = x.max()*np.ones(Z.shape)
##ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
# mlab.mesh(X,Y,Z, opacity=alpha, color=mywater, figure=self.fig)
# mlab.mesh(X,-Y,Z, opacity=alpha, color=mywater, figure=self.fig)
# mlab.mesh(Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
##mlab.mesh(-Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
def draw_mooring(self, mooring):
mybrown = np.array([244, 170, 66]) / 255.0
mybrown = tuple(mybrown.tolist())
npts = 100
        # Sea floor disc out to the anchor radius
r = np.linspace(0, self.prob["anchor_radius"], npts)
th = np.linspace(0, 2 * np.pi, npts)
R, TH = np.meshgrid(r, th)
X = R * np.cos(TH)
Y = R * np.sin(TH)
Z = -self.prob["water_depth"] * np.ones(X.shape)
# ax.plot_surface(X, Y, Z, alpha=1.0, color=mybrown)
mlab.mesh(X, Y, Z, opacity=1.0, color=mybrown, figure=self.fig)
cmoor = (0, 0.8, 0)
nlines = int(self.prob["number_of_mooring_connections"] * self.prob["mooring_lines_per_connection"])
for k in range(nlines):
# ax.plot(mooring[k,:,0], mooring[k,:,1], mooring[k,:,2], 'k', lw=2)
mlab.plot3d(
mooring[k, :, 0],
mooring[k, :, 1],
mooring[k, :, 2],
color=cmoor,
tube_radius=0.5 * self.prob["mooring_diameter"],
figure=self.fig,
)
def draw_pontoons(self, truss, R, freeboard):
nE = truss.shape[0]
c = (0.5, 0, 0)
for k in range(nE):
if np.any(truss[k, 2, :] > freeboard):
continue
mlab.plot3d(truss[k, 0, :], truss[k, 1, :], truss[k, 2, :], color=c, tube_radius=R, figure=self.fig)
def draw_column(self, centerline, freeboard, h_section, r_nodes, spacingVec=None, ckIn=None):
npts = 20
nsection = h_section.size
z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
th = np.linspace(0, 2 * np.pi, npts)
for k in range(nsection):
rk = np.linspace(r_nodes[k], r_nodes[k + 1], npts)
z = np.linspace(z_nodes[k], z_nodes[k + 1], npts)
R, TH = np.meshgrid(rk, th)
Z, _ = np.meshgrid(z, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
# Draw parameters
if ckIn is None:
ck = (0.6,) * 3 if np.mod(k, 2) == 0 else (0.4,) * 3
else:
ck = ckIn
# ax.plot_surface(X, Y, Z, alpha=0.5, color=ck)
mlab.mesh(X, Y, Z, opacity=0.7, color=ck, figure=self.fig)
if spacingVec is None:
continue
z = z_nodes[k] + spacingVec[k]
while z < z_nodes[k + 1]:
rk = np.interp(z, z_nodes[k:], r_nodes[k:])
# ax.plot(rk*np.cos(th), rk*np.sin(th), z*np.ones(th.shape), 'r', lw=0.25)
mlab.plot3d(
rk * np.cos(th) + centerline[0],
rk * np.sin(th) + centerline[1],
z * np.ones(th.shape),
color=(0.5, 0, 0),
figure=self.fig,
)
z += spacingVec[k]
"""
# Web
r = np.linspace(rk - self.prob['stiffener_web_height'][k], rk, npts)
R, TH = np.meshgrid(r, th)
Z, _ = np.meshgrid(z, th)
X = R*np.cos(TH)
Y = R*np.sin(TH)
ax.plot_surface(X, Y, Z, alpha=0.7, color='r')
# Flange
r = r[0]
h = np.linspace(0, self.prob['stiffener_flange_width'][k], npts)
zflange = z + h - 0.5*self.prob['stiffener_flange_width'][k]
R, TH = np.meshgrid(r, th)
Z, _ = np.meshgrid(zflange, th)
X = R*np.cos(TH)
Y = R*np.sin(TH)
ax.plot_surface(X, Y, Z, alpha=0.7, color='r')
"""
def draw_ballast(self, centerline, freeboard, h_section, r_nodes, h_perm, h_water):
npts = 40
th = np.linspace(0, 2 * np.pi, npts)
z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
# Permanent ballast
z_perm = z_nodes[0] + np.linspace(0, h_perm, npts)
r_perm = np.interp(z_perm, z_nodes, r_nodes)
R, TH = np.meshgrid(r_perm, th)
Z, _ = np.meshgrid(z_perm, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
ck = np.array([122, 85, 33]) / 255.0
ck = tuple(ck.tolist())
mlab.mesh(X, Y, Z, color=ck, figure=self.fig)
# Water ballast
z_water = z_perm[-1] + np.linspace(0, h_water, npts)
r_water = np.interp(z_water, z_nodes, r_nodes)
R, TH = np.meshgrid(r_water, th)
Z, _ = np.meshgrid(z_water, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
ck = (0.0, 0.1, 0.8) # Dark blue
mlab.mesh(X, Y, Z, color=ck, figure=self.fig)
def draw_buoyancy_tank(self, centerline, freeboard, h_section, loc, r_box, h_box):
npts = 20
z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
z_lower = loc * (z_nodes[-1] - z_nodes[0]) + z_nodes[0]
# Lower and Upper surfaces
r = np.linspace(0, r_box, npts)
th = np.linspace(0, 2 * np.pi, npts)
R, TH = np.meshgrid(r, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
Z = z_lower * np.ones(X.shape)
ck = (0.9,) * 3
mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
Z += h_box
mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
# Cylinder part
z = z_lower + np.linspace(0, h_box, npts)
Z, TH = np.meshgrid(z, th)
R = r_box * np.ones(Z.shape)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
def set_figure(self, fname=None):
# ax.set_aspect('equal')
# set_axes_equal(ax)
# ax.autoscale_view(tight=True)
# ax.set_xlim([-125, 125])
# ax.set_ylim([-125, 125])
# ax.set_zlim([-220, 30])
# plt.axis('off')
# plt.show()
# mlab.move([-517.16728532, -87.0711504, 5.60826224], [1.35691603e+01, -2.84217094e-14, -1.06547500e+02])
# mlab.view(-170.68320804213343, 78.220729198686854, 549.40101471336777, [1.35691603e+01, 0.0, -1.06547500e+02])
        if fname is not None:
fpart = fname.split(".")
if len(fpart) == 1 or not fpart[-1].lower() in ["jpg", "png", "bmp"]:
fname += ".png"
mlab.savefig(fname, figure=self.fig)
mlab.show()
| [
"numpy.mean",
"numpy.convolve",
"numpy.ones",
"mayavi.mlab.show",
"mayavi.mlab.savefig",
"mayavi.mlab.mesh",
"numpy.flipud",
"numpy.any",
"mayavi.mlab.figure",
"numpy.array",
"numpy.linspace",
"mayavi.mlab.plot3d",
"numpy.cos",
"numpy.interp",
"numpy.sin",
"numpy.meshgrid",
"numpy.ma... | [((678, 695), 'numpy.mean', 'np.mean', (['x_limits'], {}), '(x_limits)\n', (685, 695), True, 'import numpy as np\n'), ((756, 773), 'numpy.mean', 'np.mean', (['y_limits'], {}), '(y_limits)\n', (763, 773), True, 'import numpy as np\n'), ((834, 851), 'numpy.mean', 'np.mean', (['z_limits'], {}), '(z_limits)\n', (841, 851), True, 'import numpy as np\n'), ((4575, 4613), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(ncolumn + 1)'], {}), '(0, 2 * np.pi, ncolumn + 1)\n', (4586, 4613), True, 'import numpy as np\n'), ((6445, 6493), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '((0,) * 3)', 'size': '(1600, 1100)'}), '(bgcolor=(0,) * 3, size=(1600, 1100))\n', (6456, 6493), False, 'from mayavi import mlab\n'), ((6934, 6951), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (6945, 6951), True, 'import numpy as np\n'), ((6964, 6983), 'numpy.sin', 'np.sin', (['(100 * X * Y)'], {}), '(100 * X * Y)\n', (6970, 6983), True, 'import numpy as np\n'), ((7076, 7141), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'opacity': 'alpha', 'color': 'mywater', 'figure': 'self.fig'}), '(X, Y, Z, opacity=alpha, color=mywater, figure=self.fig)\n', (7085, 7141), False, 'from mayavi import mlab\n'), ((8151, 8199), 'numpy.linspace', 'np.linspace', (['(0)', "self.prob['anchor_radius']", 'npts'], {}), "(0, self.prob['anchor_radius'], npts)\n", (8162, 8199), True, 'import numpy as np\n'), ((8213, 8244), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'npts'], {}), '(0, 2 * np.pi, npts)\n', (8224, 8244), True, 'import numpy as np\n'), ((8261, 8279), 'numpy.meshgrid', 'np.meshgrid', (['r', 'th'], {}), '(r, th)\n', (8272, 8279), True, 'import numpy as np\n'), ((8460, 8523), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'opacity': '(1.0)', 'color': 'mybrown', 'figure': 'self.fig'}), '(X, Y, Z, opacity=1.0, color=mybrown, figure=self.fig)\n', (8469, 8523), False, 'from mayavi import mlab\n'), ((9613, 9644), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'npts'], {}), '(0, 2 * np.pi, npts)\n', (9624, 9644), True, 'import numpy as np\n'), ((11796, 11827), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'npts'], {}), '(0, 2 * np.pi, npts)\n', (11807, 11827), True, 'import numpy as np\n'), ((12018, 12053), 'numpy.interp', 'np.interp', (['z_perm', 'z_nodes', 'r_nodes'], {}), '(z_perm, z_nodes, r_nodes)\n', (12027, 12053), True, 'import numpy as np\n'), ((12070, 12093), 'numpy.meshgrid', 'np.meshgrid', (['r_perm', 'th'], {}), '(r_perm, th)\n', (12081, 12093), True, 'import numpy as np\n'), ((12109, 12132), 'numpy.meshgrid', 'np.meshgrid', (['z_perm', 'th'], {}), '(z_perm, th)\n', (12120, 12132), True, 'import numpy as np\n'), ((12304, 12349), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'color': 'ck', 'figure': 'self.fig'}), '(X, Y, Z, color=ck, figure=self.fig)\n', (12313, 12349), False, 'from mayavi import mlab\n'), ((12454, 12490), 'numpy.interp', 'np.interp', (['z_water', 'z_nodes', 'r_nodes'], {}), '(z_water, z_nodes, r_nodes)\n', (12463, 12490), True, 'import numpy as np\n'), ((12507, 12531), 'numpy.meshgrid', 'np.meshgrid', (['r_water', 'th'], {}), '(r_water, th)\n', (12518, 12531), True, 'import numpy as np\n'), ((12547, 12571), 'numpy.meshgrid', 'np.meshgrid', (['z_water', 'th'], {}), '(z_water, th)\n', (12558, 12571), True, 'import numpy as np\n'), ((12708, 12753), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'color': 'ck', 'figure': 'self.fig'}), '(X, Y, Z, color=ck, figure=self.fig)\n', (12717, 12753), False, 'from mayavi 
import mlab\n'), ((13057, 13084), 'numpy.linspace', 'np.linspace', (['(0)', 'r_box', 'npts'], {}), '(0, r_box, npts)\n', (13068, 13084), True, 'import numpy as np\n'), ((13098, 13129), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'npts'], {}), '(0, 2 * np.pi, npts)\n', (13109, 13129), True, 'import numpy as np\n'), ((13146, 13164), 'numpy.meshgrid', 'np.meshgrid', (['r', 'th'], {}), '(r, th)\n', (13157, 13164), True, 'import numpy as np\n'), ((13322, 13380), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'opacity': '(0.5)', 'color': 'ck', 'figure': 'self.fig'}), '(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)\n', (13331, 13380), False, 'from mayavi import mlab\n'), ((13408, 13466), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'opacity': '(0.5)', 'color': 'ck', 'figure': 'self.fig'}), '(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)\n', (13417, 13466), False, 'from mayavi import mlab\n'), ((13558, 13576), 'numpy.meshgrid', 'np.meshgrid', (['z', 'th'], {}), '(z, th)\n', (13569, 13576), True, 'import numpy as np\n'), ((13708, 13766), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'opacity': '(0.5)', 'color': 'ck', 'figure': 'self.fig'}), '(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)\n', (13717, 13766), False, 'from mayavi import mlab\n'), ((14533, 14544), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (14542, 14544), False, 'from mayavi import mlab\n'), ((92, 127), 'numpy.convolve', 'np.convolve', (['x', '[0.5, 0.5]', '"""valid"""'], {}), "(x, [0.5, 0.5], 'valid')\n", (103, 127), True, 'import numpy as np\n'), ((3243, 3313), 'numpy.maximum', 'np.maximum', (["self.prob['main_freeboard']", "self.prob['offset_freeboard']"], {}), "(self.prob['main_freeboard'], self.prob['offset_freeboard'])\n", (3253, 3313), True, 'import numpy as np\n'), ((4635, 4649), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (4641, 4649), True, 'import numpy as np\n'), ((4671, 4685), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (4677, 4685), True, 'import numpy as np\n'), ((6149, 6174), 'numpy.array', 'np.array', (['[135, 206, 250]'], {}), '([135, 206, 250])\n', (6157, 6174), True, 'import numpy as np\n'), ((6716, 6740), 'numpy.array', 'np.array', (['[95, 158, 160]'], {}), '([95, 158, 160])\n', (6724, 6740), True, 'import numpy as np\n'), ((6894, 6918), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (6905, 6918), True, 'import numpy as np\n'), ((7203, 7219), 'numpy.ones', 'np.ones', (['X.shape'], {}), '(X.shape)\n', (7210, 7219), True, 'import numpy as np\n'), ((7982, 8006), 'numpy.array', 'np.array', (['[244, 170, 66]'], {}), '([244, 170, 66])\n', (7990, 8006), True, 'import numpy as np\n'), ((8296, 8306), 'numpy.cos', 'np.cos', (['TH'], {}), '(TH)\n', (8302, 8306), True, 'import numpy as np\n'), ((8323, 8333), 'numpy.sin', 'np.sin', (['TH'], {}), '(TH)\n', (8329, 8333), True, 'import numpy as np\n'), ((8374, 8390), 'numpy.ones', 'np.ones', (['X.shape'], {}), '(X.shape)\n', (8381, 8390), True, 'import numpy as np\n'), ((8786, 8935), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['mooring[k, :, 0]', 'mooring[k, :, 1]', 'mooring[k, :, 2]'], {'color': 'cmoor', 'tube_radius': "(0.5 * self.prob['mooring_diameter'])", 'figure': 'self.fig'}), "(mooring[k, :, 0], mooring[k, :, 1], mooring[k, :, 2], color=\n cmoor, tube_radius=0.5 * self.prob['mooring_diameter'], figure=self.fig)\n", (8797, 8935), False, 'from mayavi import mlab\n'), ((9188, 9222), 'numpy.any', 'np.any', (['(truss[k, 2, :] > freeboard)'], {}), '(truss[k, 2, :] > 
freeboard)\n', (9194, 9222), True, 'import numpy as np\n'), ((9261, 9365), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['truss[k, 0, :]', 'truss[k, 1, :]', 'truss[k, 2, :]'], {'color': 'c', 'tube_radius': 'R', 'figure': 'self.fig'}), '(truss[k, 0, :], truss[k, 1, :], truss[k, 2, :], color=c,\n tube_radius=R, figure=self.fig)\n', (9272, 9365), False, 'from mayavi import mlab\n'), ((9696, 9741), 'numpy.linspace', 'np.linspace', (['r_nodes[k]', 'r_nodes[k + 1]', 'npts'], {}), '(r_nodes[k], r_nodes[k + 1], npts)\n', (9707, 9741), True, 'import numpy as np\n'), ((9758, 9803), 'numpy.linspace', 'np.linspace', (['z_nodes[k]', 'z_nodes[k + 1]', 'npts'], {}), '(z_nodes[k], z_nodes[k + 1], npts)\n', (9769, 9803), True, 'import numpy as np\n'), ((9824, 9843), 'numpy.meshgrid', 'np.meshgrid', (['rk', 'th'], {}), '(rk, th)\n', (9835, 9843), True, 'import numpy as np\n'), ((9863, 9881), 'numpy.meshgrid', 'np.meshgrid', (['z', 'th'], {}), '(z, th)\n', (9874, 9881), True, 'import numpy as np\n'), ((10221, 10279), 'mayavi.mlab.mesh', 'mlab.mesh', (['X', 'Y', 'Z'], {'opacity': '(0.7)', 'color': 'ck', 'figure': 'self.fig'}), '(X, Y, Z, opacity=0.7, color=ck, figure=self.fig)\n', (10230, 10279), False, 'from mayavi import mlab\n'), ((11972, 12000), 'numpy.linspace', 'np.linspace', (['(0)', 'h_perm', 'npts'], {}), '(0, h_perm, npts)\n', (11983, 12000), True, 'import numpy as np\n'), ((12232, 12255), 'numpy.array', 'np.array', (['[122, 85, 33]'], {}), '([122, 85, 33])\n', (12240, 12255), True, 'import numpy as np\n'), ((12406, 12435), 'numpy.linspace', 'np.linspace', (['(0)', 'h_water', 'npts'], {}), '(0, h_water, npts)\n', (12417, 12435), True, 'import numpy as np\n'), ((13273, 13289), 'numpy.ones', 'np.ones', (['X.shape'], {}), '(X.shape)\n', (13280, 13289), True, 'import numpy as np\n'), ((13514, 13541), 'numpy.linspace', 'np.linspace', (['(0)', 'h_box', 'npts'], {}), '(0, h_box, npts)\n', (13525, 13541), True, 'import numpy as np\n'), ((13597, 13613), 'numpy.ones', 'np.ones', (['Z.shape'], {}), '(Z.shape)\n', (13604, 13613), True, 'import numpy as np\n'), ((14488, 14524), 'mayavi.mlab.savefig', 'mlab.savefig', (['fname'], {'figure': 'self.fig'}), '(fname, figure=self.fig)\n', (14500, 14524), False, 'from mayavi import mlab\n'), ((10444, 10482), 'numpy.interp', 'np.interp', (['z', 'z_nodes[k:]', 'r_nodes[k:]'], {}), '(z, z_nodes[k:], r_nodes[k:])\n', (10453, 10482), True, 'import numpy as np\n'), ((12149, 12159), 'numpy.cos', 'np.cos', (['TH'], {}), '(TH)\n', (12155, 12159), True, 'import numpy as np\n'), ((12192, 12202), 'numpy.sin', 'np.sin', (['TH'], {}), '(TH)\n', (12198, 12202), True, 'import numpy as np\n'), ((12588, 12598), 'numpy.cos', 'np.cos', (['TH'], {}), '(TH)\n', (12594, 12598), True, 'import numpy as np\n'), ((12631, 12641), 'numpy.sin', 'np.sin', (['TH'], {}), '(TH)\n', (12637, 12641), True, 'import numpy as np\n'), ((13181, 13191), 'numpy.cos', 'np.cos', (['TH'], {}), '(TH)\n', (13187, 13191), True, 'import numpy as np\n'), ((13224, 13234), 'numpy.sin', 'np.sin', (['TH'], {}), '(TH)\n', (13230, 13234), True, 'import numpy as np\n'), ((13630, 13640), 'numpy.cos', 'np.cos', (['TH'], {}), '(TH)\n', (13636, 13640), True, 'import numpy as np\n'), ((13673, 13683), 'numpy.sin', 'np.sin', (['TH'], {}), '(TH)\n', (13679, 13683), True, 'import numpy as np\n'), ((9902, 9912), 'numpy.cos', 'np.cos', (['TH'], {}), '(TH)\n', (9908, 9912), True, 'import numpy as np\n'), ((9949, 9959), 'numpy.sin', 'np.sin', (['TH'], {}), '(TH)\n', (9955, 9959), True, 'import numpy as np\n'), ((10071, 10083), 'numpy.mod', 
'np.mod', (['k', '(2)'], {}), '(k, 2)\n', (10077, 10083), True, 'import numpy as np\n'), ((10733, 10750), 'numpy.ones', 'np.ones', (['th.shape'], {}), '(th.shape)\n', (10740, 10750), True, 'import numpy as np\n'), ((9575, 9595), 'numpy.flipud', 'np.flipud', (['h_section'], {}), '(h_section)\n', (9584, 9595), True, 'import numpy as np\n'), ((10628, 10638), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (10634, 10638), True, 'import numpy as np\n'), ((10681, 10691), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (10687, 10691), True, 'import numpy as np\n'), ((11889, 11909), 'numpy.flipud', 'np.flipud', (['h_section'], {}), '(h_section)\n', (11898, 11909), True, 'import numpy as np\n'), ((12921, 12941), 'numpy.flipud', 'np.flipud', (['h_section'], {}), '(h_section)\n', (12930, 12941), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from matplotlib import gridspec
import h5py
import os
from util import *
import dxchange
import numpy as np
params_2d_cell = {'grid_delta': np.load('cell/phantom/grid_delta.npy'),
'compression_mode': 2,
'obj':[],
'ref':[],
'nei':'500',
'save_path': 'cell/ptychography/comparison',
'fig_ax':[],
'radius_ls':[],
'nei_intersection_ls':[],
'radius_intersection_ls':[],
'T_half_bit_ls':[],
'show_plot_title': True,
'plot_T_half_th': True,
'save_mask': False,
}
params = params_2d_cell
print('dimension of the sample = ' +', '.join(map(str, params['grid_delta'].shape)))
n_sample_pixel = np.count_nonzero(params['grid_delta']> 1e-10)
print('n_sample_pixel = %d' %n_sample_pixel)
print('finite support area ratio in sample = %.3f' %(n_sample_pixel/(params['grid_delta'].shape[0]*params['grid_delta'].shape[1])))
if params['compression_mode'] == 0: compression_mode = 'normal'
if params['compression_mode'] == 1: compression_mode = 'PCA_compressed'
if params['compression_mode'] == 2: compression_mode = 'AE_compressed'
matplotlib.rcParams['pdf.fonttype'] = 'truetype'
fontProperties = {'family': 'serif', 'serif': ['Helvetica'], 'weight': 'normal', 'size': 12}
plt.rc('font', **fontProperties)
#spec = gridspec.GridSpec(1, 2, width_ratios=[7, 1])
#fig = plt.figure(figsize=(8, 5))
spec = gridspec.GridSpec(1, 1)
fig = plt.figure(figsize=(6, 4))
params['fig_ax'] = fig.add_subplot(spec[0, 0])
path = os.path.dirname(params['save_path'])
if compression_mode == 'normal':
nei_ls = ['']
elif compression_mode == 'PCA_compressed':
    nei_ls = [1, 10, 30, 50, 100, 300, 1000, 1500, 2000, 2500, 3000, 3500]
elif compression_mode == 'AE_compressed':
nei_ls = ['n2e7','AE_72x72','AE_72x72_ZS','AE_rWeightLoss_72x72','AE_rWeightLoss_72x72_ZS']
nei_intersection_ls = []
radius_intersection_ls = []
for nei in nei_ls:
params['nei'] = nei
if compression_mode == 'normal' :
obj_dir = os.path.join(path, 'n2e7')
ref_dir = os.path.join(path, 'n2e7_ref')
elif compression_mode == 'PCA_compressed' :
obj_dir = os.path.join(path, 'n2e7_nei' + str(nei))
ref_dir = os.path.join(path, 'n2e7_nei' + str(nei) + '_ref')
elif compression_mode == 'AE_compressed' :
obj_dir = os.path.join(path, nei)
ref_dir = os.path.join(path, '../phantom/')
if compression_mode=='AE_compressed':
params['obj'] = dxchange.read_tiff(os.path.join(obj_dir, 'delta_ds_1.tiff'))
params['obj'] = params['obj'][:, :, 0]
params['ref'] = dxchange.read_tiff(os.path.join(ref_dir, 'delta.tiff'))
else:
params['obj'] = dxchange.read_tiff(os.path.join(obj_dir, 'delta_ds_1.tiff'))
params['obj'] = params['obj'][:, :, 0]
params['ref'] = dxchange.read_tiff(os.path.join(ref_dir, 'delta_ds_1.tiff'))
params['ref'] = params['ref'][:, :, 0]
if params['show_plot_title']: Plot_title = compression_mode
else: Plot_title = None
nei_intersection, radius_intersection, params['radius_ls'], params['T_half_bit_ls'] = fourier_ring_correlation_PCA(**params)
    if nei_intersection is not None:
        params['nei_intersection_ls'].append(nei_intersection)
        params['radius_intersection_ls'].append(radius_intersection)
if params['plot_T_half_th']:
half_bit_threshold(params['fig_ax'], params['radius_ls'], params['T_half_bit_ls'])
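# Editor's note (hedged): half_bit_threshold comes from `util` and its code is
# not shown here. The standard half-bit criterion of van Heel & Schatz (2005),
# which such thresholds typically implement, is
#   T(r) = (0.2071 + 1.9102 / sqrt(n_r)) / (1.2071 + 0.9102 / sqrt(n_r)),
# with n_r the number of Fourier samples in ring r. A minimal sketch (the name
# half_bit_curve is illustrative, not taken from util):
def half_bit_curve(n_r):
    n_r = np.asarray(n_r, dtype=float)
    return (0.2071 + 1.9102 / np.sqrt(n_r)) / (1.2071 + 0.9102 / np.sqrt(n_r))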
#params['fig_ax'].legend(loc=3, bbox_to_anchor=(1.0, 0.0, 0.5, 0.5), fontsize=12, ncol=1, title='photon number')
plt.savefig(os.path.join(params['save_path'], 'frc_PCAmode'+str(params['compression_mode'])+'.pdf'), format='pdf')
fig.clear()
plt.close(fig)
np.savez(os.path.join(params['save_path'], 'frc_PCAmode'+ str(params['compression_mode'])+'_intersection'), np.array(params['nei_intersection_ls']), np.array(params['radius_intersection_ls'])/params['radius_ls'][-1])
fig = plt.figure(figsize=(8, 5))
fig_ax = fig.add_subplot(spec[0,0])
fig_ax.plot(params['nei_intersection_ls'], np.array(params['radius_intersection_ls'])/params['radius_ls'][-1], '-bs', markerfacecolor='none', markeredgecolor='blue', label = compression_mode)
print(params['nei_intersection_ls'])
print(params['radius_intersection_ls'])
fig_ax.set_xlabel('n_eigenimages')
fig_ax.set_ylabel('FRC/half-bit crossing fraction')
# fig_ax.set_ylim(0,1.1)
fig_ax.set_xscale('log')
fig_ax.legend(loc=3, bbox_to_anchor=(1.0,0.0,0.5,0.5), ncol=1, title = 'data type')
plt.savefig(os.path.join(params['save_path'], 'frc_'+ str(params['compression_mode'])+'_intersection.pdf'), format='pdf', dpi=600)
fig.clear()
plt.close(fig)
| [
"matplotlib.use",
"os.path.join",
"numpy.count_nonzero",
"os.path.dirname",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.load",
"matplotlib.pyplot.rc"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (32, 39), False, 'import matplotlib\n'), ((909, 955), 'numpy.count_nonzero', 'np.count_nonzero', (["(params['grid_delta'] > 1e-10)"], {}), "(params['grid_delta'] > 1e-10)\n", (925, 955), True, 'import numpy as np\n'), ((1484, 1516), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **fontProperties)\n", (1490, 1516), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1634), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (1628, 1634), False, 'from matplotlib import gridspec\n'), ((1641, 1667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (1651, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1760), 'os.path.dirname', 'os.path.dirname', (["params['save_path']"], {}), "(params['save_path'])\n", (1739, 1760), False, 'import os\n'), ((3931, 3945), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3940, 3945), True, 'import matplotlib.pyplot as plt\n'), ((4172, 4198), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (4182, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4860, 4874), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4869, 4874), True, 'import matplotlib.pyplot as plt\n'), ((214, 252), 'numpy.load', 'np.load', (['"""cell/phantom/grid_delta.npy"""'], {}), "('cell/phantom/grid_delta.npy')\n", (221, 252), True, 'import numpy as np\n'), ((4056, 4095), 'numpy.array', 'np.array', (["params['nei_intersection_ls']"], {}), "(params['nei_intersection_ls'])\n", (4064, 4095), True, 'import numpy as np\n'), ((4097, 4165), 'numpy.array', 'np.array', (["(params['radius_intersection_ls'] / params['radius_ls'][-1])"], {}), "(params['radius_intersection_ls'] / params['radius_ls'][-1])\n", (4105, 4165), True, 'import numpy as np\n'), ((2222, 2248), 'os.path.join', 'os.path.join', (['path', '"""n2e7"""'], {}), "(path, 'n2e7')\n", (2234, 2248), False, 'import os\n'), ((2267, 2297), 'os.path.join', 'os.path.join', (['path', '"""n2e7_ref"""'], {}), "(path, 'n2e7_ref')\n", (2279, 2297), False, 'import os\n'), ((2708, 2748), 'os.path.join', 'os.path.join', (['obj_dir', '"""delta_ds_1.tiff"""'], {}), "(obj_dir, 'delta_ds_1.tiff')\n", (2720, 2748), False, 'import os\n'), ((2841, 2876), 'os.path.join', 'os.path.join', (['ref_dir', '"""delta.tiff"""'], {}), "(ref_dir, 'delta.tiff')\n", (2853, 2876), False, 'import os\n'), ((2931, 2971), 'os.path.join', 'os.path.join', (['obj_dir', '"""delta_ds_1.tiff"""'], {}), "(obj_dir, 'delta_ds_1.tiff')\n", (2943, 2971), False, 'import os\n'), ((3064, 3104), 'os.path.join', 'os.path.join', (['ref_dir', '"""delta_ds_1.tiff"""'], {}), "(ref_dir, 'delta_ds_1.tiff')\n", (3076, 3104), False, 'import os\n'), ((2541, 2564), 'os.path.join', 'os.path.join', (['path', 'nei'], {}), '(path, nei)\n', (2553, 2564), False, 'import os\n'), ((2583, 2616), 'os.path.join', 'os.path.join', (['path', '"""../phantom/"""'], {}), "(path, '../phantom/')\n", (2595, 2616), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 16:23:54 2020
@author: huangyuyao
"""
import torch
from torch.utils.data import Dataset
from sklearn.preprocessing import MaxAbsScaler
from torch.utils.data import DataLoader
import os
import sys  # needed for the sys.argv usage at the bottom of this script
import numpy as np
import pandas as pd
import scipy
from glob import glob
from scipy.io import mmread
from sklearn.preprocessing import LabelEncoder
import time
from torchvision import transforms, datasets
from torch import nn, optim
from torch.nn import init
from tqdm import trange
class SingleCellDataset(Dataset):
def __init__(self, path,
low = 0,
high = 0.9,
min_peaks = 0,
transpose = False,
transforms=[]):
self.data, self.peaks, self.barcode = load_data(path, transpose)
if min_peaks > 0:
self.filter_cell(min_peaks)
self.filter_peak(low, high)
for transform in transforms:
self.data = transform(self.data)
self.n_cells, self.n_peaks = self.data.shape
self.shape = self.data.shape
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
        data = self.data[index]
if type(data) is not np.ndarray:
data = data.toarray().squeeze()
return data
def info(self):
print("Dataset Info")
print('Cell number: {}\nPeak number: {}'.format(self.n_cells, self.n_peaks))
def filter_peak(self, low=0, high=0.9):
total_cells = self.data.shape[0]
count = np.array((self.data >0).sum(0)).squeeze()
indices = np.where((count > low*total_cells) & (count < high*total_cells))[0]
self.data = self.data[:, indices]
self.peaks = self.peaks[indices]
print('filterpeak------')
def filter_cell(self, min_peaks=0):
if min_peaks < 1:
min_peaks = len(self.peaks)*min_peaks
indices = np.where(np.sum(self.data>0, 1)>=min_peaks)[0]
self.data = self.data[indices]
self.barcode = self.barcode[indices]
p = type(self.barcode)
print('filtercell------')
print(p)
def write_data(self,path):
print('tmp dataset saving')
data_ = self.data
data1 = data_.todense()
        data = data1.T
#print(type(data))
recon_x = pd.DataFrame(data, index=self.peaks, columns=self.barcode)
recon_x.to_csv(os.path.join(path, 'tmp_data.txt'), sep='\t')
def load_data(path, transpose=False):
print("Loading data ...")
t0 = time.time()
if os.path.isdir(path):
count, peaks, barcode = read_mtx(path)
elif os.path.isfile(path):
count, peaks, barcode = read_csv(path)
else:
raise ValueError("File {} not exists".format(path))
if transpose:
count = count.transpose()
print('Original data contains {} cells x {} peaks'.format(*count.shape))
assert (len(barcode), len(peaks)) == count.shape
print("Finished loading takes {:.2f} min".format((time.time()-t0)/60))
return count, peaks, barcode
def read_mtx(path):
for filename in glob(path+'/*'):
basename = os.path.basename(filename)
if (('count' in basename) or ('matrix' in basename)) and ('mtx' in basename):
count = mmread(filename).T.tocsr().astype('float32')
elif 'barcode' in basename:
barcode = pd.read_csv(filename, sep='\t', header=None)[0].values
elif 'gene' in basename or 'peak' in basename:
feature = pd.read_csv(filename, sep='\t', header=None).iloc[:, -1].values
return count, feature, barcode
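# Editor's sketch (hedged): read_mtx expects a 10x-style directory holding a
# *count*/*matrix* .mtx file (peaks x cells) plus *barcode* and *peak*/*gene*
# TSVs. The helper below writes and reloads a minimal example of that layout;
# all file names are illustrative, not from the original pipeline.
def _example_mtx_roundtrip(tmpdir):
    from scipy.io import mmwrite
    from scipy.sparse import random as sparse_random
    mat = sparse_random(5, 3, density=0.5, format='coo')  # 5 peaks x 3 cells
    mmwrite(os.path.join(tmpdir, 'count.mtx'), mat)
    pd.Series(['cell%d' % i for i in range(3)]).to_csv(
        os.path.join(tmpdir, 'barcode.tsv'), sep='\t', header=False, index=False)
    pd.Series(['peak%d' % i for i in range(5)]).to_csv(
        os.path.join(tmpdir, 'peak.tsv'), sep='\t', header=False, index=False)
    return read_mtx(tmpdir)  # -> (3 x 5 csr count matrix, peak names, barcodes)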
def read_csv(path):
if ('.txt' in path) or ('tsv' in path):
sep = '\t'
elif '.csv' in path:
sep = ','
else:
raise ValueError("File {} not in format txt or csv".format(path))
data = pd.read_csv(path, sep=sep, index_col=0).T.astype('float32')
genes = data.columns.values
barcode = data.index.values
counts = scipy.sparse.csr_matrix(data.values)
return counts, genes, barcode
# model
def build_mlp(layers, activation=nn.ReLU()):
net = []
for i in range(1, len(layers)):
net.append(nn.Linear(layers[i-1], layers[i]))
net.append(activation)
return nn.Sequential(*net)
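# Editor's sketch: build_mlp interleaves a Linear layer with the activation for
# each consecutive pair in `layers`, so [32, 1024, 128, 10] yields
# Linear(32,1024)-ReLU-Linear(1024,128)-ReLU-Linear(128,10)-ReLU
# (note the trailing activation after the last layer).
def _example_mlp_shapes():
    net = build_mlp([32, 1024, 128, 10])
    x = torch.randn(4, 32)
    return net(x).shape  # torch.Size([4, 10])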
class Encoder(nn.Module):
def __init__(self,dims):
super(Encoder, self).__init__()
[x_dim, h_dim, z_dim] = dims
self.hidden = build_mlp([x_dim]+h_dim +[z_dim])
def forward(self, x):
x = self.hidden(x)
return x
class Decoder(nn.Module):
def __init__(self, dims, output_activation=None):
super(Decoder, self).__init__()
[z_dim, h_dim, x_dim] = dims
self.hidden = build_mlp([z_dim, *h_dim])
self.reconstruction = nn.Linear([z_dim, *h_dim][-1], x_dim)
self.output_activation = output_activation
def forward(self, x):
x = self.hidden(x)
if self.output_activation is not None:
return self.output_activation(self.reconstruction(x))
else:
return self.reconstruction(x)
class AE(nn.Module):
def __init__(self,dims):
super(AE, self).__init__()
[x_dim, z_dim, encode_dim, decode_dim] = dims
self.encoder = Encoder([x_dim, encode_dim, z_dim])
self.decoder = Decoder([z_dim, decode_dim, x_dim])
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
feature = self.encoder(x)
recon_x = self.decoder(feature)
return recon_x
def loss_func(self,x):
feature = self.encoder(x)
recon_x = self.decoder(feature)
criteon = nn.MSELoss()
loss = criteon(recon_x,x)
return loss
def fit(self,dataloader,outdir,lr = 0.001,epochs = 10000 ,max_iter = 10000):
        optimizer = optim.Adam(self.parameters(), lr=lr)  # was the global `model`; optimize this instance's parameters
iteration =0
Loss = []
        early_stopping = EarlyStopping(outdir=outdir)  # write the checkpoint into the requested output directory
with trange(max_iter, disable=False) as pbar:
while True:
epoch_loss = 0
for i,x in enumerate(dataloader):
epoch_lr = adjust_learning_rate(lr, optimizer, iteration)
optimizer.zero_grad()
loss = self.loss_func(x)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
pbar.set_postfix_str('loss={:.3f}'.format(loss))
pbar.update(1)
iteration+=1
                    Loss.append(loss.item())
if iteration >= max_iter:
break
else:
early_stopping(epoch_loss, self)
if early_stopping.early_stop:
print('EarlyStopping: run {} iteration'.format(iteration))
break
continue
break
def encodeBatch(self, dataloader,out='z',transforms=None):
output = []
for i, inputs in enumerate(dataloader):
x = inputs
x = x.view(x.size(0), -1).float()
feature = self.encoder(x)
if out == 'z':
output.append(feature.detach().cpu())
elif out == 'x':
recon_x = self.decoder(feature)
output.append(recon_x.detach().cpu().data)
output = torch.cat(output).numpy()
        if out == 'x' and transforms is not None:
for transform in transforms:
output = transform(output)
return output
class AAE(AE):
def __init__(self, dims, n_centroids):
super(AAE, self).__init__(dims)
self.n_centroids = n_centroids
def adjust_learning_rate(init_lr, optimizer, iteration):
lr = max(init_lr * (0.9 ** (iteration//10)), 0.00002)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
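# Editor's sketch: the schedule above decays the learning rate by a factor 0.9
# every 10 iterations and clips it at 2e-5; e.g. for init_lr=0.01,
# iterations 0-9 -> 0.01, 10-19 -> 0.009, iteration 100 -> 0.01*0.9**10 ~= 3.49e-3.
def _lr_preview(init_lr=0.01, iters=(0, 10, 100)):
    return [max(init_lr * 0.9 ** (i // 10), 0.00002) for i in iters]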
class EarlyStopping:
def __init__(self, patience=100, verbose=False, outdir='./'):
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.loss_min = np.Inf
self.model_file = os.path.join(outdir, 'model.pt')
def __call__(self, loss, model):
if np.isnan(loss):
self.early_stop = True
score = -loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(loss, model)
elif score < self.best_score:
self.counter += 1
if self.verbose:
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
                model.load_state_dict(torch.load(self.model_file))  # AE defines no load_model; restore the saved state_dict
else:
self.best_score = score
self.save_checkpoint(loss, model)
self.counter = 0
def save_checkpoint(self, loss, model):
if self.verbose:
print(f'Loss decreased ({self.loss_min:.6f} --> {loss:.6f}). Saving model ...')
torch.save(model.state_dict(), self.model_file)
self.loss_min = loss
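# Editor's sketch (hedged): how the fit() loop above drives EarlyStopping -- it
# is called once per epoch with the summed epoch loss, checkpoints the model
# whenever the loss improves, and after `patience` epochs without improvement
# sets early_stop and reloads the best weights. `epoch_losses` is illustrative.
def _early_stopping_example(model, epoch_losses, outdir='./'):
    stopper = EarlyStopping(patience=100, outdir=outdir)
    for epoch_loss in epoch_losses:
        stopper(epoch_loss, model)  # saves model.pt whenever the loss improves
        if stopper.early_stop:
            break
    return stopper.best_score     # the negated best loss seen so far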
# plot
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import seaborn as sns
def plot_embedding(X, labels, classes=None, method='tSNE', cmap='tab20', figsize=(4, 4), markersize=4, marker=None,
return_emb=False, save=False, save_emb=False, show_legend=True, show_axis_label=True, **legend_params):
if marker is not None:
X = np.concatenate([X, marker], axis=0)
N = len(labels)
if X.shape[1] != 2:
if method == 'tSNE':
from sklearn.manifold import TSNE
X = TSNE(n_components=2, random_state=124).fit_transform(X)
if method == 'UMAP':
from umap import UMAP
X = UMAP(n_neighbors=30, min_dist=0.1).fit_transform(X)
if method == 'PCA':
from sklearn.decomposition import PCA
X = PCA(n_components=2, random_state=124).fit_transform(X)
plt.figure(figsize=figsize)
if classes is None:
classes = np.unique(labels)
if cmap is not None:
cmap = cmap
elif len(classes) <= 10:
cmap = 'tab10'
elif len(classes) <= 20:
cmap = 'tab20'
else:
cmap = 'husl'
colors = sns.color_palette(cmap, n_colors=len(classes))
for i, c in enumerate(classes):
plt.scatter(X[:N][labels==c, 0], X[:N][labels==c, 1], s=markersize, color=colors[i], label=c)
if marker is not None:
plt.scatter(X[N:, 0], X[N:, 1], s=10*markersize, color='black', marker='*')
# plt.axis("off")
legend_params_ = {'loc': 'center left',
'bbox_to_anchor':(1.0, 0.45),
'fontsize': 10,
'ncol': 1,
'frameon': False,
'markerscale': 1.5
}
legend_params_.update(**legend_params)
if show_legend:
plt.legend(**legend_params_)
sns.despine(offset=10, trim=True)
if show_axis_label:
plt.xlabel(method+' dim 1', fontsize=12)
plt.ylabel(method+' dim 2', fontsize=12)
if save:
plt.savefig(save, format='jpg', bbox_inches='tight')
else:
plt.show()
if save_emb:
np.savetxt(save_emb, X)
if return_emb:
return X
def mkdir(path):
    path = path.strip()
    path = path.rstrip("\\")
    if not os.path.exists(path):
        os.makedirs(path)
        print(path + ' created')
        return True
    else:
        print('path already exists')
        return False
normalizer = MaxAbsScaler()
dataset = SingleCellDataset(sys.argv[2], low=0.01, high=0.9, min_peaks=100,
transforms=[normalizer.fit_transform])
trainloader = DataLoader(dataset, batch_size=100, shuffle=False, drop_last=False)
testloader = DataLoader(dataset, batch_size=100, shuffle=False, drop_last=False)
cell_num = dataset.shape[0]
input_dim = dataset.shape[1]
n_centroids = 8
name = 'Forebrain'
z_dim = int(sys.argv[4])
h_dim = [1024, 128]
decode_dim = []
lr = 0.01
epochs = 9999
max_iter = int(sys.argv[1])
mkpath = sys.argv[3]
mkdir(mkpath)
outdir = mkpath
dims = [input_dim, z_dim, h_dim, decode_dim]
model = AAE(dims, n_centroids= n_centroids)
print('\n### Training ... ###')
model.fit(trainloader,lr=lr,epochs=epochs,max_iter=max_iter,outdir = outdir)
#torch.save(model.to('cpu').state_dict(), os.path.join(outdir, 'model_tmp.pt'))
feature = model.encodeBatch(testloader,out='z')
pd.DataFrame(feature, index=dataset.barcode).to_csv(os.path.join(outdir, 'feature.txt'), sep='\t', header=False)
recon_x = model.encodeBatch(testloader, out='x', transforms=[normalizer.inverse_transform])
recon_x = pd.DataFrame(recon_x.T, index=dataset.peaks, columns=dataset.barcode)
recon_x.to_csv(os.path.join(outdir, 'imputed_data.txt'), sep='\t')
print("Plotting embedding")
reference = sys.argv[5]
emb = 'UMAP'
#emb = 'tSNE'
ref = pd.read_csv(reference, sep='\t', header=None, index_col=0)[1]
labels = ref.reindex(dataset.barcode, fill_value='unknown')
X= plot_embedding(feature, labels, method=emb,
save=os.path.join(outdir, 'emb_{}ae.jpg'.format(emb)), save_emb=os.path.join(outdir, 'emb_{}.txt'.format(emb)),return_emb = True)
| [
"torch.nn.ReLU",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"torch.nn.Sequential",
"torch.nn.init.xavier_normal_",
"torch.nn.MSELoss",
"umap.UMAP",
"os.path.exists",
"seaborn.despine",
"numpy.where",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"sklearn.manifold.TSNE",
"os.... | [((10157, 10178), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (10171, 10178), False, 'import matplotlib\n'), ((12748, 12762), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (12760, 12762), False, 'from sklearn.preprocessing import MaxAbsScaler\n'), ((12933, 13000), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(100)', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset, batch_size=100, shuffle=False, drop_last=False)\n', (12943, 13000), False, 'from torch.utils.data import DataLoader\n'), ((13015, 13082), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(100)', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset, batch_size=100, shuffle=False, drop_last=False)\n', (13025, 13082), False, 'from torch.utils.data import DataLoader\n'), ((13944, 14013), 'pandas.DataFrame', 'pd.DataFrame', (['recon_x.T'], {'index': 'dataset.peaks', 'columns': 'dataset.barcode'}), '(recon_x.T, index=dataset.peaks, columns=dataset.barcode)\n', (13956, 14013), True, 'import pandas as pd\n'), ((2729, 2740), 'time.time', 'time.time', ([], {}), '()\n', (2738, 2740), False, 'import time\n'), ((2749, 2768), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2762, 2768), False, 'import os\n'), ((3323, 3340), 'glob.glob', 'glob', (["(path + '/*')"], {}), "(path + '/*')\n", (3327, 3340), False, 'from glob import glob\n'), ((4215, 4251), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['data.values'], {}), '(data.values)\n', (4238, 4251), False, 'import scipy\n'), ((4343, 4352), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4350, 4352), False, 'from torch import nn, optim\n'), ((4505, 4524), 'torch.nn.Sequential', 'nn.Sequential', (['*net'], {}), '(*net)\n', (4518, 4524), False, 'from torch import nn, optim\n'), ((11059, 11086), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (11069, 11086), True, 'from matplotlib import pyplot as plt\n'), ((12073, 12106), 'seaborn.despine', 'sns.despine', ([], {'offset': '(10)', 'trim': '(True)'}), '(offset=10, trim=True)\n', (12084, 12106), True, 'import seaborn as sns\n'), ((12527, 12547), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (12541, 12547), False, 'import os\n'), ((13777, 13812), 'os.path.join', 'os.path.join', (['outdir', '"""feature.txt"""'], {}), "(outdir, 'feature.txt')\n", (13789, 13812), False, 'import os\n'), ((14030, 14070), 'os.path.join', 'os.path.join', (['outdir', '"""imputed_data.txt"""'], {}), "(outdir, 'imputed_data.txt')\n", (14042, 14070), False, 'import os\n'), ((14190, 14248), 'pandas.read_csv', 'pd.read_csv', (['reference'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(reference, sep='\\t', header=None, index_col=0)\n", (14201, 14248), True, 'import pandas as pd\n'), ((2508, 2566), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'self.peaks', 'columns': 'self.barcode'}), '(data, index=self.peaks, columns=self.barcode)\n', (2520, 2566), True, 'import pandas as pd\n'), ((2828, 2848), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2842, 2848), False, 'import os\n'), ((3360, 3386), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3376, 3386), False, 'import os\n'), ((5077, 5114), 'torch.nn.Linear', 'nn.Linear', (['[z_dim, *h_dim][-1]', 'x_dim'], {}), '([z_dim, *h_dim][-1], x_dim)\n', (5086, 5114), False, 'from torch import nn, optim\n'), ((6261, 6273), 'torch.nn.MSELoss', 
'nn.MSELoss', ([], {}), '()\n', (6271, 6273), False, 'from torch import nn, optim\n'), ((9129, 9161), 'os.path.join', 'os.path.join', (['outdir', '"""model.pt"""'], {}), "(outdir, 'model.pt')\n", (9141, 9161), False, 'import os\n'), ((9214, 9228), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (9222, 9228), True, 'import numpy as np\n'), ((10526, 10561), 'numpy.concatenate', 'np.concatenate', (['[X, marker]'], {'axis': '(0)'}), '([X, marker], axis=0)\n', (10540, 10561), True, 'import numpy as np\n'), ((11131, 11148), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (11140, 11148), True, 'import numpy as np\n'), ((11457, 11558), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:N][labels == c, 0]', 'X[:N][labels == c, 1]'], {'s': 'markersize', 'color': 'colors[i]', 'label': 'c'}), '(X[:N][labels == c, 0], X[:N][labels == c, 1], s=markersize,\n color=colors[i], label=c)\n', (11468, 11558), True, 'from matplotlib import pyplot as plt\n'), ((11588, 11665), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[N:, 0]', 'X[N:, 1]'], {'s': '(10 * markersize)', 'color': '"""black"""', 'marker': '"""*"""'}), "(X[N:, 0], X[N:, 1], s=10 * markersize, color='black', marker='*')\n", (11599, 11665), True, 'from matplotlib import pyplot as plt\n'), ((12039, 12067), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '(**legend_params_)\n', (12049, 12067), True, 'from matplotlib import pyplot as plt\n'), ((12141, 12183), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["(method + ' dim 1')"], {'fontsize': '(12)'}), "(method + ' dim 1', fontsize=12)\n", (12151, 12183), True, 'from matplotlib import pyplot as plt\n'), ((12191, 12233), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(method + ' dim 2')"], {'fontsize': '(12)'}), "(method + ' dim 2', fontsize=12)\n", (12201, 12233), True, 'from matplotlib import pyplot as plt\n'), ((12257, 12309), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {'format': '"""jpg"""', 'bbox_inches': '"""tight"""'}), "(save, format='jpg', bbox_inches='tight')\n", (12268, 12309), True, 'from matplotlib import pyplot as plt\n'), ((12330, 12340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12338, 12340), True, 'from matplotlib import pyplot as plt\n'), ((12378, 12401), 'numpy.savetxt', 'np.savetxt', (['save_emb', 'X'], {}), '(save_emb, X)\n', (12388, 12401), True, 'import numpy as np\n'), ((12579, 12596), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (12590, 12596), False, 'import os\n'), ((13725, 13769), 'pandas.DataFrame', 'pd.DataFrame', (['feature'], {'index': 'dataset.barcode'}), '(feature, index=dataset.barcode)\n', (13737, 13769), True, 'import pandas as pd\n'), ((1752, 1820), 'numpy.where', 'np.where', (['((count > low * total_cells) & (count < high * total_cells))'], {}), '((count > low * total_cells) & (count < high * total_cells))\n', (1760, 1820), True, 'import numpy as np\n'), ((2591, 2625), 'os.path.join', 'os.path.join', (['path', '"""tmp_data.txt"""'], {}), "(path, 'tmp_data.txt')\n", (2603, 2625), False, 'import os\n'), ((4426, 4461), 'torch.nn.Linear', 'nn.Linear', (['layers[i - 1]', 'layers[i]'], {}), '(layers[i - 1], layers[i])\n', (4435, 4461), False, 'from torch import nn, optim\n'), ((6612, 6643), 'tqdm.trange', 'trange', (['max_iter'], {'disable': '(False)'}), '(max_iter, disable=False)\n', (6618, 6643), False, 'from tqdm import trange\n'), ((4073, 4112), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': 'sep', 'index_col': '(0)'}), '(path, sep=sep, index_col=0)\n', (4084, 4112), True, 'import pandas as pd\n'), 
((5852, 5886), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (5871, 5886), False, 'from torch.nn import init\n'), ((8269, 8286), 'torch.cat', 'torch.cat', (['output'], {}), '(output)\n', (8278, 8286), False, 'import torch\n'), ((2100, 2124), 'numpy.sum', 'np.sum', (['(self.data > 0)', '(1)'], {}), '(self.data > 0, 1)\n', (2106, 2124), True, 'import numpy as np\n'), ((3222, 3233), 'time.time', 'time.time', ([], {}), '()\n', (3231, 3233), False, 'import time\n'), ((10702, 10740), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(124)'}), '(n_components=2, random_state=124)\n', (10706, 10740), False, 'from sklearn.manifold import TSNE\n'), ((10840, 10874), 'umap.UMAP', 'UMAP', ([], {'n_neighbors': '(30)', 'min_dist': '(0.1)'}), '(n_neighbors=30, min_dist=0.1)\n', (10844, 10874), False, 'from umap import UMAP\n'), ((10989, 11026), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)', 'random_state': '(124)'}), '(n_components=2, random_state=124)\n', (10992, 11026), False, 'from sklearn.decomposition import PCA\n'), ((3600, 3644), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""', 'header': 'None'}), "(filename, sep='\\t', header=None)\n", (3611, 3644), True, 'import pandas as pd\n'), ((3495, 3511), 'scipy.io.mmread', 'mmread', (['filename'], {}), '(filename)\n', (3501, 3511), False, 'from scipy.io import mmread\n'), ((3734, 3778), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""', 'header': 'None'}), "(filename, sep='\\t', header=None)\n", (3745, 3778), True, 'import pandas as pd\n')] |
# coding: utf-8
# pylint: disable=invalid-name, no-member, too-many-arguments
# pylint: disable=too-many-instance-attributes, too-many-locals
# pylint: disable=arguments-differ
""" dynamic EIT solver using JAC """
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.linalg as la
from numpy.linalg import multi_dot
from .base import EitBase
class JAC(EitBase):
""" A sensitivity-based EIT imaging class """
def setup(self, p=0.20, lamb=0.001, method='kotre', W=None, Wx=None, xp=None, alpx=0.0):
"""
JAC, default file parser is 'std'
Parameters
----------
p, lamb: float
JAC parameters
method: str
regularization methods
"""
jac = self.J
if W is None:
W = np.eye(jac.shape[0])
if Wx is None:
Wx = np.eye(jac.shape[1])
if xp is None:
            xp = np.zeros(jac.shape[1])
# passing imaging parameters
self.params = {
'p': p,
'lamb': lamb,
'method': method,
'W': W,
'Wx': Wx,
'xp': xp,
'alpx': alpx
}
# pre-compute H0 for dynamical imaging
# H = (J.T*J + R)^(-1) * J.T
self.H = h_matrix(self.J, p, lamb, method, W)
# pre-compute Q for dynamical imaging
# Q = (J.T*W*J + R)^(-1)
self.Q, r_mat = qr_matrix(self.J, p, lamb, W, Wx, alpx, method=method)
self.params['Wx'] = (1-alpx)*r_mat + alpx*Wx
def solve(self, v1, v0, normalize=False):
""" dynamic solve_eit
Parameters
----------
v1: NDArray
current frame
v0: NDArray, optional
referenced frame, d = H(v1 - v0)
normalize: Boolean
true for conducting normalization
Returns
-------
ds: NDArray
complex-valued NDArray, changes of conductivities
"""
if normalize:
dv = self.normalize(v1, v0)
else:
dv = (v1 - v0)
# s = -Hv
jac = self.J
q_mat = self.Q
W = self.params['W']
Wx = self.params['Wx']
lamb = self.params['lamb']
xp = self.params['xp']
h_mat = multi_dot([q_mat, jac.transpose(), W])
p_mat = np.dot(q_mat, Wx)
ds = -np.dot(h_mat, dv) + lamb * np.dot(p_mat, xp)
return ds
def solve_P(self, v1, v0, normalize=False):
""" dynamic solve_eit
Parameters
----------
v1: NDArray
current frame
v0: NDArray, optional
referenced frame, d = H(v1 - v0)
normalize: Boolean
true for conducting normalization
Returns
-------
(xi, eta): Points on L-curve
"""
if normalize:
dv = self.normalize(v1, v0)
else:
dv = (v1 - v0)
# s = -Hv
jac = self.J
h_mat = multi_dot([self.Q, jac.transpose(), self.params['W']])
p_mat = np.dot(self.Q, self.params['Wx'])
ds = -np.dot(h_mat, dv) + self.params['lamb'] * np.dot(p_mat, self.params['xp'])
Ax_minus_y = np.dot(-jac, ds) - dv
x_minus_xp = ds - self.params['xp']
xi = np.log10(multi_dot([np.conjugate(Ax_minus_y).transpose(), self.params['W'], Ax_minus_y]))
eta = np.log10(multi_dot([np.conjugate(x_minus_xp).transpose(), self.params['Wx'], x_minus_xp]))
# xi = multi_dot([np.conjugate(Ax_minus_y).transpose(), self.params['W'], Ax_minus_y])
# eta = multi_dot([np.conjugate(x_minus_xp).transpose(), self.params['Wx'], x_minus_xp])
return (xi, eta)
def solve_G(self, v1, v0, normalize=False):
""" dynamic solve_eit
Parameters
----------
v1: NDArray
current frame
v0: NDArray, optional
referenced frame, d = H(v1 - v0)
normalize: Boolean
true for conducting normalization
Returns
-------
G: GCV value
"""
if normalize:
dv = self.normalize(v1, v0)
else:
dv = (v1 - v0)
b = dv
L1 = np.linalg.cholesky(self.params['W'])
L1_T = L1.transpose()
A = -self.J
A_prime = np.dot(L1_T, A)
b_prime = np.dot(L1_T, dv - np.dot(A, self.params['xp']))
AI_mat = multi_dot([self.Q, A.transpose(), L1])
xreg_prime = np.dot(AI_mat, b_prime)
Ax_minus_y = np.dot(A_prime, xreg_prime) - b_prime # A'x'-b'
numer = np.dot(np.conjugate(Ax_minus_y).transpose(), Ax_minus_y)
Im = np.eye(A_prime.shape[0])
denom = np.trace(Im - np.dot(A_prime, AI_mat)) ** 2
G = numer / denom
return G
def map(self, v):
""" return Hv """
return -np.dot(self.H, v)
def solve_gs(self, v1, v0):
""" solving by weighted frequency """
a = np.dot(v1, v0) / np.dot(v0, v0)
dv = (v1 - a * v0)
ds = -np.dot(self.H, dv)
# return average epsilon on element
return ds
def jt_solve(self, v1, v0, normalize=True):
"""
a 'naive' back projection using the transpose of Jac.
This scheme is the one published by kotre (1989):
[1] <NAME>. (1989).
A sensitivity coefficient method for the reconstruction of
electrical impedance tomograms.
Clinical Physics and Physiological Measurement,
10(3), 275–281. doi:10.1088/0143-0815/10/3/008
The input (dv) and output (ds) is log-normalized
"""
if normalize:
dv = np.log(np.abs(v1) / np.abs(v0)) * np.sign(v0)
else:
dv = (v1 - v0)
# s_r = J^Tv_r
ds = -np.dot(self.J.conj().T, dv)
return np.exp(ds) - 1.0
def gn(self, v, x0=None, maxiter=1, gtol=1e-4, p=None, lamb=None,
lamb_decay=1.0, lamb_min=0, method='kotre', verbose=False):
"""
        Gauss-Newton static solver
You can use a different p, lamb other than the default ones in setup
Parameters
----------
v: NDArray
boundary measurement
x0: NDArray, optional
initial guess
maxiter: int, optional
number of maximum iterations
p, lamb: float
JAC parameters (can be overridden)
lamb_decay: float
decay of lamb0, i.e., lamb0 = lamb0 * lamb_decay of each iteration
lamb_min: float
minimal value of lamb
method: str, optional
'kotre' or 'lm'
verbose: bool, optional
print debug information
        Note: the prior xp configured via setup() is used as the
        regularization target; it is not a parameter of this method.
Returns
-------
sigma: NDArray
Complex-valued conductivities
Note
----
Gauss-Newton Iterative solver,
x1 = x0 - (J^TJ + lamb*R)^(-1) * r0
where:
R = diag(J^TJ)**p
r0 (residual) = real_measure - forward_v
"""
from sklearn.metrics import r2_score
if x0 is None:
x0 = self.perm
if p is None:
p = self.params['p']
if lamb is None:
lamb = self.params['lamb']
if method is None:
method = self.params['method']
# convergence test
x0_norm = np.linalg.norm(x0)
convergence = []
r2i = []
xp = self.params['xp']
for i in range(maxiter):
# forward solver
fs = self.fwd.solve_eit(self.ex_mat, step=self.step,
perm=x0, parser=self.parser)
# Residual
r0 = v - fs.v
r1 = x0 - xp
jac = fs.jac
# Damped Gaussian-Newton
h_mat, r_mat = hr_matrix(jac, p, lamb, method)
# update
d_k = np.dot(h_mat, np.dot(jac.transpose(), r0) + lamb * np.dot(r_mat, r1))
x0 = x0 - d_k
# convergence test
c = np.linalg.norm(d_k) / x0_norm
r2 = r2_score(v, fs.v)
convergence.append(c)
r2i.append(r2)
if c < gtol:
break
if verbose:
print('iter = %d, lamb = %f, gtol = %f' % (i, lamb, c))
# update regularization parameter
# TODO: support user defined decreasing order of lambda series
lamb *= lamb_decay
if lamb < lamb_min:
lamb = lamb_min
self.tol = {'convergence': convergence, 'r2': r2i}
return x0
def project(self, ds):
""" project ds using spatial difference filter (deprecated)
Parameters
----------
ds: NDArray
delta sigma (conductivities)
Returns
-------
NDArray
"""
d_mat = sar(self.tri)
return np.dot(d_mat, ds)
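# Editor's sketch (hedged): one damped Gauss-Newton update of the kind iterated
# in JAC.gn above, shown on a toy 1-D least-squares problem (fit y = exp(-k*t)).
# Sign conventions here follow the textbook form x <- x + (J^T J + lamb*R)^(-1)
# J^T r with r = y - f(x); the EIT forward operator in gn uses its own signs.
def _gauss_newton_toy(lamb=1e-3, n_steps=5):
    t = np.linspace(0.0, 2.0, 20)
    y = np.exp(-1.3 * t)                      # data from k_true = 1.3
    k = 0.5                                    # initial guess
    for _ in range(n_steps):
        f = np.exp(-k * t)
        r = y - f                              # residual
        jac = (-t * f)[:, None]                # d f / d k, shape (20, 1)
        r_mat = np.eye(1)                      # 'dgn'-style regularizer
        step = la.inv(jac.T @ jac + lamb * r_mat) @ (jac.T @ r)
        k = k + step[0]
    return k                                   # ~= 1.3 after a few iterations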
def h_matrix(jac, p, lamb, method='kotre', W=None):
"""
JAC method of dynamic EIT solver:
H = (J.T*J + lamb*R)^(-1) * J.T
Parameters
----------
jac: NDArray
Jacobian
p, lamb: float
regularization parameters
method: str, optional
regularization method
Returns
-------
H: NDArray
pseudo-inverse matrix of JAC
"""
if W is None:
j_w_j = np.dot(jac.transpose(), jac)
else:
j_w_j = multi_dot([jac.transpose(), W, jac])
if method == 'kotre':
# see adler-dai-lionheart-2007
# p=0 : noise distribute on the boundary ('dgn')
# p=0.5 : noise distribute on the middle
# p=1 : noise distribute on the center ('lm')
r_mat = np.diag(np.diag(j_w_j)) ** p
elif method == 'lm':
# Marquardt–Levenberg, 'lm' for short
# or can be called NOSER, DLS
r_mat = np.diag(np.diag(j_w_j))
else:
# Damped Gauss Newton, 'dgn' for short
r_mat = np.eye(jac.shape[1])
# build H
h_mat = np.dot(la.inv(j_w_j + lamb * r_mat), jac.transpose())
return h_mat
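# Editor's sketch: how the three regularizers above relate. 'kotre' raises the
# diagonal of J^T W J to a power p, interpolating between damped Gauss-Newton
# (small p) and Marquardt-Levenberg (p = 1, R = diag(J^T J)). One caveat worth
# noting: the elementwise form np.diag(np.diag(M)) ** p used above fills the
# off-diagonal with 0**0 = 1 when p is exactly 0, so for that edge case take
# the power of the diagonal vector first, as done here.
def _regularizer_demo(p=0.5):
    jtj = np.diag([4.0, 9.0, 16.0])           # stand-in for J^T W J
    r_kotre = np.diag(np.diag(jtj) ** p)      # diag([2, 3, 4]) for p = 0.5
    r_lm = np.diag(np.diag(jtj))               # diag([4, 9, 16])
    r_dgn = np.eye(3)
    return r_kotre, r_lm, r_dgn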
def hr_matrix(jac, p, lamb, method='kotre'):
"""
JAC method of dynamic EIT solver:
    H = (J.T*J + lamb*R)^(-1)
Parameters
----------
jac: NDArray
Jacobian
p, lamb: float
regularization parameters
method: str, optional
regularization method
Returns
-------
    H: NDArray
        (J.T*J + lamb*R)^(-1)
    R: NDArray
        the regularization matrix
"""
j_w_j = np.dot(jac.transpose(), jac)
if method == 'kotre':
# see adler-dai-lionheart-2007
# p=0 : noise distribute on the boundary ('dgn')
# p=0.5 : noise distribute on the middle
# p=1 : noise distribute on the center ('lm')
r_mat = np.diag(np.diag(j_w_j)) ** p
elif method == 'lm':
# Marquardt–Levenberg, 'lm' for short
# or can be called NOSER, DLS
r_mat = np.diag(np.diag(j_w_j))
else:
# Damped Gauss Newton, 'dgn' for short
r_mat = np.eye(jac.shape[1])
# build H
h_mat = la.inv(j_w_j + lamb * r_mat)
return h_mat, r_mat
def qr_matrix(jac, p, lamb, W, Wx, alpx, method='kotre'):
"""
JAC method of dynamic EIT solver:
Q = (J.T*W*J + lamb*Wx)^(-1)
Parameters
----------
jac: NDArray
Jacobian
p, lamb: float
regularization parameters
method: str, optional
regularization method
Returns
-------
    Q: NDArray
        the regularized inverse (J.T*W*J + lamb*Wx)^(-1)
    R: NDArray
        the method-derived regularization matrix
"""
j_w_j = multi_dot([jac.transpose(), W, jac])
if method == 'kotre':
# see adler-dai-lionheart-2007
# p=0 : noise distribute on the boundary ('dgn')
# p=0.5 : noise distribute on the middle
# p=1 : noise distribute on the center ('lm')
r_mat = np.diag(np.diag(j_w_j)) ** p
elif method == 'lm':
# Marquardt–Levenberg, 'lm' for short
# or can be called NOSER, DLS
r_mat = np.diag(np.diag(j_w_j))
else:
# Damped Gauss Newton, 'dgn' for short
r_mat = np.eye(jac.shape[1])
# build Q
q_mat = la.inv(j_w_j + lamb * ((1-alpx)*r_mat + alpx*Wx))
return q_mat, r_mat
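# Editor's sketch (hedged): the GCV criterion evaluated in JAC.solve_G above,
# G(lamb) = ||A x_lamb - b||^2 / trace(I - A A_lamb^+)^2, demonstrated on a
# toy unweighted Tikhonov problem; one would pick the lamb minimising G.
def _gcv_demo(lambdas=(1e-4, 1e-2, 1.0)):
    rng = np.random.RandomState(0)
    A = rng.randn(40, 20)
    b = A @ rng.randn(20) + 0.05 * rng.randn(40)
    scores = []
    for lamb in lambdas:
        ai = la.inv(A.T @ A + lamb * np.eye(20)) @ A.T   # regularized pseudo-inverse
        resid = A @ (ai @ b) - b
        scores.append((resid @ resid) / np.trace(np.eye(40) - A @ ai) ** 2)
    return scores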
def sar(el2no):
"""
extract spatial difference matrix on the neighbors of each element
in 2D fem using triangular mesh.
Parameters
----------
el2no: NDArray
triangle structures
Returns
-------
D: NDArray
SAR matrix
"""
ne = el2no.shape[0]
d_mat = np.eye(ne)
for i in range(ne):
ei = el2no[i, :]
#
i0 = np.argwhere(el2no == ei[0])[:, 0]
i1 = np.argwhere(el2no == ei[1])[:, 0]
i2 = np.argwhere(el2no == ei[2])[:, 0]
idx = np.unique(np.hstack([i0, i1, i2]))
# build row-i
for j in idx:
d_mat[i, j] = -1
nn = idx.size - 1
d_mat[i, i] = nn
return d_mat | [
"numpy.abs",
"numpy.eye",
"numpy.hstack",
"numpy.conjugate",
"numpy.diag",
"numpy.exp",
"numpy.dot",
"numpy.zeros",
"numpy.argwhere",
"numpy.sign",
"numpy.linalg.norm",
"numpy.linalg.cholesky",
"sklearn.metrics.r2_score",
"scipy.linalg.inv"
] | [((12321, 12349), 'scipy.linalg.inv', 'la.inv', (['(j_w_j + lamb * r_mat)'], {}), '(j_w_j + lamb * r_mat)\n', (12327, 12349), True, 'import scipy.linalg as la\n'), ((13370, 13425), 'scipy.linalg.inv', 'la.inv', (['(j_w_j + lamb * ((1 - alpx) * r_mat + alpx * Wx))'], {}), '(j_w_j + lamb * ((1 - alpx) * r_mat + alpx * Wx))\n', (13376, 13425), True, 'import scipy.linalg as la\n'), ((13759, 13769), 'numpy.eye', 'np.eye', (['ne'], {}), '(ne)\n', (13765, 13769), True, 'import numpy as np\n'), ((2477, 2494), 'numpy.dot', 'np.dot', (['q_mat', 'Wx'], {}), '(q_mat, Wx)\n', (2483, 2494), True, 'import numpy as np\n'), ((3197, 3230), 'numpy.dot', 'np.dot', (['self.Q', "self.params['Wx']"], {}), "(self.Q, self.params['Wx'])\n", (3203, 3230), True, 'import numpy as np\n'), ((4346, 4382), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (["self.params['W']"], {}), "(self.params['W'])\n", (4364, 4382), True, 'import numpy as np\n'), ((4452, 4467), 'numpy.dot', 'np.dot', (['L1_T', 'A'], {}), '(L1_T, A)\n', (4458, 4467), True, 'import numpy as np\n'), ((4612, 4635), 'numpy.dot', 'np.dot', (['AI_mat', 'b_prime'], {}), '(AI_mat, b_prime)\n', (4618, 4635), True, 'import numpy as np\n'), ((4794, 4818), 'numpy.eye', 'np.eye', (['A_prime.shape[0]'], {}), '(A_prime.shape[0])\n', (4800, 4818), True, 'import numpy as np\n'), ((7549, 7567), 'numpy.linalg.norm', 'np.linalg.norm', (['x0'], {}), '(x0)\n', (7563, 7567), True, 'import numpy as np\n'), ((9084, 9101), 'numpy.dot', 'np.dot', (['d_mat', 'ds'], {}), '(d_mat, ds)\n', (9090, 9101), True, 'import numpy as np\n'), ((10179, 10207), 'scipy.linalg.inv', 'la.inv', (['(j_w_j + lamb * r_mat)'], {}), '(j_w_j + lamb * r_mat)\n', (10185, 10207), True, 'import scipy.linalg as la\n'), ((941, 961), 'numpy.eye', 'np.eye', (['jac.shape[0]'], {}), '(jac.shape[0])\n', (947, 961), True, 'import numpy as np\n'), ((1002, 1022), 'numpy.eye', 'np.eye', (['jac.shape[1]'], {}), '(jac.shape[1])\n', (1008, 1022), True, 'import numpy as np\n'), ((1063, 1085), 'numpy.zeros', 'np.zeros', (['jac.shape[1]'], {}), '(jac.shape[1])\n', (1071, 1085), True, 'import numpy as np\n'), ((3343, 3359), 'numpy.dot', 'np.dot', (['(-jac)', 'ds'], {}), '(-jac, ds)\n', (3349, 3359), True, 'import numpy as np\n'), ((4658, 4685), 'numpy.dot', 'np.dot', (['A_prime', 'xreg_prime'], {}), '(A_prime, xreg_prime)\n', (4664, 4685), True, 'import numpy as np\n'), ((4989, 5006), 'numpy.dot', 'np.dot', (['self.H', 'v'], {}), '(self.H, v)\n', (4995, 5006), True, 'import numpy as np\n'), ((5098, 5112), 'numpy.dot', 'np.dot', (['v1', 'v0'], {}), '(v1, v0)\n', (5104, 5112), True, 'import numpy as np\n'), ((5115, 5129), 'numpy.dot', 'np.dot', (['v0', 'v0'], {}), '(v0, v0)\n', (5121, 5129), True, 'import numpy as np\n'), ((5171, 5189), 'numpy.dot', 'np.dot', (['self.H', 'dv'], {}), '(self.H, dv)\n', (5177, 5189), True, 'import numpy as np\n'), ((5972, 5982), 'numpy.exp', 'np.exp', (['ds'], {}), '(ds)\n', (5978, 5982), True, 'import numpy as np\n'), ((8262, 8279), 'sklearn.metrics.r2_score', 'r2_score', (['v', 'fs.v'], {}), '(v, fs.v)\n', (8270, 8279), False, 'from sklearn.metrics import r2_score\n'), ((10124, 10144), 'numpy.eye', 'np.eye', (['jac.shape[1]'], {}), '(jac.shape[1])\n', (10130, 10144), True, 'import numpy as np\n'), ((12273, 12293), 'numpy.eye', 'np.eye', (['jac.shape[1]'], {}), '(jac.shape[1])\n', (12279, 12293), True, 'import numpy as np\n'), ((13322, 13342), 'numpy.eye', 'np.eye', (['jac.shape[1]'], {}), '(jac.shape[1])\n', (13328, 13342), True, 'import numpy as np\n'), ((13842, 13869), 'numpy.argwhere', 
'np.argwhere', (['(el2no == ei[0])'], {}), '(el2no == ei[0])\n', (13853, 13869), True, 'import numpy as np\n'), ((13889, 13916), 'numpy.argwhere', 'np.argwhere', (['(el2no == ei[1])'], {}), '(el2no == ei[1])\n', (13900, 13916), True, 'import numpy as np\n'), ((13936, 13963), 'numpy.argwhere', 'np.argwhere', (['(el2no == ei[2])'], {}), '(el2no == ei[2])\n', (13947, 13963), True, 'import numpy as np\n'), ((13994, 14017), 'numpy.hstack', 'np.hstack', (['[i0, i1, i2]'], {}), '([i0, i1, i2])\n', (14003, 14017), True, 'import numpy as np\n'), ((2509, 2526), 'numpy.dot', 'np.dot', (['h_mat', 'dv'], {}), '(h_mat, dv)\n', (2515, 2526), True, 'import numpy as np\n'), ((2536, 2553), 'numpy.dot', 'np.dot', (['p_mat', 'xp'], {}), '(p_mat, xp)\n', (2542, 2553), True, 'import numpy as np\n'), ((3246, 3263), 'numpy.dot', 'np.dot', (['h_mat', 'dv'], {}), '(h_mat, dv)\n', (3252, 3263), True, 'import numpy as np\n'), ((3288, 3320), 'numpy.dot', 'np.dot', (['p_mat', "self.params['xp']"], {}), "(p_mat, self.params['xp'])\n", (3294, 3320), True, 'import numpy as np\n'), ((4504, 4532), 'numpy.dot', 'np.dot', (['A', "self.params['xp']"], {}), "(A, self.params['xp'])\n", (4510, 4532), True, 'import numpy as np\n'), ((5839, 5850), 'numpy.sign', 'np.sign', (['v0'], {}), '(v0)\n', (5846, 5850), True, 'import numpy as np\n'), ((8215, 8234), 'numpy.linalg.norm', 'np.linalg.norm', (['d_k'], {}), '(d_k)\n', (8229, 8234), True, 'import numpy as np\n'), ((9881, 9895), 'numpy.diag', 'np.diag', (['j_w_j'], {}), '(j_w_j)\n', (9888, 9895), True, 'import numpy as np\n'), ((10035, 10049), 'numpy.diag', 'np.diag', (['j_w_j'], {}), '(j_w_j)\n', (10042, 10049), True, 'import numpy as np\n'), ((12030, 12044), 'numpy.diag', 'np.diag', (['j_w_j'], {}), '(j_w_j)\n', (12037, 12044), True, 'import numpy as np\n'), ((12184, 12198), 'numpy.diag', 'np.diag', (['j_w_j'], {}), '(j_w_j)\n', (12191, 12198), True, 'import numpy as np\n'), ((13079, 13093), 'numpy.diag', 'np.diag', (['j_w_j'], {}), '(j_w_j)\n', (13086, 13093), True, 'import numpy as np\n'), ((13233, 13247), 'numpy.diag', 'np.diag', (['j_w_j'], {}), '(j_w_j)\n', (13240, 13247), True, 'import numpy as np\n'), ((4730, 4754), 'numpy.conjugate', 'np.conjugate', (['Ax_minus_y'], {}), '(Ax_minus_y)\n', (4742, 4754), True, 'import numpy as np\n'), ((4849, 4872), 'numpy.dot', 'np.dot', (['A_prime', 'AI_mat'], {}), '(A_prime, AI_mat)\n', (4855, 4872), True, 'import numpy as np\n'), ((5812, 5822), 'numpy.abs', 'np.abs', (['v1'], {}), '(v1)\n', (5818, 5822), True, 'import numpy as np\n'), ((5825, 5835), 'numpy.abs', 'np.abs', (['v0'], {}), '(v0)\n', (5831, 5835), True, 'import numpy as np\n'), ((8122, 8139), 'numpy.dot', 'np.dot', (['r_mat', 'r1'], {}), '(r_mat, r1)\n', (8128, 8139), True, 'import numpy as np\n'), ((3443, 3467), 'numpy.conjugate', 'np.conjugate', (['Ax_minus_y'], {}), '(Ax_minus_y)\n', (3455, 3467), True, 'import numpy as np\n'), ((3547, 3571), 'numpy.conjugate', 'np.conjugate', (['x_minus_xp'], {}), '(x_minus_xp)\n', (3559, 3571), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.distributed.fleet as fleet
import paddle.fluid as fluid
import unittest
import paddle.nn.functional as F
import numpy as np
paddle.enable_static()
def gen_data():
return {
"x": np.random.random(size=(128, 32)).astype('float32'),
"y": np.random.randint(
2, size=(128, 1)).astype('int64')
}
def mlp(input_x, input_y, hid_dim=128, label_dim=2):
fc_1 = paddle.static.nn.fc(x=input_x, size=hid_dim, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim, activation='tanh')
prediction = paddle.static.nn.fc(x=[fc_2],
size=label_dim,
activation='softmax')
cost = F.cross_entropy(input=prediction, label=input_y)
avg_cost = paddle.mean(x=cost)
return avg_cost
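# Editor's note (hedged): the tests below exercise pure-fp16 AMP, whose key
# ordering constraint is that optimizer.amp_init(place) runs after the startup
# program has executed (so fp32 parameters exist to be cast and master weights
# to be loaded) and before the first training step. A minimal helper showing
# that flow, reusing only APIs already used in this file:
def run_amp_training_once(place, main_program, startup_program, cost, optimizer):
    exe = paddle.static.Executor(place)
    exe.run(startup_program)
    optimizer.amp_init(place)  # cast parameters / load fp32 master weights
    return exe.run(program=main_program, feed=gen_data(), fetch_list=[cost.name])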
class TestFleetAMPInit(unittest.TestCase):
def test_fleet_amp_init(self):
if not fluid.core.is_compiled_with_cuda():
return
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with paddle.static.program_guard(main_program, startup_program):
input_x = paddle.static.data(
name="x", shape=[None, 32], dtype='float32')
input_y = paddle.static.data(
name="y", shape=[None, 1], dtype='int64')
cost = mlp(input_x, input_y)
optimizer = paddle.optimizer.Momentum(
learning_rate=0.001,
momentum=0.9,
weight_decay=fluid.regularizer.L2Decay(1e-4),
multi_precision=True)
optimizer = paddle.static.amp.decorate(optimizer)
optimizer = fleet.distributed_optimizer(optimizer)
optimizer.minimize(cost)
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
exe.run(startup_program)
optimizer.amp_init(place)
step = 1
for i in range(step):
cost_val = exe.run(program=main_program,
feed=gen_data(),
fetch_list=[cost.name])
def test_fleet_amp_meta_optimizer_init(self):
if not fluid.core.is_compiled_with_cuda():
return
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with paddle.static.program_guard(main_program, startup_program):
input_x = paddle.static.data(
name="x", shape=[None, 32], dtype='float32')
input_y = paddle.static.data(
name="y", shape=[None, 1], dtype='int64')
cost = mlp(input_x, input_y)
optimizer = paddle.optimizer.Momentum(
learning_rate=0.001,
momentum=0.9,
weight_decay=fluid.regularizer.L2Decay(1e-4),
multi_precision=True)
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {'use_pure_fp16': True}
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 2}
optimizer = fleet.distributed_optimizer(optimizer, strategy)
optimizer.minimize(cost)
print(fleet._get_applied_meta_list())
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
exe.run(startup_program)
optimizer.amp_init(place)
step = 3
for i in range(step):
cost_val = exe.run(program=main_program,
feed=gen_data(),
fetch_list=[cost.name])
print(cost_val)
if __name__ == '__main__':
unittest.main()
| [
"paddle.distributed.fleet.init",
"paddle.distributed.fleet.DistributedStrategy",
"paddle.nn.functional.cross_entropy",
"paddle.mean",
"unittest.main",
"paddle.distributed.fleet._get_applied_meta_list",
"numpy.random.random",
"paddle.enable_static",
"paddle.fluid.core.is_compiled_with_cuda",
"paddl... | [((828, 850), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (848, 850), False, 'import paddle\n'), ((1097, 1160), 'paddle.static.nn.fc', 'paddle.static.nn.fc', ([], {'x': 'input_x', 'size': 'hid_dim', 'activation': '"""tanh"""'}), "(x=input_x, size=hid_dim, activation='tanh')\n", (1116, 1160), False, 'import paddle\n'), ((1172, 1232), 'paddle.static.nn.fc', 'paddle.static.nn.fc', ([], {'x': 'fc_1', 'size': 'hid_dim', 'activation': '"""tanh"""'}), "(x=fc_1, size=hid_dim, activation='tanh')\n", (1191, 1232), False, 'import paddle\n'), ((1250, 1317), 'paddle.static.nn.fc', 'paddle.static.nn.fc', ([], {'x': '[fc_2]', 'size': 'label_dim', 'activation': '"""softmax"""'}), "(x=[fc_2], size=label_dim, activation='softmax')\n", (1269, 1317), False, 'import paddle\n'), ((1403, 1451), 'paddle.nn.functional.cross_entropy', 'F.cross_entropy', ([], {'input': 'prediction', 'label': 'input_y'}), '(input=prediction, label=input_y)\n', (1418, 1451), True, 'import paddle.nn.functional as F\n'), ((1467, 1486), 'paddle.mean', 'paddle.mean', ([], {'x': 'cost'}), '(x=cost)\n', (1478, 1486), False, 'import paddle\n'), ((4588, 4603), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4601, 4603), False, 'import unittest\n'), ((1681, 1704), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (1702, 1704), False, 'import paddle\n'), ((1731, 1754), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (1752, 1754), False, 'import paddle\n'), ((1771, 1822), 'paddle.distributed.fleet.base.role_maker.PaddleCloudRoleMaker', 'role_maker.PaddleCloudRoleMaker', ([], {'is_collective': '(True)'}), '(is_collective=True)\n', (1802, 1822), True, 'import paddle.distributed.fleet.base.role_maker as role_maker\n'), ((1831, 1847), 'paddle.distributed.fleet.init', 'fleet.init', (['role'], {}), '(role)\n', (1841, 1847), True, 'import paddle.distributed.fleet as fleet\n'), ((2565, 2584), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (2581, 2584), False, 'import paddle\n'), ((2600, 2629), 'paddle.static.Executor', 'paddle.static.Executor', (['place'], {}), '(place)\n', (2622, 2629), False, 'import paddle\n'), ((3046, 3069), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (3067, 3069), False, 'import paddle\n'), ((3096, 3119), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (3117, 3119), False, 'import paddle\n'), ((3136, 3187), 'paddle.distributed.fleet.base.role_maker.PaddleCloudRoleMaker', 'role_maker.PaddleCloudRoleMaker', ([], {'is_collective': '(True)'}), '(is_collective=True)\n', (3167, 3187), True, 'import paddle.distributed.fleet.base.role_maker as role_maker\n'), ((3196, 3212), 'paddle.distributed.fleet.init', 'fleet.init', (['role'], {}), '(role)\n', (3206, 3212), True, 'import paddle.distributed.fleet as fleet\n'), ((4191, 4210), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (4207, 4210), False, 'import paddle\n'), ((4226, 4255), 'paddle.static.Executor', 'paddle.static.Executor', (['place'], {}), '(place)\n', (4248, 4255), False, 'import paddle\n'), ((1602, 1636), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (1634, 1636), True, 'import paddle.fluid as fluid\n'), ((1862, 1920), 'paddle.static.program_guard', 'paddle.static.program_guard', (['main_program', 'startup_program'], {}), '(main_program, startup_program)\n', (1889, 1920), False, 'import paddle\n'), ((1944, 2007), 'paddle.static.data', 'paddle.static.data', ([], {'name': 
'"""x"""', 'shape': '[None, 32]', 'dtype': '"""float32"""'}), "(name='x', shape=[None, 32], dtype='float32')\n", (1962, 2007), False, 'import paddle\n'), ((2047, 2107), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""y"""', 'shape': '[None, 1]', 'dtype': '"""int64"""'}), "(name='y', shape=[None, 1], dtype='int64')\n", (2065, 2107), False, 'import paddle\n'), ((2410, 2447), 'paddle.static.amp.decorate', 'paddle.static.amp.decorate', (['optimizer'], {}), '(optimizer)\n', (2436, 2447), False, 'import paddle\n'), ((2472, 2510), 'paddle.distributed.fleet.distributed_optimizer', 'fleet.distributed_optimizer', (['optimizer'], {}), '(optimizer)\n', (2499, 2510), True, 'import paddle.distributed.fleet as fleet\n'), ((2967, 3001), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (2999, 3001), True, 'import paddle.fluid as fluid\n'), ((3227, 3285), 'paddle.static.program_guard', 'paddle.static.program_guard', (['main_program', 'startup_program'], {}), '(main_program, startup_program)\n', (3254, 3285), False, 'import paddle\n'), ((3309, 3372), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""x"""', 'shape': '[None, 32]', 'dtype': '"""float32"""'}), "(name='x', shape=[None, 32], dtype='float32')\n", (3327, 3372), False, 'import paddle\n'), ((3412, 3472), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""y"""', 'shape': '[None, 1]', 'dtype': '"""int64"""'}), "(name='y', shape=[None, 1], dtype='int64')\n", (3430, 3472), False, 'import paddle\n'), ((3774, 3820), 'paddle.distributed.fleet.DistributedStrategy', 'paddle.distributed.fleet.DistributedStrategy', ([], {}), '()\n', (3818, 3820), False, 'import paddle\n'), ((4041, 4089), 'paddle.distributed.fleet.distributed_optimizer', 'fleet.distributed_optimizer', (['optimizer', 'strategy'], {}), '(optimizer, strategy)\n', (4068, 4089), True, 'import paddle.distributed.fleet as fleet\n'), ((4142, 4172), 'paddle.distributed.fleet._get_applied_meta_list', 'fleet._get_applied_meta_list', ([], {}), '()\n', (4170, 4172), True, 'import paddle.distributed.fleet as fleet\n'), ((895, 927), 'numpy.random.random', 'np.random.random', ([], {'size': '(128, 32)'}), '(size=(128, 32))\n', (911, 927), True, 'import numpy as np\n'), ((960, 995), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(128, 1)'}), '(2, size=(128, 1))\n', (977, 995), True, 'import numpy as np\n'), ((2314, 2347), 'paddle.fluid.regularizer.L2Decay', 'fluid.regularizer.L2Decay', (['(0.0001)'], {}), '(0.0001)\n', (2339, 2347), True, 'import paddle.fluid as fluid\n'), ((3679, 3712), 'paddle.fluid.regularizer.L2Decay', 'fluid.regularizer.L2Decay', (['(0.0001)'], {}), '(0.0001)\n', (3704, 3712), True, 'import paddle.fluid as fluid\n')] |
"""
training helpers for segmentation
ported from: https://github.com/bfortuner/pytorch_tiramisu
"""
import os
import sys
import math
import string
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import torch.nn.functional as F
from . import imgs as img_utils
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform(m.weight)
m.bias.data.zero_()
def predict(model, input_loader, n_batches=1):
    # note: batch_size cannot be changed on an already-built DataLoader;
    # construct input_loader with batch_size=1 instead.
    predictions = []
    model.eval()
    with torch.no_grad():  # replaces the deprecated volatile=True flag
        for input, target in input_loader:
            data = input.cuda()
            label = target.cuda()
            output = model(data)
            pred = get_predictions(output)
            predictions.append([input, target, pred])
    return predictions
def view_sample_predictions(model, loader, n):
inputs, targets = next(iter(loader))
    with torch.no_grad():  # replaces the deprecated volatile=True flag
        data = inputs.cuda()
        label = targets.cuda()
        output = model(data)
    pred = get_predictions(output)
batch_size = inputs.size(0)
for i in range(min(n, batch_size)):
img_utils.view_image(inputs[i])
img_utils.view_annotated(targets[i])
img_utils.view_annotated(pred[i])
def get_predictions(output_batch):
bs,c,h,w = output_batch.size()
tensor = output_batch.data
values, indices = tensor.cpu().max(1)
indices = indices.view(bs,h,w)
return indices
def train(model, trn_loader, optimizer, criterion):
model.train()
trn_loss = 0
trn_error = 0
for idx, (inputs, targets) in enumerate(trn_loader):
inputs = inputs.cuda(non_blocking = True)
targets = targets.cuda(non_blocking = True)
optimizer.zero_grad()
loss_dict = criterion(model, inputs, targets)
loss, output = loss_dict['loss'], loss_dict['output']
loss.backward()
optimizer.step()
trn_loss += loss.item()
_, _, trn_acc_curr = numpy_metrics(output.data.cpu().numpy(), targets.data.cpu().numpy())
trn_error += (1 - trn_acc_curr)
trn_loss /= len(trn_loader)
trn_error /= len(trn_loader)
return trn_loss, trn_error
def test(model, test_loader, criterion, num_classes = 11, return_outputs = False, return_scale = False):
model.eval()
with torch.no_grad():
test_loss = 0
test_error = 0
I_tot = np.zeros(num_classes)
U_tot = np.zeros(num_classes)
if return_outputs:
output_list = []
target_list = []
scale_list = []
for data, target in test_loader:
data = data.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
output = model(data)
loss_dict = criterion(model, data, target)
loss, output = loss_dict['loss'], loss_dict['output']
            #test_loss += masked_loss(output, target, criterion)
            test_loss += loss.item()
            I, U, acc = numpy_metrics(output.cpu().numpy(), target.cpu().numpy(), n_classes=num_classes, void_labels=[num_classes])
I_tot += I
U_tot += U
test_error += (1 - acc)
if return_outputs:
output_list.append(output.cpu().numpy())
target_list.append(target.cpu().numpy())
if return_scale:
scale_list.append(loss_dict['scale'].cpu().numpy())
test_loss /= len(test_loader)
test_error /= len(test_loader)
m_jacc = np.mean(I_tot / U_tot)
if not return_outputs:
return test_loss, test_error, m_jacc
else:
return test_loss, test_error, m_jacc, {'outputs': output_list, 'targets': target_list, 'scales': scale_list}
def numpy_metrics(y_pred, y_true, n_classes = 11, void_labels=[11]):
"""
    Similar to theano_metrics, but y_pred and y_true are numpy arrays
from: https://github.com/SimJeg/FC-DenseNet/blob/master/metrics.py
void label is 11 by default
"""
# Put y_pred and y_true under the same shape
y_pred = np.argmax(y_pred, axis=1)
# We use not_void in case the prediction falls in the void class of the groundtruth
not_void = ~ np.any([y_true == label for label in void_labels], axis=0)
I = np.zeros(n_classes)
U = np.zeros(n_classes)
for i in range(n_classes):
y_true_i = y_true == i
y_pred_i = y_pred == i
I[i] = np.sum(y_true_i & y_pred_i)
U[i] = np.sum((y_true_i | y_pred_i) & not_void)
accuracy = np.sum(I) / np.sum(not_void)
return I, U, accuracy
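# --- Hedged usage sketch (not from the original repo): a toy IoU/accuracy check.
# numpy_metrics expects class scores of shape (batch, classes, H, W) and integer
# labels of shape (batch, H, W); the shapes below are illustrative. Kept commented
# out because this module uses a relative import and is not meant to run directly.
# logits = np.random.randn(2, 11, 8, 8)
# labels = np.random.randint(0, 11, size=(2, 8, 8))
# I, U, acc = numpy_metrics(logits, labels, n_classes=11, void_labels=[11])
# print("per-class IoU:", I / np.maximum(U, 1), "pixel accuracy:", acc)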
| [
"numpy.mean",
"numpy.argmax",
"numpy.any",
"torch.nn.init.kaiming_uniform",
"numpy.zeros",
"numpy.sum",
"torch.no_grad"
] | [((4243, 4268), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (4252, 4268), True, 'import numpy as np\n'), ((4443, 4462), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (4451, 4462), True, 'import numpy as np\n'), ((4471, 4490), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (4479, 4490), True, 'import numpy as np\n'), ((508, 541), 'torch.nn.init.kaiming_uniform', 'nn.init.kaiming_uniform', (['m.weight'], {}), '(m.weight)\n', (531, 541), True, 'import torch.nn as nn\n'), ((2471, 2486), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2484, 2486), False, 'import torch\n'), ((2549, 2570), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (2557, 2570), True, 'import numpy as np\n'), ((2587, 2608), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (2595, 2608), True, 'import numpy as np\n'), ((3660, 3682), 'numpy.mean', 'np.mean', (['(I_tot / U_tot)'], {}), '(I_tot / U_tot)\n', (3667, 3682), True, 'import numpy as np\n'), ((4375, 4435), 'numpy.any', 'np.any', (['[(y_true == label) for label in void_labels]'], {'axis': '(0)'}), '([(y_true == label) for label in void_labels], axis=0)\n', (4381, 4435), True, 'import numpy as np\n'), ((4601, 4628), 'numpy.sum', 'np.sum', (['(y_true_i & y_pred_i)'], {}), '(y_true_i & y_pred_i)\n', (4607, 4628), True, 'import numpy as np\n'), ((4644, 4684), 'numpy.sum', 'np.sum', (['((y_true_i | y_pred_i) & not_void)'], {}), '((y_true_i | y_pred_i) & not_void)\n', (4650, 4684), True, 'import numpy as np\n'), ((4701, 4710), 'numpy.sum', 'np.sum', (['I'], {}), '(I)\n', (4707, 4710), True, 'import numpy as np\n'), ((4713, 4729), 'numpy.sum', 'np.sum', (['not_void'], {}), '(not_void)\n', (4719, 4729), True, 'import numpy as np\n')] |
## examine the effect of acquire_time and variation of dark current
# fix total exposure time and changing acquire time to observe the
# quality of PDF.
import os
import datetime
import warnings
import numpy as np
import matplotlib.pyplot as plt
import tifffile as tif
# glbl, Experiment, ScanPlan, Sample, prun, db, get_images, get_events and
# _identify_image_field are assumed to come from the beamline (xpdAcq) profile.
round_num = input('Which round is this test? ')
acq_time_list = [0.1, 0.5, 1., 3., 5., 10.] # time per frame
total_exp_time = 300 # adjust based on sample
glbl.dk_window = 0.01 # make sure a dark is collected every time
ex = Experiment('0630test', bt)
sp_str = 'ct_'+ str(total_exp_time)
ScanPlan(sp_str)
for num in acq_time_list:
print('collecting {} over list {}'.format(num, acq_time_list))
glbl.frame_acq_time = num # change aquire time first
sample_str = 'acq_time_'+str(num) # make new sample, so tiff name updates
Sample(sample_str, ex)
prun(sample_str, sp_str)
Tim_save_tiff(db[-1])
# plot and save dark current intensity
dark_files = [fn for fn in os.listdir(glbl.tiff_base) if
fn.startswith('dark')]
dark_img_container = []
dark_img_int_container = []
for el in dark_files:
dark_img = tif.imread(os.path.join(glbl.tiff_base, el))
dark_img_container.append(dark_img)
    dark_img_int_container.append(np.sum(dark_img))
np.save('dark_img_round={}'.format(round_num), dark_img_container)
if os.path.isfile('dark_img_round={}.npy'.format(round_num)): # np.save appends .npy
print("dark_img_round={} has been saved in current dir"
.format(round_num))
np.save('dark_int_round={}'.format(round_num), dark_img_int_container)
if os.path.isfile('dark_int_round={}.npy'.format(round_num)): # np.save appends .npy
print("dark_int_round={} has been saved in current dir"
.format(round_num))
fig = plt.figure()
plt.plot(acq_time_list, dark_img_int_container)
plt.show()
# load raw files just in case
raw_files = [fn for fn in os.listdir(glbl.tiff_base) if
fn.startswith('raw')]
raw_img_container = []
for el in raw_files:
raw_img = tif.imread(os.path.join(glbl.tiff_base, el))
raw_img_container.append(raw_img)
np.save('raw_img_round={}'.format(round_num), raw_img_container)
if os.path.isfile('raw_img_round={}.npy'.format(round_num)): # np.save appends .npy
print("raw_img_round={} has been saved in current dir"
.format(round_num))
# refinement....
# starting the second round of this loop by doing ``run -i test.py``
# again and compare data quality / dark current value to the first round
# Also see if we can apply dark with the same acquire time but from different
# rounds.
###### function used ######
W_DIR = glbl.tiff_base
_fname_field = ['sa_name','sp_name']
def _feature_gen(header):
    ''' generate a human readable file name.
    file name is generated by metadata information in header
    '''
    uid = header.start.uid[:6]
    feature_list = []
    field = header['start']
    # get special label first, outside the loop, so it is appended only once
    try:
        if header.start['xp_isdark']:
            feature_list.append('dark')
    except KeyError:
        pass
    for key in _fname_field:
        try:
            el = field[key]
            # truncate string length
            if len(el) > 12:
                value = el[:12]
            else:
                value = el
            # clear space (use the truncated value, not the raw field)
            feature = [ch for ch in list(value) if ch != ' ']
            feature_list.append(''.join(feature))
        except KeyError:
            pass # protection to allow missing required fields. This should not happen
    feature_list.append(uid)
    f_name = "_".join(feature_list)
    return f_name
def _timestampstr(timestamp):
    ''' convert timestamp to strftime format '''
timestring = datetime.datetime.fromtimestamp(float(timestamp)).strftime('%Y%m%d-%H%M')
return timestring
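# e.g. _timestampstr(1467300000.0) -> '20160630-1140' (the exact string depends
# on the machine's local timezone, since fromtimestamp converts to local time)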
def Tim_save_tiff(headers, dark_subtraction=True, *, max_count=None):
''' save images obtained from dataBroker as tiff format files.
Parameters
----------
headers : list
a list of header objects obtained from a query to dataBroker
dark_subtraction : bool, optional
Default is True, which allows dark/background subtraction to
be done before saving each image. If header doesn't contain
necessary information to perform dark subtraction, uncorrected
image will be saved.
max_count : int, optional
The maximum number of events to process per-run. This can be
useful to 'preview' an export or if there are corrupted files
in the data stream (ex from the IOC crashing during data acquisition).
'''
F_EXTEN = '.tif' # request from beamline scientist. No difference actually.
    e = '''Can not find a proper dark image applied to this header.
        Files will be saved but no dark subtraction will be applied'''
is_dark_subtracted = False # Flip it only if subtraction is successfully done
# prepare header
if type(list(headers)[1]) == str:
header_list = list()
header_list.append(headers)
else:
header_list = headers
for header in header_list:
print('Saving your image(s) now....')
# information at header level
img_field = _identify_image_field(header)
dark_img = None
if 'sc_dk_field_uid' not in header.start:
warnings.warn("Requested to do dark correction, but header does "
"not contain a 'dk_field_uid' entry. "
"Disabling dark subtraction.")
dark_subtraction = False
if dark_subtraction:
dark_uid_appended = header.start['sc_dk_field_uid']
try:
# bluesky only looks for uid it defines
dark_search = {'group': 'XPD',
'sc_dark_uid': dark_uid_appended} # the one we need to look up data
dark_header = db(**dark_search)
dark_img = np.asarray(get_images(dark_header,
img_field)).squeeze()
except ValueError:
print(e) # protection. Should not happen
warnings.warn("Requested to do dark correction, but "
"extracting the dark image failed. Proceeding "
"without correction.")
for ev in get_events(header, fill=True):
img = ev['data'][img_field]
ind = ev['seq_num']
f_name = _feature_gen(header)
# time when triggering area detector
event_timestamp = ev['timestamps']['pe1_image']
f_name = '_'.join([f_name, _timestampstr(event_timestamp)])
# dark subtration logic
if dark_img is not None:
img -= dark_img
# add prefix if subtracted
f_name = 'sub_' + f_name
# complete file name
if 'temperature' in ev['data']:
f_name = f_name + '_' + str(ev['data']['temperature']) + 'K'
# index is still needed as we don't want timestamp in file
# name down to seconds
combind_f_name = '{}_{:05d}{}'.format(f_name, ind, F_EXTEN)
w_name = os.path.join(W_DIR, combind_f_name)
dark_w_name = os.path.join(W_DIR, 'dark'+combind_f_name)
raw_w_name = os.path.join(W_DIR, 'raw'+combind_f_name)
        tif.imsave(w_name, img) # subtracted (or uncorrected if no dark was found)
        if os.path.isfile(w_name):
            print('subtracted image "%s" has been saved at "%s"' %
                  (combind_f_name, W_DIR))
        else:
            print('Sorry, something went wrong with your tif saving')
            return
        if dark_img is not None:
            # only write the dark/raw companion files when a dark image exists
            tif.imsave(dark_w_name, dark_img) # dark image
            tif.imsave(raw_w_name, dark_img + img) # raw image
            if os.path.isfile(dark_w_name):
                print('dark image "%s" has been saved at "%s"' %
                      (os.path.basename(dark_w_name), W_DIR))
            if os.path.isfile(raw_w_name):
                print('raw image "%s" has been saved at "%s"' %
                      (os.path.basename(raw_w_name), W_DIR))
if max_count is not None and ind >= max_count:
# break the loop if max_count reached, move to next header
break
print('||********Saving process FINISHED********||')
| [
"os.listdir",
"matplotlib.pyplot.plot",
"os.path.join",
"os.path.isfile",
"numpy.sum",
"matplotlib.pyplot.figure",
"os.path.basename",
"tifffile.imsave",
"matplotlib.pyplot.show"
] | [((1670, 1682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1680, 1682), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1730), 'matplotlib.pyplot.plot', 'plt.plot', (['acq_time_list', 'dark_img_int_container'], {}), '(acq_time_list, dark_img_int_container)\n', (1691, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1739, 1741), True, 'import matplotlib.pyplot as plt\n'), ((925, 951), 'os.listdir', 'os.listdir', (['glbl.tiff_base'], {}), '(glbl.tiff_base)\n', (935, 951), False, 'import os\n'), ((1092, 1124), 'os.path.join', 'os.path.join', (['glbl.tiff_base', 'el'], {}), '(glbl.tiff_base, el)\n', (1104, 1124), False, 'import os\n'), ((1800, 1826), 'os.listdir', 'os.listdir', (['glbl.tiff_base'], {}), '(glbl.tiff_base)\n', (1810, 1826), False, 'import os\n'), ((1935, 1967), 'os.path.join', 'os.path.join', (['glbl.tiff_base', 'el'], {}), '(glbl.tiff_base, el)\n', (1947, 1967), False, 'import os\n'), ((1207, 1223), 'numpy.sum', 'np.sum', (['dark_img'], {}), '(dark_img)\n', (1213, 1223), True, 'import numpy as np\n'), ((7106, 7141), 'os.path.join', 'os.path.join', (['W_DIR', 'combind_f_name'], {}), '(W_DIR, combind_f_name)\n', (7118, 7141), False, 'import os\n'), ((7168, 7212), 'os.path.join', 'os.path.join', (['W_DIR', "('dark' + combind_f_name)"], {}), "(W_DIR, 'dark' + combind_f_name)\n", (7180, 7212), False, 'import os\n'), ((7236, 7279), 'os.path.join', 'os.path.join', (['W_DIR', "('raw' + combind_f_name)"], {}), "(W_DIR, 'raw' + combind_f_name)\n", (7248, 7279), False, 'import os\n'), ((7290, 7313), 'tifffile.imsave', 'tif.imsave', (['w_name', 'img'], {}), '(w_name, img)\n', (7300, 7313), True, 'import tifffile as tif\n'), ((7339, 7372), 'tifffile.imsave', 'tif.imsave', (['dark_w_name', 'dark_img'], {}), '(dark_w_name, dark_img)\n', (7349, 7372), True, 'import tifffile as tif\n'), ((7398, 7436), 'tifffile.imsave', 'tif.imsave', (['raw_w_name', '(dark_img + img)'], {}), '(raw_w_name, dark_img + img)\n', (7408, 7436), True, 'import tifffile as tif\n'), ((7464, 7486), 'os.path.isfile', 'os.path.isfile', (['w_name'], {}), '(w_name)\n', (7478, 7486), False, 'import os\n'), ((7736, 7763), 'os.path.isfile', 'os.path.isfile', (['dark_w_name'], {}), '(dark_w_name)\n', (7750, 7763), False, 'import os\n'), ((7907, 7933), 'os.path.isfile', 'os.path.isfile', (['raw_w_name'], {}), '(raw_w_name)\n', (7921, 7933), False, 'import os\n'), ((7853, 7882), 'os.path.basename', 'os.path.basename', (['dark_w_name'], {}), '(dark_w_name)\n', (7869, 7882), False, 'import os\n'), ((8022, 8050), 'os.path.basename', 'os.path.basename', (['raw_w_name'], {}), '(raw_w_name)\n', (8038, 8050), False, 'import os\n')] |
import os,sys
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
from collections import defaultdict
from itertools import dropwhile
import nltk
import string
from nltk import word_tokenize
wids_global = defaultdict(lambda: len(wids_global))
class vqa_dataset(Dataset):
# Dataset for utterances and types
def __init__(self, file, train_flag, wids=None):
self.utts = []
self.types = []
if train_flag:
wids = wids_global
wids['_PAD'] = 0
wids['<sos>'] = 1
wids['<eos>'] = 2
wids['UNK'] = 3
word_tokens = self.remove_rarewords(file, 10)
#sys.exit()
f = open(file)
c = 0
for line in f:
c+=1
if c > 5000000000000:
#For debugging, faster to load just 10 lines
continue
line = word_tokenize(line.split('\n')[0])
for i, w in enumerate(line):
if not train_flag: # Validation Mode / Testing Mode
if w in wids and w in word_tokens:
pass
else:
line[i] = 'UNK'
elif train_flag and w in word_tokens: # Training Mode and not rare word
wid = wids[w]
else: # Training mode but rare word
line[i] = 'UNK'
self.utts.append([1] + [wids[w] for w in line] + [2])
self.types.append(self.get_type(line))
def remove_rarewords(self, file, freq):
words = defaultdict(int)
freq = int(freq)
c = 0
f = open(file)
for line in f:
c += 1
if c > 50000000: # For debugging, faster to load just 10 lines
continue
line = line.split('\n')[0]
line = word_tokenize(line) + ['_PAD'] + ['<sos>'] + ['<eos>'] + ["UNK"] # Punctuation and stuff
#print(line)
for w in line:
words[w] += 1
for k in list(words):
#print(k, words[k])
if words[k] < freq:
##print("Deleting")
##print('\n')
del words[k]
f.close()
return words
def __len__(self):
return len(self.utts)
def __getitem__(self, item):
return self.utts[item], self.types[item]
    def get_wids(self):
return wids_global
def get_type(self,line):
if line[0] == "Is" or line[0] == 'Are':
return 0
elif line[0] == "How" and line[1] == "many":
return 1
else:
return 2
def collate_fn(batch):
"""Create batch"""
#print(batch)
input_lengths = [len(x[0]) for x in batch]
max_input_len = np.max(input_lengths) + 1
# Add single zeros frame at least, so plus 1
max_target_len = np.max([len(x[0]) for x in batch]) + 1
    a = np.array( [ _pad(x[0], max_input_len) for x in batch ], dtype=np.int64)
    b = np.array( [ x[1] for x in batch ], dtype=np.int64)
a_batch = torch.LongTensor(a)
b_batch = torch.LongTensor(b)
input_lengths = torch.LongTensor(input_lengths)
return a_batch, b_batch
def _pad(seq, max_len):
return np.pad(seq, (0, max_len - len(seq)),
mode='constant', constant_values=0)
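# --- Hedged usage sketch (assumption, not in the original file).
# 'questions.txt' is a placeholder path with one utterance per line.
# train_set = vqa_dataset('questions.txt', train_flag=True)
# loader = DataLoader(train_set, batch_size=32, shuffle=True, collate_fn=collate_fn)
# for padded_utts, types in loader:
#     print(padded_utts.shape, types.shape)   # (batch, max_len) and (batch,)
#     break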
| [
"nltk.word_tokenize",
"torch.LongTensor",
"numpy.max",
"numpy.array",
"collections.defaultdict"
] | [((2959, 3004), 'numpy.array', 'np.array', (['[x[1] for x in batch]'], {'dtype': 'np.int'}), '([x[1] for x in batch], dtype=np.int)\n', (2967, 3004), True, 'import numpy as np\n'), ((3022, 3041), 'torch.LongTensor', 'torch.LongTensor', (['a'], {}), '(a)\n', (3038, 3041), False, 'import torch\n'), ((3056, 3075), 'torch.LongTensor', 'torch.LongTensor', (['b'], {}), '(b)\n', (3072, 3075), False, 'import torch\n'), ((3096, 3127), 'torch.LongTensor', 'torch.LongTensor', (['input_lengths'], {}), '(input_lengths)\n', (3112, 3127), False, 'import torch\n'), ((1572, 1588), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1583, 1588), False, 'from collections import defaultdict\n'), ((2735, 2756), 'numpy.max', 'np.max', (['input_lengths'], {}), '(input_lengths)\n', (2741, 2756), True, 'import numpy as np\n'), ((1843, 1862), 'nltk.word_tokenize', 'word_tokenize', (['line'], {}), '(line)\n', (1856, 1862), False, 'from nltk import word_tokenize\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 3 13:06:11 2021
@author: Oliver
"""
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
"""__________________REFERENCE SOLUTION________________"""
"""DONE"""
def integrand(x):
return x**2+4*x-12
ans, err = quad(integrand, -10,10)
def integral(x):
return 1/3*x**3+2*x**2-12*x
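# Sanity check by hand: with F(x) = x**3/3 + 2*x**2 - 12*x,
# F(10) - F(-10) = (1000/3 + 200 - 120) - (-1000/3 + 200 + 120)
#               = 2000/3 - 240 ≈ 426.667, which `ans` above should match.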
x = np.linspace(-10, 10, 8000) # plot over the exercise domain -10 < x < 10
plt.plot(x,integral(x),'g',label ='reference')
plt.legend()
plt.show()
print(f'The analytical solution to the integral is: {ans}')
print()
# Exercise 1: Implement the Midpoint/rectangular Rule code and execute
# it to integrate the function x^2+4x-12 in the domain -10<x<10
"""______________________________________________________________________"""
def calculate_dx(a,b,n):
return (b-a)/float(n)
"""DONE"""
def rect_rule(f,a,b,n):
total = 0.0
dx = calculate_dx(a,b,n)
t = []
for i in range(0,n):
        # midpoint rule: evaluate f at the centre of each subinterval (no abs)
        total += f(a + (i + 0.5)*dx)
t.append(total)
plt.plot(range(n),t)
plt.show()
return dx*total
def f(x):
return x**2+4*x-12
print(f' Exercise 1, midpoint/rectangular rule result: {rect_rule(f,-10,10,10000)}')
print(f'Analytical - midpoint/rectangular rule result: {abs(rect_rule(f,-10,10,10000) - ans)}')
print(f'Relative error midpoint/rectangular result: {abs(ans - rect_rule(f,-10,10,10000) )/abs(rect_rule(f,-10,10,10000))}')
print()
"""______________________________________________________________________"""
#Exercise 2: Implement this trapezoid Rule code and execute it to integrate
# the function x**2 +4*x-12 in the domain -10<x<10 (enter inputs first)
"""NOT DONE PLOTTING"""
def trapz(f,a,b,N=50):
x = np.linspace(a,b,N) # N+1 points make N subintervals
print(x)
y = f(x)
y_right = y[1:] # right endpoints
y_left = y[:-1] # left endpoints
total = []
for i in range(N+1,1,-1):
dx = (b-a)/i
Z = (dx/2) *np.sum(y_right+y_left)
total.append(Z)
plt.plot(x,total,label = 'traps')
plt.legend()
plt.show()
# while i <= N:
# k += 2
# g.append((dx/2)*np.sum(f(k)+f(k+1))
# plt.plot(range(N),g)
# plt.show
return Z
a= -10
b= 10
n= 10000
print(f' The trapezoidal rule result is: {trapz(f,a,b,n)}')
print(f'Analitycal - trapezoidal rule: {abs(trapz(f,a,b,n) - ans)}')
print(f'Relative error trapezoidal rule result: {abs(ans - trapz(f,a,b,n) )/abs(trapz(f,a,b,n))}')
print()
"""______________________________________________________________________"""
# Exercise 3: Implement this Simpson's One Third Rule code and execute it
# to integrate the function x**2+4x-12 in the domain -10<x<10
"""DONE"""
def simps(f,a,b,N=50):
    if N % 2 == 1:
        raise ValueError("N must be an even integer")
    x = np.linspace(a,b,N+1)
    y = f(x)
    dx = (b-a)/N
    # composite Simpson 1/3: one weighted term per pair of subintervals
    t = np.cumsum(dx/3 * (y[0:-1:2] + 4*y[1::2] + y[2::2])) # s[i:j:k] - "slice of s from i to j with step k"
    plt.plot(x[2::2],t)
    plt.show()
    return t[-1] # y[2::2] - start at the 2nd element and skip through in steps of 2 each time
f = lambda x: x**2+4*x-12
solution = simps(f,-10,10,10000)
print(f' The Simpson one third rule solution: {solution}')
print(f'Analytical - Simpson one third rule: {abs(solution - ans)}')
print(f'Relative error Simpson one third rule result: {abs(ans - solution )/abs(solution)}')
print()
"""______________________________________________________________________"""
# Exercise 4: Implement this Simpson's three-eighths Rule code and execute it to
# integrate the function x**2+4x-12 in the domain -10 < x < 10 (enter inputs first).
"""DONE"""
def func(x):
    return x**2+4*x-12
def calculate(lower_limit, upper_limit, interval_limit ):
interval_size = (float(upper_limit - lower_limit) / interval_limit)
sum = func(lower_limit) + func(upper_limit);
# Calculates value till integral limit
n=0
k = []
t = []
for i in range(1, interval_limit ):
if (i % 3 == 0):
k.append(n)
n +=1
sum = sum + 2 * func(lower_limit + i * interval_size)
t.append(sum)
else:
sum = sum + 3 * func(lower_limit + i * interval_size)
plt.plot(k,t,'r')
return ((float( 3 * interval_size) / 8 ) * sum )
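# Note (added): the composite 3/8 rule assumes interval_limit is a multiple of 3;
# with interval_limit = 10000 the trailing partial block adds a small extra error.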
# driver function
interval_limit = 10000
lower_limit = -10
upper_limit = 10
integral_res = calculate(lower_limit, upper_limit, interval_limit)
# rounding the final answer to 6 decimal places
print (f' The Simpson three-eighths rule solution: {round(integral_res, 6)}')
print(f'Analytical - three-eighths rule: {abs(round(integral_res, 6) - ans)}')
print(f'Relative error Simpson three-eighths rule result: {abs(ans - round(integral_res, 6) )/abs(round(integral_res, 6))}')
# Plot the evolution of the integral value as a function of the number of
# integration intervals for each technique (Hint: you will have to modify each
# code to run for different values of N and plot the integral obtained for each
# run). Use arrays to store values and matplotlib to plot Integral v. N).
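# --- Hedged sketch (not in the original script): one way to do the plot above.
# Assumes the corrected rect_rule/trapz/simps defined earlier in this file; the
# Ns grid and labels are illustrative choices. Note that each call also pops the
# routine's own internal plot window.
Ns = [10, 50, 100, 500, 1000, 5000]          # all even, so simps() accepts them
methods = [('midpoint', rect_rule), ('trapezoid', trapz), ('simpson 1/3', simps)]
fig = plt.figure()
for name, method in methods:
    vals = [method(f, -10, 10, N) for N in Ns]
    plt.plot(Ns, vals, label=name)
plt.axhline(ans, linestyle='--', label='scipy quad')
plt.xlabel('N')
plt.ylabel('Integral')
plt.legend()
plt.grid()
plt.show()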
| [
"scipy.integrate.quad",
"matplotlib.pyplot.plot",
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((308, 332), 'scipy.integrate.quad', 'quad', (['integrand', '(-10)', '(10)'], {}), '(integrand, -10, 10)\n', (312, 332), False, 'from scipy.integrate import quad\n'), ((387, 413), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)', '(8000)'], {}), '(1, 1000, 8000)\n', (398, 413), True, 'import numpy as np\n'), ((461, 473), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (471, 473), True, 'import matplotlib.pyplot as plt\n'), ((475, 485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (483, 485), True, 'import matplotlib.pyplot as plt\n'), ((1050, 1060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1058, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1757), 'numpy.linspace', 'np.linspace', (['a', 'b', 'N'], {}), '(a, b, N)\n', (1748, 1757), True, 'import numpy as np\n'), ((2037, 2070), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'total'], {'label': '"""traps"""'}), "(x, total, label='traps')\n", (2045, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2088), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2086, 2088), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2102, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2920), 'numpy.linspace', 'np.linspace', (['a', 'b', '(N + 1)'], {}), '(a, b, N + 1)\n', (2907, 2920), True, 'import numpy as np\n'), ((3118, 3132), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't'], {}), '(x, t)\n', (3126, 3132), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3147), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3145, 3147), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4433), 'matplotlib.pyplot.plot', 'plt.plot', (['k', 't', '"""r"""'], {}), "(k, t, 'r')\n", (4422, 4433), True, 'import matplotlib.pyplot as plt\n'), ((1984, 2008), 'numpy.sum', 'np.sum', (['(y_right + y_left)'], {}), '(y_right + y_left)\n', (1990, 2008), True, 'import numpy as np\n'), ((3003, 3044), 'numpy.sum', 'np.sum', (['(y[0:-1:2] + 4 * y[1::2] + y[2::2])'], {}), '(y[0:-1:2] + 4 * y[1::2] + y[2::2])\n', (3009, 3044), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
"""Print information about python."""
__authors__ = ["<NAME>"]
__date__ = "09/09/2016"
__license__ = "MIT"
import sys
import platform
print("Python %s bits" % (tuple.__itemsize__ * 8))
print(" maxsize: %s\t maxunicode: %s" % (sys.maxsize, sys.maxunicode))
print(sys.version)
print(" ")
print("Platform: " + platform.platform())
print("- Machine: " + platform.machine())
print(" ")
try:
from distutils.sysconfig import get_config_vars
except ImportError:
from sysconfig import get_config_vars
print("Config: " + str(get_config_vars("CONFIG_ARGS")))
print("")
try:
import numpy
except ImportError:
print("Numpy not installed")
else:
print("Numpy %s" % numpy.version.version)
print(" include %s" % numpy.get_include())
print(" options %s" % numpy.get_printoptions())
print("")
try:
import pyopencl
except Exception as error:
print("Unable to import pyopencl: %s" % error)
else:
print("PyOpenCL platform:")
try:
cl_platforms = pyopencl.get_platforms()
except pyopencl.LogicError:
print("The module pyOpenCL has been imported but get_platforms failed")
else:
for p in cl_platforms:
print(" %s" % p)
for d in p.get_devices():
print(" %s max_workgroup_size is %s" % (d, d.max_work_group_size))
try:
from silx.opencl import ocl
except Exception:
print("Unable to import silx")
else:
print("PyOpenCL platform as seen by silx:")
if ocl:
for p in ocl.platforms:
print(" %s:" % p)
for d in p.devices:
print(" %s max_workgroup_size is %s" % (d, d.max_work_group_size))
have_qt_binding = False
try:
import PyQt5.QtCore
have_qt_binding = True
print("Qt (from PyQt5): %s" % PyQt5.QtCore.qVersion())
except ImportError:
pass
try:
import PyQt4.QtCore
have_qt_binding = True
print("Qt (from PyQt4): %s" % PyQt4.QtCore.qVersion())
except ImportError:
pass
try:
import PySide2.QtCore
have_qt_binding = True
print("Qt (from PySide2): %s" % PySide2.QtCore.qVersion())
except ImportError:
pass
try:
import PySide.QtCore
have_qt_binding = True
print("Qt (from PySide): %s" % PySide.QtCore.qVersion())
except ImportError:
pass
if not have_qt_binding:
print("No Qt binding")
try:
import sip
print("SIP: %s" % sip.SIP_VERSION_STR)
except ImportError:
pass
| [
"pyopencl.get_platforms",
"numpy.get_printoptions",
"platform.platform",
"sysconfig.get_config_vars",
"numpy.get_include",
"platform.machine"
] | [((356, 375), 'platform.platform', 'platform.platform', ([], {}), '()\n', (373, 375), False, 'import platform\n'), ((399, 417), 'platform.machine', 'platform.machine', ([], {}), '()\n', (415, 417), False, 'import platform\n'), ((1036, 1060), 'pyopencl.get_platforms', 'pyopencl.get_platforms', ([], {}), '()\n', (1058, 1060), False, 'import pyopencl\n'), ((573, 603), 'sysconfig.get_config_vars', 'get_config_vars', (['"""CONFIG_ARGS"""'], {}), "('CONFIG_ARGS')\n", (588, 603), False, 'from sysconfig import get_config_vars\n'), ((775, 794), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (792, 794), False, 'import numpy\n'), ((827, 851), 'numpy.get_printoptions', 'numpy.get_printoptions', ([], {}), '()\n', (849, 851), False, 'import numpy\n')] |
"""
SynthTIGER
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import numpy as np
from synthtiger.components.component import Component
class FlowLayout(Component):
def __init__(self, space=(0, 0), vertical=False):
super().__init__()
self.space = space
self.vertical = vertical
def sample(self, meta=None):
if meta is None:
meta = {}
space = meta.get("space", np.random.randint(self.space[0], self.space[1] + 1))
vertical = meta.get("vertical", self.vertical)
meta = {
"space": space,
"vertical": vertical,
}
return meta
def apply(self, layers, meta=None):
meta = self.sample(meta)
space = meta["space"]
vertical = meta["vertical"]
for layer in layers:
layer.center = (0, 0)
if vertical:
for idx in range(1, len(layers)):
layers[idx].top = layers[idx - 1].bottom + space
else:
for idx in range(1, len(layers)):
layers[idx].left = layers[idx - 1].right + space
return meta
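# --- Hedged illustration (not part of synthtiger): a minimal stand-in layer class,
# just to show how FlowLayout.apply positions boxes left-to-right. The real
# synthtiger Layer exposes similar geometry properties; this stub is an assumption.
class _BoxLayer:
    def __init__(self, w, h):
        self.w, self.h = w, h
        self.center = (0, 0)
    @property
    def left(self): return self.center[0] - self.w / 2
    @left.setter
    def left(self, v): self.center = (v + self.w / 2, self.center[1])
    @property
    def right(self): return self.center[0] + self.w / 2
    @property
    def top(self): return self.center[1] - self.h / 2
    @top.setter
    def top(self, v): self.center = (self.center[0], v + self.h / 2)
    @property
    def bottom(self): return self.center[1] + self.h / 2
# e.g. boxes = [_BoxLayer(10, 4), _BoxLayer(6, 4)]
#      FlowLayout(space=(2, 3)).apply(boxes)  # second box starts 2-3 px right of the first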
| [
"numpy.random.randint"
] | [((433, 484), 'numpy.random.randint', 'np.random.randint', (['self.space[0]', '(self.space[1] + 1)'], {}), '(self.space[0], self.space[1] + 1)\n', (450, 484), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
def gaussian_kernel(kernel_size,sigma):
kx = cv2.getGaussianKernel(kernel_size,sigma)
ky = cv2.getGaussianKernel(kernel_size,sigma)
return np.multiply(kx,np.transpose(ky))
#Read the image as grayscale and convert it to float64
img=np.float64(cv2.imread('./Image_Pairs/Graffiti0.png',cv2.IMREAD_GRAYSCALE))
(h,w) = img.shape
print("Dimension de l'image :",h,"lignes x",w,"colonnes")
print("Type de l'image :",img.dtype)
#Start of the computation
t1 = cv2.getTickCount()
Theta = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
# The Harris interest function is computed below
alpha = 0.01
#kenelH = (1/9)*np.ones((3,3),np.uint8)
kenelH = gaussian_kernel(3, 3)
kenelIx = np.array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
Ix = cv2.filter2D(img, -1, kenelIx)
kenelIy = np.array([[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]])
Iy = cv2.filter2D(img, -1, kenelIy)
H11 = cv2.filter2D(Ix*Ix, -1, kenelH)
H12 = cv2.filter2D(Ix*Iy, -1, kenelH)
H21 = cv2.filter2D(Iy*Ix, -1, kenelH)
H22 = cv2.filter2D(Iy*Iy, -1, kenelH)
Theta = H11*H22 - H12*H21 - alpha*(H11+H22)**2
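# Harris response: Theta = det(M) - alpha*trace(M)**2, where M is the
# Gaussian-smoothed structure tensor [[Ix*Ix, Ix*Iy], [Iy*Ix, Iy*Iy]].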
#
#
# Compute local maxima and apply the threshold
Theta_maxloc = cv2.copyMakeBorder(Theta,0,0,0,0,cv2.BORDER_REPLICATE)
d_maxloc = 3
seuil_relatif = 0.01
se = np.ones((d_maxloc,d_maxloc),np.uint8)
Theta_dil = cv2.dilate(Theta,se)
#Suppress the non-local-maxima
Theta_maxloc[Theta < Theta_dil] = 0.0
#Also discard values that are too weak
Theta_maxloc[Theta < seuil_relatif*Theta.max()] = 0.0
t2 = cv2.getTickCount()
time = (t2 - t1)/ cv2.getTickFrequency()
print("Mon calcul des points de Harris :",time,"s")
print("Nombre de cycles par pixel :",(t2 - t1)/(h*w),"cpp")
plt.subplot(131)
plt.imshow(img,cmap = 'gray')
plt.title('Original image')
plt.subplot(132)
plt.imshow(Theta,cmap = 'gray')
plt.title('Harris function')
se_croix = np.uint8([[1, 0, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 1, 0],
[1, 0, 0, 0, 1]])
Theta_ml_dil = cv2.dilate(Theta_maxloc,se_croix)
#Re-read the image for colour display
Img_pts=cv2.imread('./Image_Pairs/Graffiti0.png',cv2.IMREAD_COLOR)
(h,w,c) = Img_pts.shape
print("Dimension de l'image :",h,"lignes x",w,"colonnes x",c,"canaux")
print("Type de l'image :",Img_pts.dtype)
#Display the points (crosses) in red
Img_pts[Theta_ml_dil > 0] = [255,0,0]
plt.subplot(133)
plt.imshow(Img_pts)
plt.title('Harris points')
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.uint8",
"numpy.ones",
"matplotlib.pyplot.show",
"cv2.copyMakeBorder",
"cv2.filter2D",
"cv2.getGaussianKernel",
"numpy.array",
"cv2.getTickCount",
"matplotlib.pyplot.title",
"cv2.dilate",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"cv2.getTickFrequenc... | [((528, 546), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (544, 546), False, 'import cv2\n'), ((555, 612), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', '(0)', '(0)', '(0)', '(0)', 'cv2.BORDER_REPLICATE'], {}), '(img, 0, 0, 0, 0, cv2.BORDER_REPLICATE)\n', (573, 612), False, 'import cv2\n'), ((760, 806), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (768, 806), True, 'import numpy as np\n'), ((852, 882), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kenelIx'], {}), '(img, -1, kenelIx)\n', (864, 882), False, 'import cv2\n'), ((893, 939), 'numpy.array', 'np.array', (['[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]'], {}), '([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n', (901, 939), True, 'import numpy as np\n'), ((985, 1015), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kenelIy'], {}), '(img, -1, kenelIy)\n', (997, 1015), False, 'import cv2\n'), ((1022, 1055), 'cv2.filter2D', 'cv2.filter2D', (['(Ix * Ix)', '(-1)', 'kenelH'], {}), '(Ix * Ix, -1, kenelH)\n', (1034, 1055), False, 'import cv2\n'), ((1060, 1093), 'cv2.filter2D', 'cv2.filter2D', (['(Ix * Iy)', '(-1)', 'kenelH'], {}), '(Ix * Iy, -1, kenelH)\n', (1072, 1093), False, 'import cv2\n'), ((1098, 1131), 'cv2.filter2D', 'cv2.filter2D', (['(Iy * Ix)', '(-1)', 'kenelH'], {}), '(Iy * Ix, -1, kenelH)\n', (1110, 1131), False, 'import cv2\n'), ((1136, 1169), 'cv2.filter2D', 'cv2.filter2D', (['(Iy * Iy)', '(-1)', 'kenelH'], {}), '(Iy * Iy, -1, kenelH)\n', (1148, 1169), False, 'import cv2\n'), ((1275, 1334), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['Theta', '(0)', '(0)', '(0)', '(0)', 'cv2.BORDER_REPLICATE'], {}), '(Theta, 0, 0, 0, 0, cv2.BORDER_REPLICATE)\n', (1293, 1334), False, 'import cv2\n'), ((1369, 1408), 'numpy.ones', 'np.ones', (['(d_maxloc, d_maxloc)', 'np.uint8'], {}), '((d_maxloc, d_maxloc), np.uint8)\n', (1376, 1408), True, 'import numpy as np\n'), ((1419, 1440), 'cv2.dilate', 'cv2.dilate', (['Theta', 'se'], {}), '(Theta, se)\n', (1429, 1440), False, 'import cv2\n'), ((1622, 1640), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (1638, 1640), False, 'import cv2\n'), ((1795, 1811), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (1806, 1811), True, 'from matplotlib import pyplot as plt\n'), ((1812, 1840), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (1822, 1840), True, 'from matplotlib import pyplot as plt\n'), ((1842, 1870), 'matplotlib.pyplot.title', 'plt.title', (['"""Image originale"""'], {}), "('Image originale')\n", (1851, 1870), True, 'from matplotlib import pyplot as plt\n'), ((1872, 1888), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (1883, 1888), True, 'from matplotlib import pyplot as plt\n'), ((1889, 1919), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Theta'], {'cmap': '"""gray"""'}), "(Theta, cmap='gray')\n", (1899, 1919), True, 'from matplotlib import pyplot as plt\n'), ((1921, 1952), 'matplotlib.pyplot.title', 'plt.title', (['"""Fonction de Harris"""'], {}), "('Fonction de Harris')\n", (1930, 1952), True, 'from matplotlib import pyplot as plt\n'), ((1965, 2065), 'numpy.uint8', 'np.uint8', (['[[1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [1, 0,\n 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0\n ], [1, 0, 0, 0, 1]])\n', (1973, 2065), True, 'import numpy as np\n'), ((2100, 2134), 'cv2.dilate', 'cv2.dilate', (['Theta_maxloc', 
'se_croix'], {}), '(Theta_maxloc, se_croix)\n', (2110, 2134), False, 'import cv2\n'), ((2182, 2241), 'cv2.imread', 'cv2.imread', (['"""./Image_Pairs/Graffiti0.png"""', 'cv2.IMREAD_COLOR'], {}), "('./Image_Pairs/Graffiti0.png', cv2.IMREAD_COLOR)\n", (2192, 2241), False, 'import cv2\n'), ((2455, 2471), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2466, 2471), True, 'from matplotlib import pyplot as plt\n'), ((2472, 2491), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Img_pts'], {}), '(Img_pts)\n', (2482, 2491), True, 'from matplotlib import pyplot as plt\n'), ((2492, 2521), 'matplotlib.pyplot.title', 'plt.title', (['"""Points de Harris"""'], {}), "('Points de Harris')\n", (2501, 2521), True, 'from matplotlib import pyplot as plt\n'), ((2523, 2533), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2531, 2533), True, 'from matplotlib import pyplot as plt\n'), ((118, 159), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['kernel_size', 'sigma'], {}), '(kernel_size, sigma)\n', (139, 159), False, 'import cv2\n'), ((168, 209), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['kernel_size', 'sigma'], {}), '(kernel_size, sigma)\n', (189, 209), False, 'import cv2\n'), ((328, 391), 'cv2.imread', 'cv2.imread', (['"""./Image_Pairs/Graffiti0.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('./Image_Pairs/Graffiti0.png', cv2.IMREAD_GRAYSCALE)\n", (338, 391), False, 'import cv2\n'), ((1659, 1681), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (1679, 1681), False, 'import cv2\n'), ((235, 251), 'numpy.transpose', 'np.transpose', (['ky'], {}), '(ky)\n', (247, 251), True, 'import numpy as np\n')] |
from __future__ import print_function
import argparse
import shutil
import torch
import torchvision
import random
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import matplotlib.pyplot as plt
writer = SummaryWriter()
from resnet import ResNet_small
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def train(model, dataloader, optimizer, scheduler, loss_fn, epoch):
# Set the model into train mode
model.train()
train_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
for batch_idx, (train_batch, labels_batch) in enumerate(dataloader):
# move the data onto the device
train_batch, labels_batch = train_batch.to(device), labels_batch.to(device)
optimizer.zero_grad()
# compute model outputs and loss
outputs = model(train_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
loss.backward()
# after computing gradients based on current batch loss,
# apply them to parameters
optimizer.step()
scheduler.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# write to tensorboard
writer.add_scalar(
"train/loss",
train_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/lr",
scheduler._last_lr[0],
(datacount * (epoch + 1)) + (batch_idx + 1),
)
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(train_batch),
len(dataloader.dataset),
100.0 * batch_idx / len(dataloader),
(train_loss / (batch_idx + 1)),
# loss,
),
end="\r",
flush=True,
)
print()
return train_loss / datacount, 100.0 * correct / total
def test(model, dataloader, loss_fn, epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
with torch.no_grad():
for batch_idx, (test_batch, labels_batch) in enumerate(dataloader):
# move the data onto device
test_batch, labels_batch = test_batch.to(device), labels_batch.to(device)
# compute the model output
outputs = model(test_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
test_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# log the test_loss
writer.add_scalar(
"test/loss",
test_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"test/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
test_loss = test_loss / datacount
acc = 100 * correct / total
print("Test accuracy:", acc)
return test_loss, acc
def save_ckp(state, checkpoint_dir):
f_path = "gender-best-checkpoint.pt"
torch.save(state, f_path)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch GENDER CV LAB")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 200)",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--save_model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--load_checkpoint",
type=str,
default=False,
help="Path of checkpoint to restore, if none will start training from 0",
)
args = parser.parse_args()
random.seed(args.seed)
os.environ["PYTHONHASHSEED"] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 8, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
# Load
x_train = np.load("data/x_train.npy")
x_test = np.load("data/x_test.npy")
x_train = x_train / 255
x_test = x_test / 255
x_train = torch.from_numpy(x_train).squeeze().permute(0, 3, 1, 2).float()
x_test = torch.from_numpy(x_test).squeeze().permute(0, 3, 1, 2).float()
y_train = np.load("data/y_train.npy")
y_test = np.load("data/y_test.npy")
y_train = torch.from_numpy(y_train).squeeze().long()
y_test = torch.from_numpy(y_test).squeeze().long()
dataset1 = torch.utils.data.TensorDataset(x_train, y_train.unsqueeze(1))
dataset2 = torch.utils.data.TensorDataset(x_test, y_test.unsqueeze(1))
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = ResNet_small().to(device)
print(summary(model, (3, 100, 100)))
print(
"Trainable parameters",
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=0.1, steps_per_epoch=len(train_loader), epochs=200
) # epoch 187
epoch = 1
loss = nn.CrossEntropyLoss()
if args.load_checkpoint:
print("Loading checkpoint args.load_checkpoint")
checkpoint = torch.load(args.load_checkpoint)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
epoch = checkpoint["epoch"]
best_acc = 0
l_train_loss = []
l_test_loss = []
l_train_acc = []
l_test_acc = []
l_lr = []
for epoch in range(epoch, args.epochs + 1):
train_loss, train_acc = train(
model, train_loader, optimizer, scheduler, loss, epoch
)
test_loss, test_acc = test(model, test_loader, loss, epoch)
if test_acc > best_acc:
best_acc = test_acc
if test_acc > 97.0:
print("Error < 3.0 achieved, stopped training")
break
if args.save_model and test_acc >= best_acc:
checkpoint = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}
print("Saving checkpoint as best model to gender-best-checkpoint.pt")
save_ckp(checkpoint, "")
l_train_loss.append(train_loss)
l_test_loss.append(test_loss)
l_train_acc.append(train_acc)
l_test_acc.append(test_acc)
l_lr.append(scheduler._last_lr[0])
# PLOTS
fig = plt.figure()
plt.plot(l_train_loss, color="red", label="Train")
plt.plot(l_test_loss, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Loss", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_loss.png")
plt.close()
fig = plt.figure()
plt.plot(l_train_acc, color="red", label="Train")
plt.plot(l_test_acc, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Accuracy", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_acc.png")
plt.close()
fig = plt.figure()
plt.plot(l_lr, color="orange", label="Learning rate")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Learning rate", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_lr.png")
plt.close()
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.grid",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"resnet.ResNet_small",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.cl... | [((406, 421), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (419, 421), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((499, 524), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (522, 524), False, 'import torch\n'), ((534, 577), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (546, 577), False, 'import torch\n'), ((3788, 3813), 'torch.save', 'torch.save', (['state', 'f_path'], {}), '(state, f_path)\n', (3798, 3813), False, 'import torch\n'), ((3865, 3925), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch GENDER CV LAB"""'}), "(description='PyTorch GENDER CV LAB')\n", (3888, 3925), False, 'import argparse\n'), ((4755, 4777), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (4766, 4777), False, 'import random\n'), ((4832, 4857), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4846, 4857), True, 'import numpy as np\n'), ((4862, 4890), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4879, 4890), False, 'import torch\n'), ((4895, 4928), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4917, 4928), False, 'import torch\n'), ((5279, 5306), 'numpy.load', 'np.load', (['"""data/x_train.npy"""'], {}), "('data/x_train.npy')\n", (5286, 5306), True, 'import numpy as np\n'), ((5320, 5346), 'numpy.load', 'np.load', (['"""data/x_test.npy"""'], {}), "('data/x_test.npy')\n", (5327, 5346), True, 'import numpy as np\n'), ((5572, 5599), 'numpy.load', 'np.load', (['"""data/y_train.npy"""'], {}), "('data/y_train.npy')\n", (5579, 5599), True, 'import numpy as np\n'), ((5613, 5639), 'numpy.load', 'np.load', (['"""data/y_test.npy"""'], {}), "('data/y_test.npy')\n", (5620, 5639), True, 'import numpy as np\n'), ((5926, 5979), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset1'], {}), '(dataset1, **train_kwargs)\n', (5953, 5979), False, 'import torch\n'), ((5998, 6050), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset2'], {}), '(dataset2, **test_kwargs)\n', (6025, 6050), False, 'import torch\n'), ((6513, 6534), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6532, 6534), True, 'import torch.nn as nn\n'), ((8027, 8039), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8037, 8039), True, 'import matplotlib.pyplot as plt\n'), ((8044, 8094), 'matplotlib.pyplot.plot', 'plt.plot', (['l_train_loss'], {'color': '"""red"""', 'label': '"""Train"""'}), "(l_train_loss, color='red', label='Train')\n", (8052, 8094), True, 'import matplotlib.pyplot as plt\n'), ((8099, 8148), 'matplotlib.pyplot.plot', 'plt.plot', (['l_test_loss'], {'color': '"""blue"""', 'label': '"""Test"""'}), "(l_test_loss, color='blue', label='Test')\n", (8107, 8148), True, 'import matplotlib.pyplot as plt\n'), ((8153, 8186), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {'fontsize': '(10)'}), "('Epochs', fontsize=10)\n", (8163, 8186), True, 'import matplotlib.pyplot as plt\n'), ((8191, 8221), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {'fontsize': '(8)'}), "('Loss', fontsize=8)\n", (8201, 8221), True, 'import matplotlib.pyplot as plt\n'), ((8226, 8238), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8236, 8238), True, 'import matplotlib.pyplot as plt\n'), ((8243, 8253), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), 
'()\n', (8251, 8253), True, 'import matplotlib.pyplot as plt\n'), ((8301, 8312), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8310, 8312), True, 'import matplotlib.pyplot as plt\n'), ((8324, 8336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8334, 8336), True, 'import matplotlib.pyplot as plt\n'), ((8341, 8390), 'matplotlib.pyplot.plot', 'plt.plot', (['l_train_acc'], {'color': '"""red"""', 'label': '"""Train"""'}), "(l_train_acc, color='red', label='Train')\n", (8349, 8390), True, 'import matplotlib.pyplot as plt\n'), ((8395, 8443), 'matplotlib.pyplot.plot', 'plt.plot', (['l_test_acc'], {'color': '"""blue"""', 'label': '"""Test"""'}), "(l_test_acc, color='blue', label='Test')\n", (8403, 8443), True, 'import matplotlib.pyplot as plt\n'), ((8448, 8481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {'fontsize': '(10)'}), "('Epochs', fontsize=10)\n", (8458, 8481), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(8)'}), "('Accuracy', fontsize=8)\n", (8496, 8520), True, 'import matplotlib.pyplot as plt\n'), ((8525, 8537), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8535, 8537), True, 'import matplotlib.pyplot as plt\n'), ((8542, 8552), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8550, 8552), True, 'import matplotlib.pyplot as plt\n'), ((8599, 8610), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8608, 8610), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8632, 8634), True, 'import matplotlib.pyplot as plt\n'), ((8639, 8692), 'matplotlib.pyplot.plot', 'plt.plot', (['l_lr'], {'color': '"""orange"""', 'label': '"""Learning rate"""'}), "(l_lr, color='orange', label='Learning rate')\n", (8647, 8692), True, 'import matplotlib.pyplot as plt\n'), ((8697, 8730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {'fontsize': '(10)'}), "('Epochs', fontsize=10)\n", (8707, 8730), True, 'import matplotlib.pyplot as plt\n'), ((8735, 8774), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Learning rate"""'], {'fontsize': '(8)'}), "('Learning rate', fontsize=8)\n", (8745, 8774), True, 'import matplotlib.pyplot as plt\n'), ((8779, 8791), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8789, 8791), True, 'import matplotlib.pyplot as plt\n'), ((8796, 8806), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8804, 8806), True, 'import matplotlib.pyplot as plt\n'), ((8852, 8863), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8861, 8863), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2642), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2640, 2642), False, 'import torch\n'), ((6100, 6129), 'torchsummary.summary', 'summary', (['model', '(3, 100, 100)'], {}), '(model, (3, 100, 100))\n', (6107, 6129), False, 'from torchsummary import summary\n'), ((6642, 6674), 'torch.load', 'torch.load', (['args.load_checkpoint'], {}), '(args.load_checkpoint)\n', (6652, 6674), False, 'import torch\n'), ((6064, 6078), 'resnet.ResNet_small', 'ResNet_small', ([], {}), '()\n', (6076, 6078), False, 'from resnet import ResNet_small\n'), ((5655, 5680), 'torch.from_numpy', 'torch.from_numpy', (['y_train'], {}), '(y_train)\n', (5671, 5680), False, 'import torch\n'), ((5711, 5735), 'torch.from_numpy', 'torch.from_numpy', (['y_test'], {}), '(y_test)\n', (5727, 5735), False, 'import torch\n'), ((5417, 5442), 'torch.from_numpy', 'torch.from_numpy', 
(['x_train'], {}), '(x_train)\n', (5433, 5442), False, 'import torch\n'), ((5494, 5518), 'torch.from_numpy', 'torch.from_numpy', (['x_test'], {}), '(x_test)\n', (5510, 5518), False, 'import torch\n')] |
#!/usr/bin/python3
import numpy as np
from pathlib import Path
from mseg.utils.json_utils import read_json_file
from mseg.utils.names_utils import (
get_dataloader_id_to_classname_map,
load_dataset_colors_arr
)
from mseg.utils.test_utils import dict_is_equal
from mseg.dataset_apis.MapillaryMaskDataset import MapillaryMaskDataset
_TEST_DIR = Path(__file__).resolve().parent
# One set of data is from Scene Parsing Challenge, other is not.
CONFIG_JSON_FPATH = _TEST_DIR / 'test_data/mapillary-vistas-dataset_public_v1.1_config.json'
def read_mapillary_config_helper():
"""
"""
return read_json_file(CONFIG_JSON_FPATH)
def test_load_names():
"""
"""
tax_data = read_mapillary_config_helper()
id_to_classname_map = get_dataloader_id_to_classname_map(
dataset_name='mapillary-public66',
include_ignore_idx_cls=False
)
gt_id_to_classname = {i: entry['readable'] for i, entry in enumerate(tax_data['labels'])}
dict_is_equal(gt_id_to_classname, id_to_classname_map)
assert len(id_to_classname_map.keys()) == 66
def test_load_colors():
"""
"""
tax_data = read_mapillary_config_helper()
num_classes = 66
gt_dataset_ordered_colors = np.zeros((66,3), dtype=np.uint8)
for i in range(num_classes):
gt_dataset_ordered_colors[i] = np.array(tax_data['labels'][i]['color'])
colors = load_dataset_colors_arr('mapillary-public66')
assert np.allclose(colors, gt_dataset_ordered_colors)
"""
def test_get_segment_mask():
dataroot = f'{_TEST_DIR}/test_data/Mapillary_test_data'
m_api = MapillaryMaskDataset(dataroot)
seq_id = '' # dummy value
segmentid = 7936
fname_stem = 'aDailxp-VC9IbQWfIp-8Rw'
split = 'train'
mask = m_api.get_segment_mask(seq_id, segmentid, fname_stem, split)
# stream running through the bottom of an image, hill above it
gt_mask = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0]
], dtype=np.uint8)
assert np.allclose(mask[::500,::500], gt_mask)
"""
if __name__ == '__main__':
    # test_get_segment_mask() is commented out above (it needs local test data),
    # so run the tests that are actually defined:
    test_load_names()
    test_load_colors()
| [
"numpy.allclose",
"mseg.utils.test_utils.dict_is_equal",
"pathlib.Path",
"numpy.array",
"numpy.zeros",
"mseg.utils.names_utils.get_dataloader_id_to_classname_map",
"mseg.utils.json_utils.read_json_file",
"mseg.utils.names_utils.load_dataset_colors_arr"
] | [((596, 629), 'mseg.utils.json_utils.read_json_file', 'read_json_file', (['CONFIG_JSON_FPATH'], {}), '(CONFIG_JSON_FPATH)\n', (610, 629), False, 'from mseg.utils.json_utils import read_json_file\n'), ((731, 834), 'mseg.utils.names_utils.get_dataloader_id_to_classname_map', 'get_dataloader_id_to_classname_map', ([], {'dataset_name': '"""mapillary-public66"""', 'include_ignore_idx_cls': '(False)'}), "(dataset_name='mapillary-public66',\n include_ignore_idx_cls=False)\n", (765, 834), False, 'from mseg.utils.names_utils import get_dataloader_id_to_classname_map, load_dataset_colors_arr\n'), ((930, 984), 'mseg.utils.test_utils.dict_is_equal', 'dict_is_equal', (['gt_id_to_classname', 'id_to_classname_map'], {}), '(gt_id_to_classname, id_to_classname_map)\n', (943, 984), False, 'from mseg.utils.test_utils import dict_is_equal\n'), ((1157, 1190), 'numpy.zeros', 'np.zeros', (['(66, 3)'], {'dtype': 'np.uint8'}), '((66, 3), dtype=np.uint8)\n', (1165, 1190), True, 'import numpy as np\n'), ((1305, 1350), 'mseg.utils.names_utils.load_dataset_colors_arr', 'load_dataset_colors_arr', (['"""mapillary-public66"""'], {}), "('mapillary-public66')\n", (1328, 1350), False, 'from mseg.utils.names_utils import get_dataloader_id_to_classname_map, load_dataset_colors_arr\n'), ((1359, 1405), 'numpy.allclose', 'np.allclose', (['colors', 'gt_dataset_ordered_colors'], {}), '(colors, gt_dataset_ordered_colors)\n', (1370, 1405), True, 'import numpy as np\n'), ((1253, 1293), 'numpy.array', 'np.array', (["tax_data['labels'][i]['color']"], {}), "(tax_data['labels'][i]['color'])\n", (1261, 1293), True, 'import numpy as np\n'), ((349, 363), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (353, 363), False, 'from pathlib import Path\n')] |
import numpy as np
import datetime
import time
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S').replace(' ', '-').replace(':', '-')
import os
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize
"""## Away FW or Pairwise FW"""
#Maintains active list of weights and vertices.
def runFWSimplex(x0, function, feasibleReg, tolerance, maxTime, FWVariant = "AFW", typeStep = "SS", criterion = "PG", criterionRef = 0.0):
#Quantities we want to output.
grad = function.fEvalGrad(x0)
FWGap = [np.dot(grad, x0 - feasibleReg.LPOracle(grad))]
fVal = [function.fEval(x0)]
timing = [time.time()]
x = x0.copy()
itCount = 1
while(True):
if(FWVariant == "AFW"):
x, vertvar, gap = awayStepFWSimplex(function, feasibleReg, x, typeStep)
else:
x, vertvar, gap = pairwiseStepFWSimplex(function, feasibleReg, x, typeStep)
performUpdate(function, x, FWGap, fVal, timing, gap)
if(exitCriterion(itCount, fVal[-1], FWGap[-1], criterion = criterion, numCriterion = tolerance, critRef = criterionRef) or timing[-1] - timing[0] > maxTime):
timing[:] = [t - timing[0] for t in timing]
return x, FWGap, fVal, timing
itCount += 1
def awayStepFWSimplex(function, feasibleReg, x, typeStep):
grad = function.fEvalGrad(x)
v = feasibleReg.LPOracle(grad)
a, indexMax = feasibleReg.AwayOracle(grad, x)
vertvar = 0
#Choose FW direction, can overwrite index.
if(np.dot(grad, x - v) > np.dot(grad, a - x)):
d = v - x
alphaMax = 1.0
optStep = stepSize(function, d, grad, typeStep)
alpha = min(optStep, alphaMax)
#Less than maxStep
if(alpha != alphaMax):
#newVertex returns true if vertex is new.
if(np.dot(v, x) == 0.0):
vertvar = 1
#Max step length away step, only one vertex now.
else:
vertvar = -1
else:
d = x - a
alphaMax = x[indexMax]/(1.0 - x[indexMax])
optStep = stepSize(function, d, grad, typeStep)
alpha = min(optStep, alphaMax)
#Max step, need to delete a vertex.
if(alpha == alphaMax):
vertvar = -1
return x + alpha*d, vertvar, np.dot(grad, x - v)
#Perform one step of the Pairwise FW algorithm
#Also specifies if the number of vertices has decreased var = -1 or
#if it has increased var = +1. Otherwise 0.
def pairwiseStepFWSimplex(function, feasibleReg, x, typeStep):
grad = function.fEvalGrad(x)
v = feasibleReg.LPOracle(grad)
a, index = feasibleReg.AwayOracle(grad, x)
vertVar = 0
#Find the weight of the extreme point a in the decomposition.
alphaMax = x[index]
#Update weight of away vertex.
d = v - a
optStep = stepSize(function, d, grad, typeStep)
alpha = min(optStep, alphaMax)
if(alpha == alphaMax):
vertVar = -1
#Update the FW vertex
if(np.dot(v, x) == 0.0):
vertVar = 1
return x + alpha*d, vertVar, np.dot(grad, x - v)
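# Illustrative note (editorial, not from the original source): a pairwise step
# moves at most alphaMax = x[index] units of weight from the away vertex a to
# the Frank-Wolfe vertex v, so the support of x grows by at most one atom per
# iteration. A minimal usage sketch, assuming a `function` object exposing
# fEvalGrad/fEval and a feasible region exposing LPOracle/AwayOracle:
#   x_new, vertvar, gap = pairwiseStepFWSimplex(fun, feasibleRegion, x, "SS")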
"""
# LaCG Variants
"""
#Locally Accelerated Conditional Gradients.
class LaCG:
def run(self, x0, function, feasReg, tol, maxIter = 5e5, FWVariant = "AFW", typeStep = "SS"):
        #Perform linesearch?
self.lineSearch = typeStep
#Function parameters.
self.restart = []
self.L = function.largestEig()
self.mu = function.smallestEig()
self.tol = tol
self.theta = np.sqrt(0.5*self.mu/self.L)
#Copy the variables.
self.xAFW, self.xAGD, x, self.y, self.w = [x0.copy(), x0.copy(), x0.copy(), x0.copy(), x0.copy()]
#Store the data from the initial iterations.
itCount = 1
self.A = 1.0
self.z = -function.fEvalGrad(self.xAFW) + self.L*self.xAFW
#Initial data measurements.
grad = function.fEvalGrad(x0)
FWGap = [np.dot(grad, x0 - feasReg.LPOracle(grad))]
fVal = [function.fEval(x0)]
timing = [time.time()]
while(fVal[-1] - fValOpt > tol):
print(fVal[-1] - fValOpt)
x, gap = self.runIter(function, feasReg, x, itCount + 1, FWVariant)
performUpdate(function, x, FWGap, fVal, timing, gap)
itCount += 1
if(timing[-1] - timing[0] > TIME_LIMIT):
break
timing[:] = [t - timing[0] for t in timing]
return x, FWGap, fVal, timing
def runIter(self, function, feasReg, x, it, FWVariant):
#Information about variation of active set in vertVar
if(FWVariant == "AFW"):
self.xAFW, vertVar, gap = awayStepFWSimplex(function, feasReg, x, typeStep = self.lineSearch)
else:
self.xAFW, vertVar, gap = pairwiseStepFWSimplex(function, feasReg, x, typeStep = self.lineSearch)
self.xAGD = self.accelStep(function, x)
        #If we return the accelerated point, the FW gap from the AFW/PFW step no
        #longer describes it; it is returned unchanged and only used for logging.
if(function.fEval(self.xAGD) < function.fEval(self.xAFW)):
return self.xAGD, gap
else:
return self.xAFW, gap
    #Whenever we perform an accelerated step, the previous iterate w (and the
    #accumulated z) serve as a warm start for the projection subproblem.
def accelStep(self, function, x):
self.A = self.A/(1 - self.theta)
a = self.theta*self.A
self.y = (x + self.theta*self.w)/(1 + self.theta)
self.z += a*(self.mu*self.y - function.fEvalGrad(self.y))
#Compute the projection directly.
indices = np.where(x > 0.0)[0]
#Calculate the vector.
b = self.z[indices]/(self.mu*self.A + self.L - self.mu)
aux = project_onto_simplex(b)
self.w = np.zeros(len(x))
self.w[indices] = aux
xAGD = (1 - self.theta)*x + self.theta*self.w
return xAGD
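# Hedged remark (added for clarity, not in the original): accelStep performs a
# Nesterov-style update in which the proximal subproblem is solved by a
# Euclidean projection onto the simplex restricted to the current support of x
# (via project_onto_simplex over indices where x > 0); this restriction is what
# keeps the "local acceleration" step cheap.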
#Takes an input scheme and tries to accelerate it.
#Need to specify function and scheme which will be used for optimizing.
class catalystSchemeSimplex:
def run(self, x0, function, feasReg, tol, maxTime, FWVariant = "AFW", typeStep = "SS"):
self.L = function.largestEig()
self.mu = function.smallestEig()
self.kappa = self.L - 2*self.mu
from collections import deque
xOut = deque([x0], maxlen = 2)
#Quantities we want to output.
FWGap = [function.FWGapBaseProblem(xOut[-1], feasReg)]
fVal = [function.fEvalBaseProblem(xOut[-1])]
timing = [time.time()]
iterations = [1]
q = self.mu / (self.mu + self.kappa)
rho = 0.9*np.sqrt(q)
y = deque([x0, x0], maxlen = 2)
function.setKappa(self.kappa)
epsilon = 0.22222 * FWGap[-1] * (1-rho)
alpha = deque([np.sqrt(q)], maxlen = 2)
itCount = 0
while(fVal[-1] - fValOpt > tol):
function.sety(y[-1])
newX, gap, fvalue, timingInner = runFWSimplex(xOut[-1], function, feasReg, epsilon, maxTime/2.0, FWVariant = FWVariant, typeStep = typeStep, criterion = "DG")
xOut.append(newX)
epsilon *= (1-rho)
iterations.append(len(gap) + iterations[-1])
alpha.append(self.findRoot(alpha[-1], q))
beta = self.returnBeta(alpha)
y.append(xOut[-1] + beta *(xOut[-1] - xOut[-2]))
performUpdate(function, xOut[-1], FWGap, fVal, timing, function.FWGapBaseProblem(xOut[-1], feasReg))
if(timing[-1] - timing[0] > maxTime):
break
itCount += 1
timing[:] = [t - timing[0] for t in timing]
return xOut[-1], FWGap, fVal, timing, iterations
#Finds the root of the equation between 0 and 1.
#Throws an assertion if no valid candidate is found.
def findRoot(self, alpha, q):
aux = (q-alpha*alpha)
val = 0.5*(aux + np.sqrt(aux*aux + 4.0*alpha*alpha))
if(val > 0 and val <= 1):
return val
else:
val = 0.5*(aux - np.sqrt(aux*aux + 4.0*alpha*alpha))
assert val > 0 and val < 1, "Root does not meet desired criteria.\n"
return val
#Returns the value of Beta based on the values of alpha.
#The alpha deque contains at least two values.
def returnBeta(self, alpha):
return alpha[-2]*(1-alpha[-2])/(alpha[-2]*alpha[-2] + alpha[-1])
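# Worked note (editorial interpretation): with q = mu/(mu + kappa), findRoot
# solves the classical Nesterov recursion
#   alpha_{k+1}^2 = (1 - alpha_{k+1}) * alpha_k^2 + q * alpha_{k+1},
# and returnBeta yields the momentum weight
#   beta_k = alpha_k * (1 - alpha_k) / (alpha_k^2 + alpha_{k+1}).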
"""# Simplex example"""
from functions import randomPSDGenerator, funcQuadratic, funcAccelScheme
from feasibleRegions import probabilitySimplexPolytope
from algorithms import NAGD_probabilitySimplex, CGS, DIPFW
TIME_LIMIT = int(1800)
size = int(1500)
feasibleRegion = probabilitySimplexPolytope(size)
x_0 = feasibleRegion.initialPoint()
S_0 = [x_0]
alpha_0 = [1]
tolerance = 1.0e-5
typeOfStep = "SS"
LVal = 1000.0
MuVal = 1.0
M = randomPSDGenerator(size, MuVal, LVal)
b = np.random.randint(-1,1, size = size)
fun = funcQuadratic(size, M , b, MuVal, LVal)
print("Solving the problem over the simplex.")
##Run to a high Frank-Wolfe primal gap accuracy for later use.
print("\nSolving the problem to a high accuracy using Nesterov's AGD to obtain a reference solution.")
fValOpt = NAGD_probabilitySimplex(x_0, fun, feasibleRegion, tolerance/10.0)
#Catalyst augmented
print("\nRunning Catalyst-augmented AFW.")
funCat = funcAccelScheme(len(x_0), fun.returnM(), fun.returnb(), fun.largestEig(), fun.smallestEig())
CatalystAFW = catalystSchemeSimplex()
xCatalyst, FWCatalyst, fCatalyst, tCatalyst, itCatalyst = CatalystAFW.run(x_0, funCat, feasibleRegion, tolerance, TIME_LIMIT)
##Vanilla AFW
print("\nRunning AFW.")
xAFW, FWGapAFW, fValAFW, timingAFW = runFWSimplex(x_0, fun, feasibleRegion, tolerance, TIME_LIMIT, FWVariant = "AFW", typeStep = typeOfStep, criterion = "PG", criterionRef = fValOpt)
##Vanilla PFW
print("\nRunning PFW.")
xPFW, FWGapPFW, fValPFW, timingPFW = runFWSimplex(x_0, fun, feasibleRegion, tolerance, TIME_LIMIT, FWVariant = "PFW", typeStep = typeOfStep, criterion = "PG", criterionRef = fValOpt)
#LaCG
print("\nRunning LaCG-AFW.")
LaCGAway = LaCG()
xLaCGAFW, FWGapLaCGAFW, fValLaCGAFW, timingLaCGAFW = LaCGAway.run(x_0, fun, feasibleRegion, tolerance, typeStep = typeOfStep, FWVariant = "AFW")
#LaCG PFW
print("\nRunning LaCG-PFW.")
LaCGPairwise = LaCG()
xLaCGPFW, FWGapLaCGPFW, fValLaCGPFW, timingLaCGPFW = LaCGPairwise.run(x_0, fun, feasibleRegion, tolerance, typeStep = typeOfStep, FWVariant = "PFW")
#Conditional Gradient Sliding.
print("\nRunning CGS.")
CGS = CGS()
xCGS, FWGapCGS, fValCGS, timingCGS, iterationCGS = CGS.run(x_0, fun, feasibleRegion, tolerance, TIME_LIMIT, criterion = "PG", criterionRef = fValOpt)
#Decomposition Invariant CG
print("\nRunning DICG.")
xDICG, FWGapDICG, fValDICG, timingDICG = DIPFW(x_0, fun, feasibleRegion, tolerance, TIME_LIMIT, typeStep = typeOfStep, criterion = "PG", criterionRef = fValOpt)
import matplotlib.pyplot as plt
#Plot primal gap in terms of iteration.
plt.loglog(np.arange(len(fValAFW)) + 1, [(x - fValOpt) for x in fValAFW], '-*', color = 'b', markevery = np.logspace(0, np.log10(len(fValAFW)-1), 10).astype(int).tolist(), label = 'AFW')
plt.loglog(np.arange(len(fValPFW)) + 1, [(x - fValOpt) for x in fValPFW], '-D', color = 'c', markevery = np.logspace(0, np.log10(len(fValPFW)-1), 10).astype(int).tolist(), label = 'PFW')
plt.loglog(np.arange(len(fValLaCGAFW)) + 1, [(x - fValOpt) for x in fValLaCGAFW], '-o', color = 'k', markevery = np.logspace(0, np.log10(len(fValLaCGAFW)-1), 10).astype(int).tolist(), label = 'LaCG-AFW')
plt.loglog(np.arange(len(fValLaCGPFW)) + 1, [(x - fValOpt) for x in fValLaCGPFW], '-^', color = 'g', markevery = np.logspace(0, np.log10(len(fValLaCGPFW)-1), 10).astype(int).tolist(), label = 'LaCG-PFW')
plt.loglog(np.arange(len(fValDICG)) + 1, [(x - fValOpt) for x in fValDICG], '-s', color = 'r', markevery = np.logspace(0, np.log10(len(fValDICG)-1), 10).astype(int).tolist(), label = 'DICG')
plt.loglog(iterationCGS, [(x - fValOpt) for x in fValCGS], color = 'y', label = 'CGS')
plt.loglog(itCatalyst, [(x - fValOpt) for x in fCatalyst], ':', color = 'm', label = 'Catalyst')
plt.legend()
plt.xlabel(r'$k$')
plt.ylabel(r'$f(x_{k}) - f^{*}$')
plt.autoscale(enable=True, axis='x', tight=True)
plt.grid()
plt.show()
plt.close()
#Plot Primal gap in terms of time.
plt.semilogy(timingAFW, [(x - fValOpt) for x in fValAFW], '-*', color = 'b', markevery = int(len(timingAFW)/10), label = 'AFW')
plt.semilogy(timingPFW, [(x - fValOpt) for x in fValPFW], '-D', color = 'c', markevery = int(len(timingPFW)/10), label = 'PFW')
plt.semilogy(timingLaCGAFW, [(x - fValOpt) for x in fValLaCGAFW], '-o', color = 'k', markevery = int(len(timingLaCGAFW)/10), label = 'LaCG-AFW')
plt.semilogy(timingLaCGPFW, [(x - fValOpt) for x in fValLaCGPFW], '-^', color = 'g', markevery = int(len(timingLaCGPFW)/10), label = 'LaCG-PFW')
plt.semilogy(timingDICG, [(x - fValOpt) for x in fValDICG], '-s', color = 'r', markevery = int(len(timingDICG)/10), label = 'DICG')
plt.semilogy(timingCGS, [(x - fValOpt) for x in fValCGS], color = 'y', label = 'CGS')
plt.semilogy(tCatalyst, [(x - fValOpt) for x in fCatalyst], ':', color = 'm', label = 'Catalyst')
plt.legend()
plt.ylabel(r'$f(x_{k}) - f^{*}$')
plt.xlabel(r't[s]')
plt.autoscale(enable=True, axis='x', tight=True)
plt.grid()
plt.show()
plt.close() | [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"functions.randomPSDGenerator",
"matplotlib.pyplot.autoscale",
"algorithms.CGS",
"matplotlib.pyplot.semilogy",
"collections.deque",
"matplotlib.pyplot.loglog",
"feasibleRegions.probabilitySimplexPolytope",
"auxiliaryFunctions.st... | [((52, 63), 'time.time', 'time.time', ([], {}), '()\n', (61, 63), False, 'import time\n'), ((8934, 8966), 'feasibleRegions.probabilitySimplexPolytope', 'probabilitySimplexPolytope', (['size'], {}), '(size)\n', (8960, 8966), False, 'from feasibleRegions import probabilitySimplexPolytope\n'), ((9097, 9134), 'functions.randomPSDGenerator', 'randomPSDGenerator', (['size', 'MuVal', 'LVal'], {}), '(size, MuVal, LVal)\n', (9115, 9134), False, 'from functions import randomPSDGenerator, funcQuadratic, funcAccelScheme\n'), ((9139, 9174), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(1)'], {'size': 'size'}), '(-1, 1, size=size)\n', (9156, 9174), True, 'import numpy as np\n'), ((9182, 9220), 'functions.funcQuadratic', 'funcQuadratic', (['size', 'M', 'b', 'MuVal', 'LVal'], {}), '(size, M, b, MuVal, LVal)\n', (9195, 9220), False, 'from functions import randomPSDGenerator, funcQuadratic, funcAccelScheme\n'), ((9446, 9513), 'algorithms.NAGD_probabilitySimplex', 'NAGD_probabilitySimplex', (['x_0', 'fun', 'feasibleRegion', '(tolerance / 10.0)'], {}), '(x_0, fun, feasibleRegion, tolerance / 10.0)\n', (9469, 9513), False, 'from algorithms import NAGD_probabilitySimplex, CGS, DIPFW\n'), ((10758, 10763), 'algorithms.CGS', 'CGS', ([], {}), '()\n', (10761, 10763), False, 'from algorithms import NAGD_probabilitySimplex, CGS, DIPFW\n'), ((10815, 10913), 'algorithms.CGS.run', 'CGS.run', (['x_0', 'fun', 'feasibleRegion', 'tolerance', 'TIME_LIMIT'], {'criterion': '"""PG"""', 'criterionRef': 'fValOpt'}), "(x_0, fun, feasibleRegion, tolerance, TIME_LIMIT, criterion='PG',\n criterionRef=fValOpt)\n", (10822, 10913), False, 'from algorithms import NAGD_probabilitySimplex, CGS, DIPFW\n'), ((11009, 11126), 'algorithms.DIPFW', 'DIPFW', (['x_0', 'fun', 'feasibleRegion', 'tolerance', 'TIME_LIMIT'], {'typeStep': 'typeOfStep', 'criterion': '"""PG"""', 'criterionRef': 'fValOpt'}), "(x_0, fun, feasibleRegion, tolerance, TIME_LIMIT, typeStep=typeOfStep,\n criterion='PG', criterionRef=fValOpt)\n", (11014, 11126), False, 'from algorithms import NAGD_probabilitySimplex, CGS, DIPFW\n'), ((12176, 12263), 'matplotlib.pyplot.loglog', 'plt.loglog', (['iterationCGS', '[(x - fValOpt) for x in fValCGS]'], {'color': '"""y"""', 'label': '"""CGS"""'}), "(iterationCGS, [(x - fValOpt) for x in fValCGS], color='y', label\n ='CGS')\n", (12186, 12263), True, 'import matplotlib.pyplot as plt\n'), ((12265, 12361), 'matplotlib.pyplot.loglog', 'plt.loglog', (['itCatalyst', '[(x - fValOpt) for x in fCatalyst]', '""":"""'], {'color': '"""m"""', 'label': '"""Catalyst"""'}), "(itCatalyst, [(x - fValOpt) for x in fCatalyst], ':', color='m',\n label='Catalyst')\n", (12275, 12361), True, 'import matplotlib.pyplot as plt\n'), ((12363, 12375), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12373, 12375), True, 'import matplotlib.pyplot as plt\n'), ((12376, 12393), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (12386, 12393), True, 'import matplotlib.pyplot as plt\n'), ((12395, 12427), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$f(x_{k}) - f^{*}$"""'], {}), "('$f(x_{k}) - f^{*}$')\n", (12405, 12427), True, 'import matplotlib.pyplot as plt\n'), ((12429, 12477), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {'enable': '(True)', 'axis': '"""x"""', 'tight': '(True)'}), "(enable=True, axis='x', tight=True)\n", (12442, 12477), True, 'import matplotlib.pyplot as plt\n'), ((12478, 12488), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (12486, 12488), True, 'import 
matplotlib.pyplot as plt\n'), ((12489, 12499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12497, 12499), True, 'import matplotlib.pyplot as plt\n'), ((12500, 12511), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12509, 12511), True, 'import matplotlib.pyplot as plt\n'), ((13228, 13314), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['timingCGS', '[(x - fValOpt) for x in fValCGS]'], {'color': '"""y"""', 'label': '"""CGS"""'}), "(timingCGS, [(x - fValOpt) for x in fValCGS], color='y', label=\n 'CGS')\n", (13240, 13314), True, 'import matplotlib.pyplot as plt\n'), ((13316, 13413), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['tCatalyst', '[(x - fValOpt) for x in fCatalyst]', '""":"""'], {'color': '"""m"""', 'label': '"""Catalyst"""'}), "(tCatalyst, [(x - fValOpt) for x in fCatalyst], ':', color='m',\n label='Catalyst')\n", (13328, 13413), True, 'import matplotlib.pyplot as plt\n'), ((13415, 13427), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13425, 13427), True, 'import matplotlib.pyplot as plt\n'), ((13428, 13460), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$f(x_{k}) - f^{*}$"""'], {}), "('$f(x_{k}) - f^{*}$')\n", (13438, 13460), True, 'import matplotlib.pyplot as plt\n'), ((13462, 13480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t[s]"""'], {}), "('t[s]')\n", (13472, 13480), True, 'import matplotlib.pyplot as plt\n'), ((13482, 13530), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {'enable': '(True)', 'axis': '"""x"""', 'tight': '(True)'}), "(enable=True, axis='x', tight=True)\n", (13495, 13530), True, 'import matplotlib.pyplot as plt\n'), ((13531, 13541), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (13539, 13541), True, 'import matplotlib.pyplot as plt\n'), ((13542, 13552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13550, 13552), True, 'import matplotlib.pyplot as plt\n'), ((13553, 13564), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13562, 13564), True, 'import matplotlib.pyplot as plt\n'), ((3164, 3201), 'auxiliaryFunctions.stepSize', 'stepSize', (['function', 'd', 'grad', 'typeStep'], {}), '(function, d, grad, typeStep)\n', (3172, 3201), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((1027, 1038), 'time.time', 'time.time', ([], {}), '()\n', (1036, 1038), False, 'import time\n'), ((1317, 1369), 'auxiliaryFunctions.performUpdate', 'performUpdate', (['function', 'x', 'FWGap', 'fVal', 'timing', 'gap'], {}), '(function, x, FWGap, fVal, timing, gap)\n', (1330, 1369), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((1915, 1934), 'numpy.dot', 'np.dot', (['grad', '(x - v)'], {}), '(grad, x - v)\n', (1921, 1934), True, 'import numpy as np\n'), ((1937, 1956), 'numpy.dot', 'np.dot', (['grad', '(a - x)'], {}), '(grad, a - x)\n', (1943, 1956), True, 'import numpy as np\n'), ((2012, 2049), 'auxiliaryFunctions.stepSize', 'stepSize', (['function', 'd', 'grad', 'typeStep'], {}), '(function, d, grad, typeStep)\n', (2020, 2049), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((2435, 2472), 'auxiliaryFunctions.stepSize', 'stepSize', (['function', 'd', 'grad', 'typeStep'], {}), '(function, d, grad, typeStep)\n', (2443, 2472), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((2637, 2656), 'numpy.dot', 'np.dot', (['grad', '(x - v)'], {}), '(grad, x - v)\n', (2643, 2656), True, 
'import numpy as np\n'), ((3318, 3330), 'numpy.dot', 'np.dot', (['v', 'x'], {}), '(v, x)\n', (3324, 3330), True, 'import numpy as np\n'), ((3393, 3412), 'numpy.dot', 'np.dot', (['grad', '(x - v)'], {}), '(grad, x - v)\n', (3399, 3412), True, 'import numpy as np\n'), ((3846, 3877), 'numpy.sqrt', 'np.sqrt', (['(0.5 * self.mu / self.L)'], {}), '(0.5 * self.mu / self.L)\n', (3853, 3877), True, 'import numpy as np\n'), ((6026, 6049), 'auxiliaryFunctions.project_onto_simplex', 'project_onto_simplex', (['b'], {}), '(b)\n', (6046, 6049), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((6613, 6634), 'collections.deque', 'deque', (['[x0]'], {'maxlen': '(2)'}), '([x0], maxlen=2)\n', (6618, 6634), False, 'from collections import deque\n'), ((6934, 6959), 'collections.deque', 'deque', (['[x0, x0]'], {'maxlen': '(2)'}), '([x0, x0], maxlen=2)\n', (6939, 6959), False, 'from collections import deque\n'), ((1381, 1495), 'auxiliaryFunctions.exitCriterion', 'exitCriterion', (['itCount', 'fVal[-1]', 'FWGap[-1]'], {'criterion': 'criterion', 'numCriterion': 'tolerance', 'critRef': 'criterionRef'}), '(itCount, fVal[-1], FWGap[-1], criterion=criterion,\n numCriterion=tolerance, critRef=criterionRef)\n', (1394, 1495), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((4358, 4369), 'time.time', 'time.time', ([], {}), '()\n', (4367, 4369), False, 'import time\n'), ((4542, 4594), 'auxiliaryFunctions.performUpdate', 'performUpdate', (['function', 'x', 'FWGap', 'fVal', 'timing', 'gap'], {}), '(function, x, FWGap, fVal, timing, gap)\n', (4555, 4594), False, 'from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize\n'), ((5896, 5913), 'numpy.where', 'np.where', (['(x > 0.0)'], {}), '(x > 0.0)\n', (5904, 5913), True, 'import numpy as np\n'), ((6810, 6821), 'time.time', 'time.time', ([], {}), '()\n', (6819, 6821), False, 'import time\n'), ((6911, 6921), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (6918, 6921), True, 'import numpy as np\n'), ((2206, 2218), 'numpy.dot', 'np.dot', (['v', 'x'], {}), '(v, x)\n', (2212, 2218), True, 'import numpy as np\n'), ((7071, 7081), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (7078, 7081), True, 'import numpy as np\n'), ((8160, 8200), 'numpy.sqrt', 'np.sqrt', (['(aux * aux + 4.0 * alpha * alpha)'], {}), '(aux * aux + 4.0 * alpha * alpha)\n', (8167, 8200), True, 'import numpy as np\n'), ((8296, 8336), 'numpy.sqrt', 'np.sqrt', (['(aux * aux + 4.0 * alpha * alpha)'], {}), '(aux * aux + 4.0 * alpha * alpha)\n', (8303, 8336), True, 'import numpy as np\n'), ((76, 111), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (107, 111), False, 'import datetime\n')] |
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reducing a macronode lattice to a canonical lattice."""
# pylint: disable=protected-access,too-many-statements,too-many-locals,too-many-arguments
import numpy as np
from numpy.random import default_rng
from scipy.linalg import block_diag
from flamingpy.cv.ops import CVLayer, SCZ_apply
from flamingpy.cv.gkp import GKP_binner, Z_err_cond
from thewalrus.symplectic import expand, beam_splitter
def invert_permutation(p):
"""Invert the permutation associated with p."""
p_inverted = np.empty(p.size, p.dtype)
p_inverted[p] = np.arange(p.size)
return p_inverted
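# Usage sketch (illustrative, not part of the library): composing a
# permutation with its inverse recovers the identity ordering, e.g.
#   p = np.array([2, 0, 1]); invert_permutation(p)[p]  # -> array([0, 1, 2])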
def BS_network(n):
"""Return the symlectic matrix of the beamsplitter network.
Return the symplectic matrix of the beamsplitters connecting four
micronodes in each macronode out of n total micronodes. If n = 4, return
the matrix in the 'all q's first' convention; otherwise, return a large
block-diagonal matrix in the 'q1p1, ... qnpn' convention.
"""
# 50/50 beamsplitter in the 'all q's first' convention.
bs5050 = beam_splitter(np.pi / 4, 0)
bs1 = expand(bs5050, [1, 0], 4)
bs2 = expand(bs5050, [3, 2], 4)
bs3 = expand(bs5050, [2, 0], 4)
bs4 = expand(bs5050, [3, 1], 4)
bs_network = (bs4 @ bs3 @ bs2 @ bs1).astype(np.single)
    if n < 4:
        raise ValueError("n must be at least 4 micronodes.")
if n > 4:
        # Permutation away from 'all q's first' convention for matrices
        # with dimension 4 and the network spanning all the macronodes.
perm_out_4 = [0, 4, 1, 5, 2, 6, 3, 7]
bs_perm = bs_network[:, perm_out_4][perm_out_4, :]
        # Symplectic corresponding to the beamsplitter network spanning
# the whole lattice.
bs_full = block_diag(*[bs_perm] * (n // 4))
return bs_full
return bs_network
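# Hedged sanity check (an assumption added for illustration, not in the
# source): for n == 4 the returned matrix should be symplectic in the
# 'all q's first' convention, i.e. S @ Omega @ S.T ~= Omega with
# Omega = [[0, I4], [-I4, 0]]:
#   S = BS_network(4)
#   Omega = np.block([[np.zeros((4, 4)), np.eye(4)],
#                     [-np.eye(4), np.zeros((4, 4))]])
#   assert np.allclose(S @ Omega @ S.T, Omega, atol=1e-6)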
def reduce_macro_and_simulate(
RHG_macro, RHG_reduced, CVRHG_reduced, bs_network, swap_prob, delta, rng=default_rng()
):
"""Reduce the macronode RHG lattice to the canonical lattice.
Take the macronode lattice EGraph, RHG_macro, and generate a
    macronode CV lattice with swap-out probability swap_prob and delta
value delta. Then, label micronodes as planets and stars, conduct
homodyne measurements, process these measurements, and compute
    conditional phase error probabilities. Generate a canonical RHG
lattice with effective measurement outcomes, phase error
probabilities, and state types ('p' or 'GKP') stored as node
attributes.
"""
to_points = RHG_macro.to_points
N = len(RHG_macro)
# The hybridized CVRHG macronode lattice.
CVRHG = CVLayer(RHG_macro, p_swap=swap_prob, rng=rng)
# Noise-model
perfect_points = RHG_macro.graph.get("perfect_points")
if perfect_points:
perfect_inds = [RHG_macro.to_indices[point] for point in perfect_points]
else:
perfect_inds = None
noise_model = {
"noise": "grn",
"delta": delta,
"sampling_order": "two-step",
"perfect_inds": perfect_inds,
}
CVRHG.apply_noise(noise_model, rng)
# A list of permuted indices where each block of four
# corresponds to [star, planet, planet, planet].
permuted_inds = np.empty(N, dtype=np.int32)
for i in range(0, N - 3, 4):
# Indices of GKP micronodes in macronode i.
gkps = []
for j in range(4):
micronode = to_points[i + j]
if CVRHG.egraph.nodes[micronode]["state"] == "GKP":
gkps.append(j)
centre_point = tuple(round(i) for i in micronode)
if gkps:
star_ind, reduced_state = i + gkps[0], "GKP"
else:
star_ind, reduced_state = i, "p"
CVRHG_reduced._states["p"] += [RHG_reduced.to_indices[centre_point]]
# Set type of node in the reduced lattice as a p-squeezed
# state if all micronodes are p, else GKP.
RHG_reduced.nodes[centre_point]["state"] = reduced_state
# Old and permuted indices of all micronodes in macronode i.
old_inds = [i, i + 1, i + 2, i + 3]
old_inds.pop(star_ind - i)
new_inds = [star_ind] + old_inds
# Associate a 'body index' (1 to 4) to each micronode,
# with 1 being the star index and the rest being planets.
k = 1
for ind in new_inds:
CVRHG.egraph.nodes[to_points[ind]]["body_index"] = k
k += 1
permuted_inds[[i, i + 1, i + 2, i + 3]] = new_inds
# Indices of stars and planets.
stars = permuted_inds[::4]
planets = np.delete(permuted_inds, np.arange(0, N, 4))
# Update quadrature values after CZ gate application.
quads = CVRHG._init_quads
quads = SCZ_apply(RHG_macro.adj_mat, quads)
# Permute the quadrature values to align with the permuted
# indices in order to apply the beamsplitter network.
quad_permutation = np.concatenate([permuted_inds, N + permuted_inds])
permuted_quads = quads[quad_permutation]
for i in range(0, N - 3, 4):
q_inds = np.array([i, i + 1, i + 2, i + 3])
p_inds = q_inds + N
updated_qs = bs_network[:4, :4] @ permuted_quads[q_inds]
updated_ps = bs_network[4:, 4:] @ permuted_quads[p_inds]
permuted_quads[q_inds] = updated_qs
permuted_quads[p_inds] = updated_ps
unpermuted_quads = permuted_quads[invert_permutation(quad_permutation)]
# Measure stars in p, planets in q.
CVRHG.measure_hom(quad="p", inds=stars, updated_quads=unpermuted_quads, rng=rng)
CVRHG.measure_hom(quad="q", inds=planets, updated_quads=unpermuted_quads, rng=rng)
def neighbor_of_i(i, j):
"""Return the neighbor of the ith micronode, jth macronode.
Micronode i is adjacent to a neighbor with a body index (1 for
star, 2, 3, 4 for planets). Return the vertex and the body index
of the neighbor to help the processing rules. If there is no
such neighbor, return None.
"""
# Index of ith micronode in the jth macronode.
ith_index = permuted_inds[j + i - 1]
ith_vertex = to_points[ith_index]
# Vertex of the neighbor of the ith micronode.
ith_adjacency = list(CVRHG.egraph[ith_vertex])
if ith_adjacency:
ith_neighbor = list(CVRHG.egraph[ith_vertex])[0]
ith_body_index = CVRHG.egraph.nodes[ith_neighbor]["body_index"]
return ith_neighbor, ith_body_index
return None
def m(vertex):
"""Measurement outcomes in the macronode containing vertex.
Return the values of the homodyne measurements of the macronode
containing vertex. Note we are only interested in q-homodyne
outcomes; the returned list is of the form [0, 0, q2, q3, q4].
If vertex is None, return a list of 0s, so that the processing
is unaltered by the outcomes.
"""
if vertex is None:
return [0, 0, 0, 0, 0]
meas = np.zeros(5)
# The central node corresponding to the neighboring
# macronode.
central_node = tuple(round(i) for i in vertex)
for micro in CVRHG.egraph.macro_to_micro[central_node]:
index = CVRHG.egraph.nodes[micro]["body_index"]
# Populate meas with the q-homodyne outcomes for
# the planet modes.
if index != 1:
meas[index] = CVRHG.egraph.nodes[micro]["hom_val_q"]
return meas
def Z(M, neighbor_body_index):
"""Process the homodyne outcomes for neighboring macronode i.
        Macronode j is connected to a macronode with an array of
measurement outcomes M and whose micronodes have body indices
neighbor_body_index. Use this information to process the
measurement outcomes M.
"""
if neighbor_body_index == 1:
return 0
if neighbor_body_index == 2:
return M[2] - M[4]
if neighbor_body_index == 3:
return M[3] - M[4]
if neighbor_body_index == 4:
return M[2] + M[3]
return None
# sorted_homodynes = np.empty(N // 4, dtype=np.float32)
sorted_bits = np.empty(N // 4, dtype=np.float32)
reduced_indices = RHG_reduced.to_indices
# Processing of homodyne outcomes and calculations of phase
# error probabilities
for j in range(0, N - 3, 4):
star_index = permuted_inds[j]
vertex = to_points[star_index]
        # Here, j corresponds to the macronode and i to the micronode.
# i ranges from 1 to 4, to align with manuscript.
verts_and_inds = [neighbor_of_i(i, j) for i in (1, 2, 3, 4)]
neighbors = [tup[0] if tup else None for tup in verts_and_inds]
body_indices = [tup[1] if tup else None for tup in verts_and_inds]
# Array of arrays of measurement outcomes in all the
# macronodes adjacent to j.
m_arr = np.array([m(neighbors[i - 1]) for i in (1, 2, 3, 4)])
# Array of processed q-homodyne outcomes from neighboring
# macronodes of the form [0, Z(1), Z(2), Z(3), Z(4)].
Z_arr = np.array([0] + [Z(m_arr[i - 1], body_indices[i - 1]) for i in (1, 2, 3, 4)])
# p-homodyne outcome of the star node.
star_p_val = CVRHG.egraph.nodes[vertex]["hom_val_p"]
# Types of state for the four micronodes directly neighboring
# macronode j.
types = [RHG_macro.nodes[neighbor]["state"] if neighbor else None for neighbor in neighbors]
# Phase error probability and number of p-squeezed states
# among the four micronodes in the vicinity of macronode j
p_err = 0
num_p = 0
outcome = 2 * star_p_val
gkp_inds = []
for i in (1, 2, 3, 4):
if types[i - 1] == "p":
num_p += 1
outcome -= Z_arr[i]
if types[i - 1] == "GKP":
if delta > 0:
p_err += Z_err_cond(2 * delta, Z_arr[i], use_hom_val=True)
gkp_inds += [i]
if delta > 0:
p_err += Z_err_cond(2 * (2 + num_p) * delta, outcome, use_hom_val=True)
p_err = min(p_err, 0.5)
p_err = max(p_err, 0)
bitp = GKP_binner([outcome])[0]
bitq = GKP_binner(Z_arr[gkp_inds].astype(np.float64)) if gkp_inds else 0
processed_bit_val = (bitp + np.sum(bitq)) % 2
# Update the reduced CVRHG lattice with the effective
# homodyne value and the phase error probability.
central_vert = tuple(round(i) for i in vertex)
RHG_reduced.nodes[central_vert]["bit_val"] = processed_bit_val
sorted_bits[reduced_indices[central_vert]] = processed_bit_val
RHG_reduced.nodes[central_vert]["p_phase_cond"] = p_err
CVRHG_reduced.bits = sorted_bits
return
| [
"thewalrus.symplectic.beam_splitter",
"numpy.random.default_rng",
"flamingpy.cv.gkp.GKP_binner",
"thewalrus.symplectic.expand",
"flamingpy.cv.gkp.Z_err_cond",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.concatenate",
"scipy.linalg.block_diag",
"flamingpy.cv.ops.CVLayer",
... | [((1132, 1157), 'numpy.empty', 'np.empty', (['p.size', 'p.dtype'], {}), '(p.size, p.dtype)\n', (1140, 1157), True, 'import numpy as np\n'), ((1179, 1196), 'numpy.arange', 'np.arange', (['p.size'], {}), '(p.size)\n', (1188, 1196), True, 'import numpy as np\n'), ((1684, 1711), 'thewalrus.symplectic.beam_splitter', 'beam_splitter', (['(np.pi / 4)', '(0)'], {}), '(np.pi / 4, 0)\n', (1697, 1711), False, 'from thewalrus.symplectic import expand, beam_splitter\n'), ((1723, 1748), 'thewalrus.symplectic.expand', 'expand', (['bs5050', '[1, 0]', '(4)'], {}), '(bs5050, [1, 0], 4)\n', (1729, 1748), False, 'from thewalrus.symplectic import expand, beam_splitter\n'), ((1760, 1785), 'thewalrus.symplectic.expand', 'expand', (['bs5050', '[3, 2]', '(4)'], {}), '(bs5050, [3, 2], 4)\n', (1766, 1785), False, 'from thewalrus.symplectic import expand, beam_splitter\n'), ((1797, 1822), 'thewalrus.symplectic.expand', 'expand', (['bs5050', '[2, 0]', '(4)'], {}), '(bs5050, [2, 0], 4)\n', (1803, 1822), False, 'from thewalrus.symplectic import expand, beam_splitter\n'), ((1834, 1859), 'thewalrus.symplectic.expand', 'expand', (['bs5050', '[3, 1]', '(4)'], {}), '(bs5050, [3, 1], 4)\n', (1840, 1859), False, 'from thewalrus.symplectic import expand, beam_splitter\n'), ((2576, 2589), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (2587, 2589), False, 'from numpy.random import default_rng\n'), ((3287, 3332), 'flamingpy.cv.ops.CVLayer', 'CVLayer', (['RHG_macro'], {'p_swap': 'swap_prob', 'rng': 'rng'}), '(RHG_macro, p_swap=swap_prob, rng=rng)\n', (3294, 3332), False, 'from flamingpy.cv.ops import CVLayer, SCZ_apply\n'), ((3889, 3916), 'numpy.empty', 'np.empty', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (3897, 3916), True, 'import numpy as np\n'), ((5404, 5439), 'flamingpy.cv.ops.SCZ_apply', 'SCZ_apply', (['RHG_macro.adj_mat', 'quads'], {}), '(RHG_macro.adj_mat, quads)\n', (5413, 5439), False, 'from flamingpy.cv.ops import CVLayer, SCZ_apply\n'), ((5589, 5639), 'numpy.concatenate', 'np.concatenate', (['[permuted_inds, N + permuted_inds]'], {}), '([permuted_inds, N + permuted_inds])\n', (5603, 5639), True, 'import numpy as np\n'), ((8922, 8956), 'numpy.empty', 'np.empty', (['(N // 4)'], {'dtype': 'np.float32'}), '(N // 4, dtype=np.float32)\n', (8930, 8956), True, 'import numpy as np\n'), ((2381, 2416), 'scipy.linalg.block_diag', 'block_diag', (['*([bs_perm] * (n // 4))'], {}), '(*([bs_perm] * (n // 4)))\n', (2391, 2416), False, 'from scipy.linalg import block_diag\n'), ((5279, 5297), 'numpy.arange', 'np.arange', (['(0)', 'N', '(4)'], {}), '(0, N, 4)\n', (5288, 5297), True, 'import numpy as np\n'), ((5738, 5772), 'numpy.array', 'np.array', (['[i, i + 1, i + 2, i + 3]'], {}), '([i, i + 1, i + 2, i + 3])\n', (5746, 5772), True, 'import numpy as np\n'), ((7692, 7703), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (7700, 7703), True, 'import numpy as np\n'), ((10858, 10920), 'flamingpy.cv.gkp.Z_err_cond', 'Z_err_cond', (['(2 * (2 + num_p) * delta)', 'outcome'], {'use_hom_val': '(True)'}), '(2 * (2 + num_p) * delta, outcome, use_hom_val=True)\n', (10868, 10920), False, 'from flamingpy.cv.gkp import GKP_binner, Z_err_cond\n'), ((11003, 11024), 'flamingpy.cv.gkp.GKP_binner', 'GKP_binner', (['[outcome]'], {}), '([outcome])\n', (11013, 11024), False, 'from flamingpy.cv.gkp import GKP_binner, Z_err_cond\n'), ((11149, 11161), 'numpy.sum', 'np.sum', (['bitq'], {}), '(bitq)\n', (11155, 11161), True, 'import numpy as np\n'), ((10730, 10779), 'flamingpy.cv.gkp.Z_err_cond', 'Z_err_cond', (['(2 * delta)', 
'Z_arr[i]'], {'use_hom_val': '(True)'}), '(2 * delta, Z_arr[i], use_hom_val=True)\n', (10740, 10779), False, 'from flamingpy.cv.gkp import GKP_binner, Z_err_cond\n')] |
import numpy as np
conv_crit = 0.0000001
alpha = 1
limit = 500
def pad(x, i):
if i < 0 or i >= x.shape[0]:
return 0
else:
return x[i]
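# Illustrative behaviour (added comment, not in the original): pad() treats
# out-of-range indices as zero, so boundary stencil terms simply vanish, e.g.
#   pad(np.array([5.0, 6.0]), -1)  # -> 0
#   pad(np.array([5.0, 6.0]), 1)   # -> 6.0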
def SIP(Aw, As, Ap, An, Ae, b, Ni, Nj):
n = Ni*Nj
Lw = np.empty(n, dtype="double")
Ls = np.empty(n, dtype="double")
Lp = np.empty(n, dtype="double")
Un = np.empty(n, dtype="double")
Ue = np.empty(n, dtype="double")
for i in range(n):
Lw[i] = Aw[i] / (1 + alpha * pad(Un, i-Nj))
Ls[i] = As[i] / (1 + alpha * pad(Ue, i-1))
Lp[i] = Ap[i] + alpha * (Lw[i] * pad(Un, i-Nj) + Ls[i] * pad(Ue, i-1)) - Lw[i] * pad(Ue, i-Nj) - Ls[i] * pad(Un, i-1)
Un[i] = (An[i] - alpha * Lw[i] * pad(Un, i-Nj)) / Lp[i]
Ue[i] = (Ae[i] - alpha * Ls[i] * pad(Ue, i-1)) / Lp[i]
x = np.zeros(n, dtype="double")
R = np.empty(n, dtype="double")
delta = np.empty(n, dtype="double")
error = 1
itr = 0
while error > conv_crit and itr <= limit:
for k in range(n):
rho = b[k] - Aw[k] * pad(x, k - Nj) - As[k] * pad(x, k - 1) - Ae[k] * pad(x, k + Nj) - An[k] * pad(x, k + 1) - Ap[k] * pad(x, k)
R[k] = (rho - Ls[k] * pad(R, k - 1) - Lw[k] * pad(R, k - Nj)) / Lp[k]
for j in range(n - 1, -1, -1):
delta[j] = R[j] - Un[j] * pad(delta, j + 1) - Ue[j] * pad(delta, j + Nj)
error = (delta @ delta) ** 0.5
x += delta
print(R)
itr += 1
return x
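# Hedged note (editorial assumption): this routine appears to implement
# Stone's Strongly Implicit Procedure (SIP) for a five-point (W, S, P, N, E)
# pentadiagonal stencil on an Ni x Nj grid: Lw/Ls/Lp/Un/Ue form an incomplete
# LU factorisation, and each sweep does a forward solve for the residual R
# followed by a backward solve for the correction delta.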
Aw = np.array([0,0,1,2], dtype="double")
As = np.array([0,1,2,3], dtype="double")
Ap = np.array([1,2,3,4], dtype="double")
An = np.array([1,2,3,0], dtype="double")
Ae = np.array([1,2,0,0], dtype="double")
b = np.array([3,7,9,9], dtype="double")
# Aw = np.array([0,0,0,1,2,3,4,5,6], dtype="double")
# As = np.array([0,1,2,3,4,5,6,7,8], dtype="double")
# Ap = np.array([1,2,3,4,5,6,7,8,9], dtype="double")
# An = np.array([1,2,3,4,5,6,7,8,0], dtype="double")
# Ae = np.array([1,2,3,4,5,6,0,0,0], dtype="double")
# b = np.array([3,7,11,16,21,26,24,28,23], dtype="double")
print(SIP(Aw, As, Ap, An, Ae, b, 2, 2))
#    Nw = np.empty(n, dtype="double")
#    Nnw = np.empty(n, dtype="double")
#    Ns = np.empty(n, dtype="double")
#    Np = np.empty(n, dtype="double")
#    Nn = np.empty(n, dtype="double")
#    Nse = np.empty(n, dtype="double")
#    Ne = np.empty(n, dtype="double")
#    for j in range(n):
#        Nw[j] = Lw[j] - Aw[j]
#        Nnw[j] = Lw[j]*Un[j-Nj]
#        Ns[j] = Ls[j] - As[j]
#        Np[j] = Lw[j]*Ue[j-Nj] + Ls[j]*Un[j-1] + Lp[j] - Ap[j]
#        Nn[j] = Un[j]*Lp[j] - An[j]
#        Nse[j] = Ls[j]*Ue[j-1]
#        Ne[j] = Ue[j]*Lp[j] - Ae[j]
| [
"numpy.array",
"numpy.zeros",
"numpy.empty"
] | [((1512, 1550), 'numpy.array', 'np.array', (['[0, 0, 1, 2]'], {'dtype': '"""double"""'}), "([0, 0, 1, 2], dtype='double')\n", (1520, 1550), True, 'import numpy as np\n'), ((1554, 1592), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {'dtype': '"""double"""'}), "([0, 1, 2, 3], dtype='double')\n", (1562, 1592), True, 'import numpy as np\n'), ((1596, 1634), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {'dtype': '"""double"""'}), "([1, 2, 3, 4], dtype='double')\n", (1604, 1634), True, 'import numpy as np\n'), ((1638, 1676), 'numpy.array', 'np.array', (['[1, 2, 3, 0]'], {'dtype': '"""double"""'}), "([1, 2, 3, 0], dtype='double')\n", (1646, 1676), True, 'import numpy as np\n'), ((1680, 1718), 'numpy.array', 'np.array', (['[1, 2, 0, 0]'], {'dtype': '"""double"""'}), "([1, 2, 0, 0], dtype='double')\n", (1688, 1718), True, 'import numpy as np\n'), ((1721, 1759), 'numpy.array', 'np.array', (['[3, 7, 9, 9]'], {'dtype': '"""double"""'}), "([3, 7, 9, 9], dtype='double')\n", (1729, 1759), True, 'import numpy as np\n'), ((244, 271), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (252, 271), True, 'import numpy as np\n'), ((282, 309), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (290, 309), True, 'import numpy as np\n'), ((320, 347), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (328, 347), True, 'import numpy as np\n'), ((358, 385), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (366, 385), True, 'import numpy as np\n'), ((396, 423), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (404, 423), True, 'import numpy as np\n'), ((822, 849), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (830, 849), True, 'import numpy as np\n'), ((859, 886), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (867, 886), True, 'import numpy as np\n'), ((900, 927), 'numpy.empty', 'np.empty', (['n'], {'dtype': '"""double"""'}), "(n, dtype='double')\n", (908, 927), True, 'import numpy as np\n')] |
# freda (todo) :
import os, time, sys, math
import subprocess, shutil
from os.path import *
import numpy as np
from inspect import isclass
from pytz import timezone
from datetime import datetime
import inspect
import torch
def datestr():
pacific = timezone('US/Pacific')
now = datetime.now(pacific)
return '{}{:02}{:02}_{:02}{:02}'.format(now.year, now.month, now.day, now.hour, now.minute)
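# Example output (illustrative): for 2024-01-31 04:59 Pacific time, datestr()
# returns '20240131_0459'.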
def module_to_dict(module, exclude=[]):
return dict([(x, getattr(module, x)) for x in dir(module)
if isclass(getattr(module, x))
and x not in exclude
and getattr(module, x) not in exclude])
def find_le(a, x):
from bisect import bisect_right
# Find rightmost value less than or equal to x
i = bisect_right(a, x)
if i:
return i-1
raise ValueError
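# Usage sketch (illustrative): find_le returns the index of the rightmost
# element <= x, e.g. find_le([1, 3, 5], 4) -> 1, and raises ValueError when
# every element exceeds x.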
class TimerBlock:
def __init__(self, title):
print(("{}".format(title)))
def __enter__(self):
self.start = time.clock()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end = time.clock()
self.interval = self.end - self.start
if exc_type is not None:
self.log("Operation failed\n")
else:
self.log("Operation finished\n")
def log(self, string):
duration = time.clock() - self.start
units = 's'
if duration > 60:
duration = duration / 60.
units = 'm'
print((" [{:.3f}{}] {}".format(duration, units, string)))
def log2file(self, fid, string):
fid = open(fid, 'a')
fid.write("%s\n"%(string))
fid.close()
def add_arguments_for_module(parser, module, argument_for_class, default, skip_params=[], parameter_defaults={}):
argument_group = parser.add_argument_group(argument_for_class.capitalize())
module_dict = module_to_dict(module)
argument_group.add_argument('--' + argument_for_class, type=str, default=default, choices=list(module_dict.keys()))
args, unknown_args = parser.parse_known_args()
class_obj = module_dict[vars(args)[argument_for_class]]
argspec = inspect.getargspec(class_obj.__init__)
defaults = argspec.defaults[::-1] if argspec.defaults else None
args = argspec.args[::-1]
for i, arg in enumerate(args):
cmd_arg = '{}_{}'.format(argument_for_class, arg)
if arg not in skip_params + ['self', 'args']:
if arg in list(parameter_defaults.keys()):
argument_group.add_argument('--{}'.format(cmd_arg), type=type(parameter_defaults[arg]), default=parameter_defaults[arg])
elif (defaults is not None and i < len(defaults)):
argument_group.add_argument('--{}'.format(cmd_arg), type=type(defaults[i]), default=defaults[i])
else:
print(("[Warning]: non-default argument '{}' detected on class '{}'. This argument cannot be modified via the command line"
.format(arg, module.__class__.__name__)))
# We don't have a good way of dealing with inferring the type of the argument
# TODO: try creating a custom action and using ast's infer type?
# else:
# argument_group.add_argument('--{}'.format(cmd_arg), required=True)
def kwargs_from_args(args, argument_for_class):
argument_for_class = argument_for_class + '_'
return {key[len(argument_for_class):]: value for key, value in list(vars(args).items()) if argument_for_class in key and key != argument_for_class + 'class'}
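# Worked example (hypothetical argument names, for illustration only): with
# vars(args) == {'model': 'FlowNet', 'model_lr': 0.1, 'model_class': 'X'},
# kwargs_from_args(args, 'model') returns {'lr': 0.1}; the '<name>_class'
# entry is skipped and the prefix is stripped from the remaining keys.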
def format_dictionary_of_losses(labels, values):
try:
string = ', '.join([('{}: {:' + ('.3f' if value >= 0.001 else '.1e') +'}').format(name, value) for name, value in zip(labels, values)])
except (TypeError, ValueError) as e:
print((list(zip(labels, values))))
string = '[Log Error] ' + str(e)
return string
class IteratorTimer():
def __init__(self, iterable):
self.iterable = iterable
self.iterator = self.iterable.__iter__()
def __iter__(self):
return self
def __len__(self):
return len(self.iterable)
def __next__(self):
start = time.time()
n = next(self.iterator)
self.last_duration = (time.time() - start)
return n
next = __next__
def gpumemusage():
    gpu_mem = subprocess.check_output("nvidia-smi | grep MiB | cut -f 3 -d '|'", shell=True).decode().replace(' ', '').replace('\n', '').replace('i', '')
all_stat = [float(a) for a in gpu_mem.replace('/','').split('MB')[:-1]]
gpu_mem = ''
    for i in range(len(all_stat)//2):
curr, tot = all_stat[2*i], all_stat[2*i+1]
util = "%1.2f"%(100*curr/tot)+'%'
cmem = str(int(math.ceil(curr/1024.)))+'GB'
gmem = str(int(math.ceil(tot/1024.)))+'GB'
gpu_mem += util + '--' + join(cmem, gmem) + ' '
return gpu_mem
def update_hyperparameter_schedule(args, epoch, global_iteration, optimizer):
if args.schedule_lr_frequency > 0:
for param_group in optimizer.param_groups:
if (global_iteration + 1) % args.schedule_lr_frequency == 0:
param_group['lr'] /= float(args.schedule_lr_fraction)
param_group['lr'] = float(np.maximum(param_group['lr'], 0.000001))
def save_checkpoint(state, is_best, path, prefix, filename='checkpoint.pth.tar'):
prefix_save = os.path.join(path, prefix)
name = prefix_save + '_' + filename
torch.save(state, name)
if is_best:
shutil.copyfile(name, prefix_save + '_model_best.pth.tar')
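# Usage sketch (illustrative paths and names, not from the source):
#   save_checkpoint(state, is_best=True, path='./work', prefix='flownet')
# writes './work/flownet_checkpoint.pth.tar' and, because is_best is True,
# copies it to './work/flownet_model_best.pth.tar'.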
| [
"subprocess.check_output",
"pytz.timezone",
"math.ceil",
"time.clock",
"os.path.join",
"inspect.getargspec",
"datetime.datetime.now",
"bisect.bisect_right",
"shutil.copyfile",
"torch.save",
"numpy.maximum",
"time.time"
] | [((255, 277), 'pytz.timezone', 'timezone', (['"""US/Pacific"""'], {}), "('US/Pacific')\n", (263, 277), False, 'from pytz import timezone\n'), ((288, 309), 'datetime.datetime.now', 'datetime.now', (['pacific'], {}), '(pacific)\n', (300, 309), False, 'from datetime import datetime\n'), ((783, 801), 'bisect.bisect_right', 'bisect_right', (['a', 'x'], {}), '(a, x)\n', (795, 801), False, 'from bisect import bisect_right\n'), ((2152, 2190), 'inspect.getargspec', 'inspect.getargspec', (['class_obj.__init__'], {}), '(class_obj.__init__)\n', (2170, 2190), False, 'import inspect\n'), ((5392, 5418), 'os.path.join', 'os.path.join', (['path', 'prefix'], {}), '(path, prefix)\n', (5404, 5418), False, 'import os, time, sys, math\n'), ((5463, 5486), 'torch.save', 'torch.save', (['state', 'name'], {}), '(state, name)\n', (5473, 5486), False, 'import torch\n'), ((985, 997), 'time.clock', 'time.clock', ([], {}), '()\n', (995, 997), False, 'import os, time, sys, math\n'), ((1094, 1106), 'time.clock', 'time.clock', ([], {}), '()\n', (1104, 1106), False, 'import os, time, sys, math\n'), ((4195, 4206), 'time.time', 'time.time', ([], {}), '()\n', (4204, 4206), False, 'import os, time, sys, math\n'), ((5511, 5569), 'shutil.copyfile', 'shutil.copyfile', (['name', "(prefix_save + '_model_best.pth.tar')"], {}), "(name, prefix_save + '_model_best.pth.tar')\n", (5526, 5569), False, 'import subprocess, shutil\n'), ((1337, 1349), 'time.clock', 'time.clock', ([], {}), '()\n', (1347, 1349), False, 'import os, time, sys, math\n'), ((4269, 4280), 'time.time', 'time.time', ([], {}), '()\n', (4278, 4280), False, 'import os, time, sys, math\n'), ((4740, 4764), 'math.ceil', 'math.ceil', (['(curr / 1024.0)'], {}), '(curr / 1024.0)\n', (4749, 4764), False, 'import os, time, sys, math\n'), ((4792, 4815), 'math.ceil', 'math.ceil', (['(tot / 1024.0)'], {}), '(tot / 1024.0)\n', (4801, 4815), False, 'import os, time, sys, math\n'), ((5250, 5286), 'numpy.maximum', 'np.maximum', (["param_group['lr']", '(1e-06)'], {}), "(param_group['lr'], 1e-06)\n", (5260, 5286), True, 'import numpy as np\n'), ((4362, 4440), 'subprocess.check_output', 'subprocess.check_output', (['"""nvidia-smi | grep MiB | cut -f 3 -d \'|\'"""'], {'shell': '(True)'}), '("nvidia-smi | grep MiB | cut -f 3 -d \'|\'", shell=True)\n', (4385, 4440), False, 'import subprocess, shutil\n')] |
# import face_recognition
import os
import time
from milvus import *
import numpy as np
import random
from faker import Faker
fake = Faker()
# milvus = Milvus()
milvus_collection = 'partition_query'
FILE_PATH = 'bigann_base.bvecs'
VEC_NUM = 10000000
BASE_LEN = 100000
NUM = VEC_NUM // BASE_LEN
VEC_DIM = 128
SERVER_ADDR = "0.0.0.0"
SERVER_PORT = 19530
def load_bvecs_data(fname,base_len,idx):
begin_num = base_len * idx
# print(fname, ": ", begin_num )
x = np.memmap(fname, dtype='uint8', mode='r')
d = x[:4].view('int32')[0]
data = x.reshape(-1, d + 4)[begin_num:(begin_num+base_len), 4:]
data = (data + 0.5) / 255
data = data.tolist()
return data
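# Format note (editorial, based on the standard .bvecs layout): each vector is
# stored as a 4-byte little-endian int giving the dimension d, followed by d
# uint8 components; reshape(-1, d + 4)[..., 4:] above strips the per-vector
# headers, and (data + 0.5) / 255 maps the raw bytes into (0, 1).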
def create_milvus_collection(milvus):
if not milvus.has_collection(milvus_collection)[1]:
param = {
'collection_name': milvus_collection,
'dimension': VEC_DIM,
'index_file_size':1024,
'metric_type':MetricType.L2
}
status = milvus.create_collection(param)
print(status)
build_collection(milvus)
def build_collection(milvus):
index_param = { 'nlist': 16384}
status = milvus.create_index(milvus_collection, IndexType.IVF_SQ8H, index_param)
print(status)
def create_partition(partition_tag,milvus):
milvus.create_partition(milvus_collection, partition_tag=partition_tag)
def get_partition_tag():
partition_tag=[]
count = 0
while count<NUM:
sex = random.choice(['female','male'])
get_time = fake.date_between(start_date="-30d", end_date="today")
is_glasses = random.choice(['True','False'])
p_tag = str(get_time) + "/" + sex + "/" + str(is_glasses)
if p_tag not in partition_tag:
partition_tag.append(p_tag)
count = count + 1
print(partition_tag)
return partition_tag
def add_vectors(vectors,vectors_ids,partition_tag,milvus):
time_start = time.time()
status, ids = milvus.insert(collection_name=milvus_collection, records=vectors, ids=vectors_ids, partition_tag=partition_tag)
time_end = time.time()
print(status, "insert milvue time: ", time_end-time_start)
def main():
milvus = Milvus(host=SERVER_ADDR, port=SERVER_PORT)
create_milvus_collection(milvus)
partition_tag = get_partition_tag()
count = 0
while count < (VEC_NUM // BASE_LEN):
vectors = load_bvecs_data(FILE_PATH,BASE_LEN,count)
vectors_ids = [id for id in range(count*BASE_LEN,(count+1)*BASE_LEN)]
create_partition(partition_tag[count],milvus)
add_vectors(vectors,vectors_ids,partition_tag[count],milvus)
count = count + 1
if __name__ == '__main__':
main()
| [
"faker.Faker",
"numpy.memmap",
"time.time",
"random.choice"
] | [((134, 141), 'faker.Faker', 'Faker', ([], {}), '()\n', (139, 141), False, 'from faker import Faker\n'), ((477, 518), 'numpy.memmap', 'np.memmap', (['fname'], {'dtype': '"""uint8"""', 'mode': '"""r"""'}), "(fname, dtype='uint8', mode='r')\n", (486, 518), True, 'import numpy as np\n'), ((1936, 1947), 'time.time', 'time.time', ([], {}), '()\n', (1945, 1947), False, 'import time\n'), ((2097, 2108), 'time.time', 'time.time', ([], {}), '()\n', (2106, 2108), False, 'import time\n'), ((1473, 1506), 'random.choice', 'random.choice', (["['female', 'male']"], {}), "(['female', 'male'])\n", (1486, 1506), False, 'import random\n'), ((1601, 1633), 'random.choice', 'random.choice', (["['True', 'False']"], {}), "(['True', 'False'])\n", (1614, 1633), False, 'import random\n')] |
# Licensed under a 3-clause BSD style license - see LICENSES
import os
from os.path import dirname, join
from tempfile import mkdtemp, NamedTemporaryFile
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from astropy.table import Table
from astropy.extern import six
from astropy import wcs
from astropy.io import fits
import sncosmo
# Dummy data used for read_lc/write_lc round-tripping tests
time = [1., 2., 3., 4.]
band = ['sdssg', 'sdssr', 'sdssi', 'sdssz']
zp = [25., 25., 25., 25.]
zpsys = ['ab', 'ab', 'ab', 'ab']
flux = [1., 1., 1., 1.]
fluxerr = [0.1, 0.1, 0.1, 0.1]
lcdata = Table(data=(time, band, flux, fluxerr, zp, zpsys),
names=('time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys'),
meta={'a': 1, 'b': 1.0, 'c': 'one'})
def test_read_griddata_ascii():
# Write a temporary test file.
f = six.StringIO()
f.write("0. 0. 0.\n"
"0. 1. 0.\n"
"0. 2. 0.\n"
"1. 0. 0.\n"
"1. 1. 0.\n"
"1. 2. 0.\n")
f.seek(0)
x0, x1, y = sncosmo.read_griddata_ascii(f)
f.close()
assert_allclose(x0, np.array([0., 1.]))
assert_allclose(x1, np.array([0., 1., 2.]))
def test_write_griddata_ascii():
x0 = np.array([0., 1.])
x1 = np.array([0., 1., 2.])
y = np.zeros((2, 3))
f = six.StringIO()
sncosmo.write_griddata_ascii(x0, x1, y, f)
# Read it back
f.seek(0)
x0_in, x1_in, y_in = sncosmo.read_griddata_ascii(f)
f.close()
assert_allclose(x0_in, x0)
assert_allclose(x1_in, x1)
assert_allclose(y_in, y)
# with a filename:
dirname = mkdtemp()
fname = os.path.join(dirname, 'griddata.dat')
sncosmo.write_griddata_ascii(x0, x1, y, fname)
x0_in, x1_in, y_in = sncosmo.read_griddata_ascii(fname)
assert_allclose(x0_in, x0)
assert_allclose(x1_in, x1)
assert_allclose(y_in, y)
os.remove(fname)
os.rmdir(dirname)
def test_griddata_fits():
"""Round tripping with write_griddata_fits() and read_griddata_fits()"""
x0 = np.array([0., 1.])
x1 = np.array([0., 1., 2.])
y = np.zeros((2, 3))
f = six.BytesIO()
sncosmo.write_griddata_fits(x0, x1, y, f)
# Read it back
f.seek(0)
x0_in, x1_in, y_in = sncosmo.read_griddata_fits(f)
assert_allclose(x0_in, x0)
assert_allclose(x1_in, x1)
assert_allclose(y_in, y)
f.close()
# Test reading 3-d grid data. We don't have a writer for
# this, so we write a temporary FITS file by hand.
x2 = np.array([3., 5., 7., 9])
y = np.zeros((len(x0), len(x1), len(x2)))
# write a FITS file that represents x0, x1, x2, y
w = wcs.WCS(naxis=3)
w.wcs.crpix = [1, 1, 1]
w.wcs.crval = [x2[0], x1[0], x0[0]]
w.wcs.cdelt = [2., 1., 1.]
hdu = fits.PrimaryHDU(y, header=w.to_header())
f = six.BytesIO()
hdu.writeto(f)
# Read it back
f.seek(0)
x0_in, x1_in, x2_in, y_in = sncosmo.read_griddata_fits(f)
f.close()
assert_allclose(x0_in, x0)
assert_allclose(x1_in, x1)
assert_allclose(x2_in, x2)
assert_allclose(y_in, y)
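# Editorial note (added for clarity): the WCS header built above encodes each
# grid axis as value = crval + cdelt * index (crpix = 1), so crval = [3., 0., 0.]
# with cdelt = [2., 1., 1.] reproduces x2 = [3, 5, 7, 9], x1 = [0, 1, 2] and
# x0 = [0, 1] on read-back.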
def test_read_lc():
from astropy.extern.six import StringIO
f = StringIO("""
@id 1
@RA 36.0
@description good
time band flux fluxerr zp zpsys
50000. g 1. 0.1 25. ab
50000.1 r 2. 0.1 25. ab
""")
t = sncosmo.read_lc(f, format='ascii')
assert str(t) == (" time band flux fluxerr zp zpsys\n"
"------- ---- ---- ------- ---- -----\n"
"50000.0 g 1.0 0.1 25.0 ab\n"
"50000.1 r 2.0 0.1 25.0 ab")
assert t.meta['id'] == 1
assert t.meta['RA'] == 36.0
assert t.meta['description'] == 'good'
def test_read_salt2():
fname = join(dirname(__file__), "data", "lc-03D4ag.list")
data = sncosmo.read_lc(fname, format="salt2")
# Test a few columns
assert_allclose(data["Date"][0:4],
[52816.54, 52824.59, 52851.53, 52873.4])
assert_allclose(data["ZP"][0:4], 27.036167)
assert np.all(data["Filter"][0:4] == "MEGACAMPSF::g")
assert np.all(data["MagSys"] == "AB_B12")
# Test a bit of metadata
assert_allclose(data.meta["Z_HELIO"], 0.285)
assert_allclose(data.meta["RA"], 333.690959)
assert data.meta["z_source"] == "H"
def test_read_salt2_cov():
fname = join(dirname(__file__), "data", "lc-03D4ag.list")
data = sncosmo.read_lc(fname, format="salt2", read_covmat=True)
assert data["Fluxcov"].shape == (len(data), len(data))
assert_allclose(data["Fluxcov"][0:3, 0:3],
[[0.867712297284, 0.01139998771, 0.01119398747],
[0.01139998771, 2.03512047975, 0.01190299234],
[0.01119398747, 0.01190299234, 1.3663344852]])
def test_read_salt2_old():
dname = join(dirname(__file__), "data", "SNLS3-04D3gx")
data = sncosmo.read_lc(dname, format="salt2-old")
# Test length and column names:
assert len(data) == 25 + 37 + 38 + 18 # g + r + i + z lengths
assert data.colnames == ["Date", "Flux", "Fluxerr", "ZP", "Filter",
"MagSys"]
# Test a bit of metadata and data
assert data.meta["NAME"] == "04D3gx"
assert_allclose(data.meta["Redshift"], 0.91)
assert_allclose(data.meta["RA"], 215.056948)
assert np.all(data["MagSys"] == "VEGA")
def test_roundtripping():
for format in ['json', 'ascii', 'salt2']:
f = NamedTemporaryFile(delete=False)
f.close() # close to ensure that we can open it in write_lc()
# raw=True is for the benefit of salt2 writer that modifies column
# and header names by default.
sncosmo.write_lc(lcdata, f.name, format=format, raw=True,
pedantic=False)
data = sncosmo.read_lc(f.name, format=format)
for key in lcdata.colnames:
assert np.all(data[key] == lcdata[key])
for key in lcdata.meta:
assert data.meta[key] == lcdata.meta[key]
os.unlink(f.name)
def test_write_lc_salt2():
"""Extra test to see if column renaming works"""
f = NamedTemporaryFile(delete=False)
f.close() # close to ensure that we can open it in write_lc()
sncosmo.write_lc(lcdata, f.name, format='salt2')
os.unlink(f.name)
def test_write_lc_snana():
"""Just check if the snana writer works without error."""
f = NamedTemporaryFile(delete=False)
f.close() # close to ensure that we can open it in write_lc()
sncosmo.write_lc(lcdata, f.name, format='snana', pedantic=False)
os.unlink(f.name)
def test_load_example_data():
data = sncosmo.load_example_data()
| [
"astropy.table.Table",
"numpy.array",
"os.remove",
"sncosmo.read_lc",
"astropy.extern.six.StringIO",
"sncosmo.write_griddata_ascii",
"numpy.testing.assert_allclose",
"os.unlink",
"tempfile.NamedTemporaryFile",
"sncosmo.load_example_data",
"astropy.extern.six.BytesIO",
"os.path.dirname",
"snc... | [((620, 769), 'astropy.table.Table', 'Table', ([], {'data': '(time, band, flux, fluxerr, zp, zpsys)', 'names': "('time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys')", 'meta': "{'a': 1, 'b': 1.0, 'c': 'one'}"}), "(data=(time, band, flux, fluxerr, zp, zpsys), names=('time', 'band',\n 'flux', 'fluxerr', 'zp', 'zpsys'), meta={'a': 1, 'b': 1.0, 'c': 'one'})\n", (625, 769), False, 'from astropy.table import Table\n'), ((874, 888), 'astropy.extern.six.StringIO', 'six.StringIO', ([], {}), '()\n', (886, 888), False, 'from astropy.extern import six\n'), ((1071, 1101), 'sncosmo.read_griddata_ascii', 'sncosmo.read_griddata_ascii', (['f'], {}), '(f)\n', (1098, 1101), False, 'import sncosmo\n'), ((1254, 1274), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1262, 1274), True, 'import numpy as np\n'), ((1282, 1307), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (1290, 1307), True, 'import numpy as np\n'), ((1313, 1329), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (1321, 1329), True, 'import numpy as np\n'), ((1339, 1353), 'astropy.extern.six.StringIO', 'six.StringIO', ([], {}), '()\n', (1351, 1353), False, 'from astropy.extern import six\n'), ((1358, 1400), 'sncosmo.write_griddata_ascii', 'sncosmo.write_griddata_ascii', (['x0', 'x1', 'y', 'f'], {}), '(x0, x1, y, f)\n', (1386, 1400), False, 'import sncosmo\n'), ((1460, 1490), 'sncosmo.read_griddata_ascii', 'sncosmo.read_griddata_ascii', (['f'], {}), '(f)\n', (1487, 1490), False, 'import sncosmo\n'), ((1509, 1535), 'numpy.testing.assert_allclose', 'assert_allclose', (['x0_in', 'x0'], {}), '(x0_in, x0)\n', (1524, 1535), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1540, 1566), 'numpy.testing.assert_allclose', 'assert_allclose', (['x1_in', 'x1'], {}), '(x1_in, x1)\n', (1555, 1566), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1571, 1595), 'numpy.testing.assert_allclose', 'assert_allclose', (['y_in', 'y'], {}), '(y_in, y)\n', (1586, 1595), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1634, 1643), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (1641, 1643), False, 'from tempfile import mkdtemp, NamedTemporaryFile\n'), ((1656, 1693), 'os.path.join', 'os.path.join', (['dirname', '"""griddata.dat"""'], {}), "(dirname, 'griddata.dat')\n", (1668, 1693), False, 'import os\n'), ((1698, 1744), 'sncosmo.write_griddata_ascii', 'sncosmo.write_griddata_ascii', (['x0', 'x1', 'y', 'fname'], {}), '(x0, x1, y, fname)\n', (1726, 1744), False, 'import sncosmo\n'), ((1770, 1804), 'sncosmo.read_griddata_ascii', 'sncosmo.read_griddata_ascii', (['fname'], {}), '(fname)\n', (1797, 1804), False, 'import sncosmo\n'), ((1809, 1835), 'numpy.testing.assert_allclose', 'assert_allclose', (['x0_in', 'x0'], {}), '(x0_in, x0)\n', (1824, 1835), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1840, 1866), 'numpy.testing.assert_allclose', 'assert_allclose', (['x1_in', 'x1'], {}), '(x1_in, x1)\n', (1855, 1866), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1871, 1895), 'numpy.testing.assert_allclose', 'assert_allclose', (['y_in', 'y'], {}), '(y_in, y)\n', (1886, 1895), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1900, 1916), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (1909, 1916), False, 'import os\n'), ((1921, 1938), 'os.rmdir', 'os.rmdir', (['dirname'], {}), '(dirname)\n', (1929, 1938), False, 'import os\n'), 
((2054, 2074), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2062, 2074), True, 'import numpy as np\n'), ((2082, 2107), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (2090, 2107), True, 'import numpy as np\n'), ((2113, 2129), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (2121, 2129), True, 'import numpy as np\n'), ((2139, 2152), 'astropy.extern.six.BytesIO', 'six.BytesIO', ([], {}), '()\n', (2150, 2152), False, 'from astropy.extern import six\n'), ((2157, 2198), 'sncosmo.write_griddata_fits', 'sncosmo.write_griddata_fits', (['x0', 'x1', 'y', 'f'], {}), '(x0, x1, y, f)\n', (2184, 2198), False, 'import sncosmo\n'), ((2258, 2287), 'sncosmo.read_griddata_fits', 'sncosmo.read_griddata_fits', (['f'], {}), '(f)\n', (2284, 2287), False, 'import sncosmo\n'), ((2292, 2318), 'numpy.testing.assert_allclose', 'assert_allclose', (['x0_in', 'x0'], {}), '(x0_in, x0)\n', (2307, 2318), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((2323, 2349), 'numpy.testing.assert_allclose', 'assert_allclose', (['x1_in', 'x1'], {}), '(x1_in, x1)\n', (2338, 2349), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((2354, 2378), 'numpy.testing.assert_allclose', 'assert_allclose', (['y_in', 'y'], {}), '(y_in, y)\n', (2369, 2378), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((2519, 2547), 'numpy.array', 'np.array', (['[3.0, 5.0, 7.0, 9]'], {}), '([3.0, 5.0, 7.0, 9])\n', (2527, 2547), True, 'import numpy as np\n'), ((2654, 2670), 'astropy.wcs.WCS', 'wcs.WCS', ([], {'naxis': '(3)'}), '(naxis=3)\n', (2661, 2670), False, 'from astropy import wcs\n'), ((2829, 2842), 'astropy.extern.six.BytesIO', 'six.BytesIO', ([], {}), '()\n', (2840, 2842), False, 'from astropy.extern import six\n'), ((2928, 2957), 'sncosmo.read_griddata_fits', 'sncosmo.read_griddata_fits', (['f'], {}), '(f)\n', (2954, 2957), False, 'import sncosmo\n'), ((2977, 3003), 'numpy.testing.assert_allclose', 'assert_allclose', (['x0_in', 'x0'], {}), '(x0_in, x0)\n', (2992, 3003), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((3008, 3034), 'numpy.testing.assert_allclose', 'assert_allclose', (['x1_in', 'x1'], {}), '(x1_in, x1)\n', (3023, 3034), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((3039, 3065), 'numpy.testing.assert_allclose', 'assert_allclose', (['x2_in', 'x2'], {}), '(x2_in, x2)\n', (3054, 3065), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((3070, 3094), 'numpy.testing.assert_allclose', 'assert_allclose', (['y_in', 'y'], {}), '(y_in, y)\n', (3085, 3094), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((3169, 3308), 'astropy.extern.six.StringIO', 'StringIO', (['"""\n@id 1\n@RA 36.0\n@description good\ntime band flux fluxerr zp zpsys\n50000. g 1. 0.1 25. ab\n50000.1 r 2. 0.1 25. ab\n"""'], {}), '(\n """\n@id 1\n@RA 36.0\n@description good\ntime band flux fluxerr zp zpsys\n50000. g 1. 0.1 25. ab\n50000.1 r 2. 0.1 25. 
ab\n"""\n )\n', (3177, 3308), False, 'from astropy.extern.six import StringIO\n'), ((3307, 3341), 'sncosmo.read_lc', 'sncosmo.read_lc', (['f'], {'format': '"""ascii"""'}), "(f, format='ascii')\n", (3322, 3341), False, 'import sncosmo\n'), ((3795, 3833), 'sncosmo.read_lc', 'sncosmo.read_lc', (['fname'], {'format': '"""salt2"""'}), "(fname, format='salt2')\n", (3810, 3833), False, 'import sncosmo\n'), ((3864, 3939), 'numpy.testing.assert_allclose', 'assert_allclose', (["data['Date'][0:4]", '[52816.54, 52824.59, 52851.53, 52873.4]'], {}), "(data['Date'][0:4], [52816.54, 52824.59, 52851.53, 52873.4])\n", (3879, 3939), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((3964, 4007), 'numpy.testing.assert_allclose', 'assert_allclose', (["data['ZP'][0:4]", '(27.036167)'], {}), "(data['ZP'][0:4], 27.036167)\n", (3979, 4007), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((4020, 4066), 'numpy.all', 'np.all', (["(data['Filter'][0:4] == 'MEGACAMPSF::g')"], {}), "(data['Filter'][0:4] == 'MEGACAMPSF::g')\n", (4026, 4066), True, 'import numpy as np\n'), ((4078, 4112), 'numpy.all', 'np.all', (["(data['MagSys'] == 'AB_B12')"], {}), "(data['MagSys'] == 'AB_B12')\n", (4084, 4112), True, 'import numpy as np\n'), ((4147, 4191), 'numpy.testing.assert_allclose', 'assert_allclose', (["data.meta['Z_HELIO']", '(0.285)'], {}), "(data.meta['Z_HELIO'], 0.285)\n", (4162, 4191), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((4196, 4240), 'numpy.testing.assert_allclose', 'assert_allclose', (["data.meta['RA']", '(333.690959)'], {}), "(data.meta['RA'], 333.690959)\n", (4211, 4240), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((4383, 4439), 'sncosmo.read_lc', 'sncosmo.read_lc', (['fname'], {'format': '"""salt2"""', 'read_covmat': '(True)'}), "(fname, format='salt2', read_covmat=True)\n", (4398, 4439), False, 'import sncosmo\n'), ((4503, 4697), 'numpy.testing.assert_allclose', 'assert_allclose', (["data['Fluxcov'][0:3, 0:3]", '[[0.867712297284, 0.01139998771, 0.01119398747], [0.01139998771, \n 2.03512047975, 0.01190299234], [0.01119398747, 0.01190299234, 1.3663344852]\n ]'], {}), "(data['Fluxcov'][0:3, 0:3], [[0.867712297284, 0.01139998771,\n 0.01119398747], [0.01139998771, 2.03512047975, 0.01190299234], [\n 0.01119398747, 0.01190299234, 1.3663344852]])\n", (4518, 4697), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((4851, 4893), 'sncosmo.read_lc', 'sncosmo.read_lc', (['dname'], {'format': '"""salt2-old"""'}), "(dname, format='salt2-old')\n", (4866, 4893), False, 'import sncosmo\n'), ((5193, 5237), 'numpy.testing.assert_allclose', 'assert_allclose', (["data.meta['Redshift']", '(0.91)'], {}), "(data.meta['Redshift'], 0.91)\n", (5208, 5237), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((5242, 5286), 'numpy.testing.assert_allclose', 'assert_allclose', (["data.meta['RA']", '(215.056948)'], {}), "(data.meta['RA'], 215.056948)\n", (5257, 5286), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((5298, 5330), 'numpy.all', 'np.all', (["(data['MagSys'] == 'VEGA')"], {}), "(data['MagSys'] == 'VEGA')\n", (5304, 5330), True, 'import numpy as np\n'), ((6089, 6121), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (6107, 6121), False, 'from tempfile import mkdtemp, NamedTemporaryFile\n'), ((6193, 6241), 'sncosmo.write_lc', 'sncosmo.write_lc', (['lcdata', 'f.name'], {'format': 
'"""salt2"""'}), "(lcdata, f.name, format='salt2')\n", (6209, 6241), False, 'import sncosmo\n'), ((6246, 6263), 'os.unlink', 'os.unlink', (['f.name'], {}), '(f.name)\n', (6255, 6263), False, 'import os\n'), ((6363, 6395), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (6381, 6395), False, 'from tempfile import mkdtemp, NamedTemporaryFile\n'), ((6467, 6531), 'sncosmo.write_lc', 'sncosmo.write_lc', (['lcdata', 'f.name'], {'format': '"""snana"""', 'pedantic': '(False)'}), "(lcdata, f.name, format='snana', pedantic=False)\n", (6483, 6531), False, 'import sncosmo\n'), ((6536, 6553), 'os.unlink', 'os.unlink', (['f.name'], {}), '(f.name)\n', (6545, 6553), False, 'import os\n'), ((6597, 6624), 'sncosmo.load_example_data', 'sncosmo.load_example_data', ([], {}), '()\n', (6622, 6624), False, 'import sncosmo\n'), ((1141, 1161), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1149, 1161), True, 'import numpy as np\n'), ((1185, 1210), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (1193, 1210), True, 'import numpy as np\n'), ((3739, 3756), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (3746, 3756), False, 'from os.path import dirname, join\n'), ((4327, 4344), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (4334, 4344), False, 'from os.path import dirname, join\n'), ((4797, 4814), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (4804, 4814), False, 'from os.path import dirname, join\n'), ((5417, 5449), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (5435, 5449), False, 'from tempfile import mkdtemp, NamedTemporaryFile\n'), ((5644, 5717), 'sncosmo.write_lc', 'sncosmo.write_lc', (['lcdata', 'f.name'], {'format': 'format', 'raw': '(True)', 'pedantic': '(False)'}), '(lcdata, f.name, format=format, raw=True, pedantic=False)\n', (5660, 5717), False, 'import sncosmo\n'), ((5758, 5796), 'sncosmo.read_lc', 'sncosmo.read_lc', (['f.name'], {'format': 'format'}), '(f.name, format=format)\n', (5773, 5796), False, 'import sncosmo\n'), ((5981, 5998), 'os.unlink', 'os.unlink', (['f.name'], {}), '(f.name)\n', (5990, 5998), False, 'import os\n'), ((5853, 5885), 'numpy.all', 'np.all', (['(data[key] == lcdata[key])'], {}), '(data[key] == lcdata[key])\n', (5859, 5885), True, 'import numpy as np\n')] |
# This script is intended for algorithm verification only
import argparse
import cv2
import os
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
from skimage import transform
from pprint import pprint
from mtcnn.mtcnn import MTCNN
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# from common.landmark_utils import LandmarkImageCrop
# from common.landmark_helper import LandmarkHelper
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--landmark_txt", type=str, default='./new_dataset/landmarks.txt',
help="path to landmarks txt")
ap.add_argument("-c", "--landmark_csv", type=str, default='./new_dataset/face_landmarks.csv',
help="exist landmarks csv")
ap.add_argument("-b", "--base_dir", type=str, default='./new_dataset',
help="base dataset dir")
ap.add_argument("-s", "--output_size", type=int, default=112,
help="output image size")
ap.add_argument("-n", "--new_path", type=str, default='./align_new_dataset',
help="new save image file")
args = vars(ap.parse_args())
REFERENCE_FACIAL_POINTS = [[38.453125, 28.139446],
[70.8962, 27.549734],
[54.171013, 50.283226]]
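# The three reference points correspond to (left pupil, right pupil, nose)
# on the output crop (112x112 by default), matching the landmark indices
# 34, 92 and 86 used in the main block below.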
# def scale_and_shift(image, landmarks, scale_range, output_size):
# '''
# Auto generate bbox and then random to scale and shift it.
# Args:
# image: a numpy type
# landmarks: face landmarks with format [(x1, y1), ...]. range is 0-w or h in int
# scale_range: scale bbox in (min, max). eg: (1.3, 1.5)
# output_size: output size of image
# Returns:
# an image and landmarks will be returned
# Raises:
# No
# '''
# (x1, y1, x2, y2), new_size, need_pad, (p_x, p_y, p_w, p_h) = LandmarkImageCrop.get_bbox_of_landmarks(
# image, landmarks, scale_range, shift_rate=0.3)
# box_image = image[y1:y2, x1:x2]
# if need_pad:
# box_image = np.lib.pad(
# box_image, ((p_y, p_h), (p_x, p_w), (0, 0)), 'constant')
# box_image = cv2.resize(box_image, (output_size, output_size))
# landmarks = (landmarks - (x1 - p_x, y1 - p_y))
# return box_image, landmarks
class FaceAlign(object):
'''Align face with MTCNN'''
def __init__(self, out_size):
self.detector = MTCNN()
self.out_size = out_size
def face_aligned_mtcnn(self, im):
'''
        Align the face using the keypoints detected by MTCNN
im: BGR image array
'''
try:
wrapper = self.detector.detect_faces(im[:, :, ::-1])[0]
        except IndexError:
            raise ValueError("No face...")
points = wrapper['keypoints']
values = list(points.values())
gt_array = np.array(values).reshape((-1, 2))[:2]
ref_array = np.array(REFERENCE_FACIAL_POINTS[:2], dtype=np.float32)
tform = transform.SimilarityTransform()
tform.estimate(gt_array, ref_array)
tfm = tform.params[0: 2, :]
return cv2.warpAffine(
im, tfm, (self.out_size, self.out_size))
def face_aligned(self, im, ldmarks):
'''
im: BGR array
ldmarks: [(x0, y0), ...]
'''
gt_array = np.array(ldmarks)[:2]
ref_array = np.array(REFERENCE_FACIAL_POINTS[:2], dtype=np.float32)
tform = transform.SimilarityTransform()
tform.estimate(gt_array, ref_array)
tfm = tform.params[0: 2, :]
return cv2.warpAffine(
im, tfm, (self.out_size, self.out_size)), tform
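# Minimal usage sketch (hypothetical image path; assumes a BGR image as
# loaded by cv2.imread and that MTCNN detects at least one face):
# aligner = FaceAlign(out_size=112)
# im = cv2.imread("face.jpg")              # hypothetical path
# aligned = aligner.face_aligned_mtcnn(im)  # 112x112 aligned BGR crop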
if __name__ == '__main__':
# with open('./dataset/landmarks.txt') as f:
# samples_list = []
# for line in f.readlines():
# # Parse txt file
# img_path, landmarks = LandmarkHelper.parse(line)
# image_path = os.path.join("./dataset", img_path)
# im = cv2.imread(image_path)
# image, landmarks = scale_and_shift(
# im, landmarks, scale_range=(1.1, 1.5), output_size=112)
# cv2.imshow("image", image)
# cv2.waitKey(0)
if not os.path.exists(args['new_path']):
os.mkdir(args['new_path'])
root_dir = args['base_dir']
df = pd.read_csv(args['landmark_csv'], header=None)
ldmarks = np.array(df.iloc[:, 1:])
ldmarks = ldmarks.reshape((-1, 106, 2)) * \
(args['output_size'], args['output_size'])
ref_leftpupil = np.mean(ldmarks[:, 34], axis=0)
ref_rightpupil = np.mean(ldmarks[:, 92], axis=0)
ref_nose = np.mean(ldmarks[:, 86], axis=0)
ref_array = np.stack(
[ref_leftpupil, ref_rightpupil, ref_nose], axis=0).astype(np.float32)
boxes = np.empty(
(df.shape[0], args['output_size'], args['output_size'], 3), dtype=np.uint8)
landmarks = np.empty((df.shape[0], 212))
for idx in tqdm(range(df.shape[0])):
im = cv2.imread(os.path.join(root_dir, df.iloc[idx, 0]))
im = cv2.resize(im, (args['output_size'], args['output_size']))
gt_ldmarks = ldmarks[idx]
gt = np.array(df.iloc[idx, 1:], dtype=np.float32).reshape(
(-1, 2)) * (args['output_size'], args['output_size'])
gt_leftpupil = gt[34]
gt_rightpupil = gt[92]
gt_nose = gt[86]
gt_array = np.stack(
[gt_leftpupil, gt_rightpupil, gt_nose], axis=0).astype(np.float32)
# M = cv2.getAffineTransform(gt_array, ref_array)
# Similar transformation
tform = transform.SimilarityTransform()
tform.estimate(gt_array, ref_array)
tfm = tform.params[0: 2, :]
dst = cv2.warpAffine(
im, tfm, (args['output_size'], args['output_size']))
b = np.ones((gt_ldmarks.shape[0], 1))
d = np.concatenate((gt_ldmarks, b), axis=1)
gt_ldmarks = np.dot(d, np.transpose(tfm))
boxes[idx] = dst
landmarks[idx] = (gt_ldmarks / (args['output_size'])).flatten()
# for ldmark in gt_ldmarks:
# cv2.circle(
# dst, (int(ldmark[0]), int(ldmark[1])), 2, (255, 0, 0), -1)
# cv2.imshow("image", dst)
# cv2.waitKey(0)
# Save image and new landmarks
ldmark_dict = dict()
for box, ldmark, num in tqdm(zip(boxes, landmarks, np.arange(df.shape[0]))):
cv2.imwrite("{}.png".format(
os.path.join(args['new_path'], str(num).zfill(5))), box)
ldmark_dict["{}.png".format(str(num).zfill(5))] = ldmark
df = pd.DataFrame(ldmark_dict).T
df.to_csv("{}/face_landmarks.csv".format(args['new_path']),
encoding="utf-8", header=None)
pprint("Complete conversion!!!")
| [
"pandas.read_csv",
"tensorflow.logging.set_verbosity",
"numpy.array",
"pprint.pprint",
"numpy.arange",
"numpy.mean",
"os.path.exists",
"argparse.ArgumentParser",
"skimage.transform.SimilarityTransform",
"numpy.stack",
"numpy.empty",
"os.mkdir",
"numpy.concatenate",
"pandas.DataFrame",
"m... | [((276, 318), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (300, 318), True, 'import tensorflow as tf\n'), ((432, 457), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (455, 457), False, 'import argparse\n'), ((4204, 4250), 'pandas.read_csv', 'pd.read_csv', (["args['landmark_csv']"], {'header': 'None'}), "(args['landmark_csv'], header=None)\n", (4215, 4250), True, 'import pandas as pd\n'), ((4266, 4290), 'numpy.array', 'np.array', (['df.iloc[:, 1:]'], {}), '(df.iloc[:, 1:])\n', (4274, 4290), True, 'import numpy as np\n'), ((4411, 4442), 'numpy.mean', 'np.mean', (['ldmarks[:, 34]'], {'axis': '(0)'}), '(ldmarks[:, 34], axis=0)\n', (4418, 4442), True, 'import numpy as np\n'), ((4464, 4495), 'numpy.mean', 'np.mean', (['ldmarks[:, 92]'], {'axis': '(0)'}), '(ldmarks[:, 92], axis=0)\n', (4471, 4495), True, 'import numpy as np\n'), ((4511, 4542), 'numpy.mean', 'np.mean', (['ldmarks[:, 86]'], {'axis': '(0)'}), '(ldmarks[:, 86], axis=0)\n', (4518, 4542), True, 'import numpy as np\n'), ((4660, 4749), 'numpy.empty', 'np.empty', (["(df.shape[0], args['output_size'], args['output_size'], 3)"], {'dtype': 'np.uint8'}), "((df.shape[0], args['output_size'], args['output_size'], 3), dtype=\n np.uint8)\n", (4668, 4749), True, 'import numpy as np\n'), ((4770, 4798), 'numpy.empty', 'np.empty', (['(df.shape[0], 212)'], {}), '((df.shape[0], 212))\n', (4778, 4798), True, 'import numpy as np\n'), ((6569, 6601), 'pprint.pprint', 'pprint', (['"""Complete conversion!!!"""'], {}), "('Complete conversion!!!')\n", (6575, 6601), False, 'from pprint import pprint\n'), ((2335, 2342), 'mtcnn.mtcnn.MTCNN', 'MTCNN', ([], {}), '()\n', (2340, 2342), False, 'from mtcnn.mtcnn import MTCNN\n'), ((2811, 2866), 'numpy.array', 'np.array', (['REFERENCE_FACIAL_POINTS[:2]'], {'dtype': 'np.float32'}), '(REFERENCE_FACIAL_POINTS[:2], dtype=np.float32)\n', (2819, 2866), True, 'import numpy as np\n'), ((2884, 2915), 'skimage.transform.SimilarityTransform', 'transform.SimilarityTransform', ([], {}), '()\n', (2913, 2915), False, 'from skimage import transform\n'), ((3012, 3067), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'tfm', '(self.out_size, self.out_size)'], {}), '(im, tfm, (self.out_size, self.out_size))\n', (3026, 3067), False, 'import cv2\n'), ((3263, 3318), 'numpy.array', 'np.array', (['REFERENCE_FACIAL_POINTS[:2]'], {'dtype': 'np.float32'}), '(REFERENCE_FACIAL_POINTS[:2], dtype=np.float32)\n', (3271, 3318), True, 'import numpy as np\n'), ((3336, 3367), 'skimage.transform.SimilarityTransform', 'transform.SimilarityTransform', ([], {}), '()\n', (3365, 3367), False, 'from skimage import transform\n'), ((4093, 4125), 'os.path.exists', 'os.path.exists', (["args['new_path']"], {}), "(args['new_path'])\n", (4107, 4125), False, 'import os\n'), ((4135, 4161), 'os.mkdir', 'os.mkdir', (["args['new_path']"], {}), "(args['new_path'])\n", (4143, 4161), False, 'import os\n'), ((4920, 4978), 'cv2.resize', 'cv2.resize', (['im', "(args['output_size'], args['output_size'])"], {}), "(im, (args['output_size'], args['output_size']))\n", (4930, 4978), False, 'import cv2\n'), ((5449, 5480), 'skimage.transform.SimilarityTransform', 'transform.SimilarityTransform', ([], {}), '()\n', (5478, 5480), False, 'from skimage import transform\n'), ((5575, 5642), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'tfm', "(args['output_size'], args['output_size'])"], {}), "(im, tfm, (args['output_size'], args['output_size']))\n", (5589, 5642), False, 'import cv2\n'), ((5669, 
5702), 'numpy.ones', 'np.ones', (['(gt_ldmarks.shape[0], 1)'], {}), '((gt_ldmarks.shape[0], 1))\n', (5676, 5702), True, 'import numpy as np\n'), ((5715, 5754), 'numpy.concatenate', 'np.concatenate', (['(gt_ldmarks, b)'], {'axis': '(1)'}), '((gt_ldmarks, b), axis=1)\n', (5729, 5754), True, 'import numpy as np\n'), ((6427, 6452), 'pandas.DataFrame', 'pd.DataFrame', (['ldmark_dict'], {}), '(ldmark_dict)\n', (6439, 6452), True, 'import pandas as pd\n'), ((3221, 3238), 'numpy.array', 'np.array', (['ldmarks'], {}), '(ldmarks)\n', (3229, 3238), True, 'import numpy as np\n'), ((3464, 3519), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'tfm', '(self.out_size, self.out_size)'], {}), '(im, tfm, (self.out_size, self.out_size))\n', (3478, 3519), False, 'import cv2\n'), ((4559, 4618), 'numpy.stack', 'np.stack', (['[ref_leftpupil, ref_rightpupil, ref_nose]'], {'axis': '(0)'}), '([ref_leftpupil, ref_rightpupil, ref_nose], axis=0)\n', (4567, 4618), True, 'import numpy as np\n'), ((4866, 4905), 'os.path.join', 'os.path.join', (['root_dir', 'df.iloc[idx, 0]'], {}), '(root_dir, df.iloc[idx, 0])\n', (4878, 4905), False, 'import os\n'), ((5786, 5803), 'numpy.transpose', 'np.transpose', (['tfm'], {}), '(tfm)\n', (5798, 5803), True, 'import numpy as np\n'), ((6220, 6242), 'numpy.arange', 'np.arange', (['df.shape[0]'], {}), '(df.shape[0])\n', (6229, 6242), True, 'import numpy as np\n'), ((5252, 5308), 'numpy.stack', 'np.stack', (['[gt_leftpupil, gt_rightpupil, gt_nose]'], {'axis': '(0)'}), '([gt_leftpupil, gt_rightpupil, gt_nose], axis=0)\n', (5260, 5308), True, 'import numpy as np\n'), ((2753, 2769), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2761, 2769), True, 'import numpy as np\n'), ((5027, 5071), 'numpy.array', 'np.array', (['df.iloc[idx, 1:]'], {'dtype': 'np.float32'}), '(df.iloc[idx, 1:], dtype=np.float32)\n', (5035, 5071), True, 'import numpy as np\n')] |
import h5py
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from copy import deepcopy
#group a set of img patches
def group_images(data,per_row):
assert data.shape[0]%per_row==0
assert (data.shape[1]==1 or data.shape[1]==3)
data = np.transpose(data,(0,2,3,1))
all_stripe = []
for i in range(int(data.shape[0]/per_row)):
stripe = data[i*per_row]
for k in range(i*per_row+1, i*per_row+per_row):
stripe = np.concatenate((stripe,data[k]),axis=1)
all_stripe.append(stripe)
totimg = all_stripe[0]
for i in range(1,len(all_stripe)):
totimg = np.concatenate((totimg,all_stripe[i]),axis=0)
return totimg
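# e.g. a quick shape sanity check (random values, assumed dimensions):
# batch = np.random.rand(8, 1, 32, 32)
# grid = group_images(batch, per_row=4)   # -> shape (2*32, 4*32, 1)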
# Concatenate prediction results side by side (original image, predicted probability, binarized image, ground truth)
def concat_result(ori_img,pred_res,gt):
    ori_img = np.transpose(ori_img, (1, 2, 0))
    pred_res = np.transpose(pred_res, (1, 2, 0))
    gt = np.transpose(gt, (1, 2, 0))
binary = deepcopy(pred_res)
binary[binary>=0.5]=1
binary[binary<0.5]=0
if ori_img.shape[2]==3:
pred_res = np.repeat((pred_res*255).astype(np.uint8),repeats=3,axis=2)
binary = np.repeat((binary*255).astype(np.uint8),repeats=3,axis=2)
gt = np.repeat((gt*255).astype(np.uint8),repeats=3,axis=2)
total_img = np.concatenate((ori_img,pred_res,binary,gt),axis=1)
return total_img
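# e.g. for CHW inputs the four panels are concatenated along the width
# (random values, assumed dimensions):
# ori = np.random.rand(3, 64, 64)
# prob = np.random.rand(1, 64, 64)
# panel = concat_result(ori, prob, np.random.rand(1, 64, 64))
# panel.shape == (64, 4 * 64, 3)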
#visualize image, save as PIL image
def save_img(data,filename):
assert (len(data.shape)==3) #height*width*channels
if data.shape[2]==1: #in case it is black and white
data = np.reshape(data,(data.shape[0],data.shape[1]))
img = Image.fromarray(data.astype(np.uint8)) #the image is between 0-1
img.save(filename)
    return img
| [
"numpy.transpose",
"numpy.reshape",
"numpy.concatenate",
"copy.deepcopy"
] | [((285, 317), 'numpy.transpose', 'np.transpose', (['data', '(0, 2, 3, 1)'], {}), '(data, (0, 2, 3, 1))\n', (297, 317), True, 'import numpy as np\n'), ((881, 913), 'numpy.transpose', 'np.transpose', (['ori_img', '(1, 2, 0)'], {}), '(ori_img, (1, 2, 0))\n', (893, 913), True, 'import numpy as np\n'), ((934, 967), 'numpy.transpose', 'np.transpose', (['pred_res', '(1, 2, 0)'], {}), '(pred_res, (1, 2, 0))\n', (946, 967), True, 'import numpy as np\n'), ((982, 1009), 'numpy.transpose', 'np.transpose', (['gt', '(1, 2, 0)'], {}), '(gt, (1, 2, 0))\n', (994, 1009), True, 'import numpy as np\n'), ((1023, 1041), 'copy.deepcopy', 'deepcopy', (['pred_res'], {}), '(pred_res)\n', (1031, 1041), False, 'from copy import deepcopy\n'), ((1369, 1424), 'numpy.concatenate', 'np.concatenate', (['(ori_img, pred_res, binary, gt)'], {'axis': '(1)'}), '((ori_img, pred_res, binary, gt), axis=1)\n', (1383, 1424), True, 'import numpy as np\n'), ((658, 705), 'numpy.concatenate', 'np.concatenate', (['(totimg, all_stripe[i])'], {'axis': '(0)'}), '((totimg, all_stripe[i]), axis=0)\n', (672, 705), True, 'import numpy as np\n'), ((1642, 1690), 'numpy.reshape', 'np.reshape', (['data', '(data.shape[0], data.shape[1])'], {}), '(data, (data.shape[0], data.shape[1]))\n', (1652, 1690), True, 'import numpy as np\n'), ((497, 538), 'numpy.concatenate', 'np.concatenate', (['(stripe, data[k])'], {'axis': '(1)'}), '((stripe, data[k]), axis=1)\n', (511, 538), True, 'import numpy as np\n')] |
import csv
import os
import torch
from torch.optim import *
import torchvision
from torchvision.transforms import *
from scipy import stats
from sklearn import metrics
import numpy as np
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
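# e.g. tracking a running mean over mini-batches:
# losses = AverageMeter()
# losses.update(0.9, n=32)   # batch loss 0.9 averaged over 32 samples
# losses.update(0.7, n=32)
# losses.avg                 # -> 0.8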
class Logger(object):
def __init__(self, path, header):
self.log_file = open(path, 'w')
self.logger = csv.writer(self.log_file, delimiter='\t')
self.logger.writerow(header)
self.header = header
    def __del__(self):
self.log_file.close()
def log(self, values):
write_values = []
for col in self.header:
assert col in values
write_values.append(values[col])
self.logger.writerow(write_values)
self.log_file.flush()
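# e.g. one tab-separated row per epoch, keyed by the header columns
# (hypothetical log path):
# logger = Logger('train.log', ['epoch', 'loss', 'acc'])
# logger.log({'epoch': 1, 'loss': 0.8, 'acc': 71.2})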
def accuracy(output, target, topk=(1, 5)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res, pred
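# e.g. with a (batch, classes) score tensor and integer class targets:
# output = torch.randn(16, 10)
# target = torch.randint(0, 10, (16,))
# (top1, top5), pred = accuracy(output, target, topk=(1, 5))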
def reverseTransform(img):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if len(img.shape) == 5:
for i in range(3):
img[:, i, :, :, :] = img[:, i, :, :, :]*std[i] + mean[i]
else:
for i in range(3):
img[:, i, :, :] = img[:, i, :, :]*std[i] + mean[i]
return img
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
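# Since d' = sqrt(2) * Phi^{-1}(AUC), chance performance maps to zero:
# d_prime(0.5)     # -> 0.0
# d_prime(0.8413)  # -> ~1.414 (ppf(0.8413) ~ 1.0)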
def calculate_stats(output, target):
"""Calculate statistics including mAP, AUC, etc.
Args:
output: 2d array, (samples_num, classes_num)
target: 2d array, (samples_num, classes_num)
Returns:
stats: list of statistic of each class.
"""
classes_num = target.shape[-1]
stats = []
# Class-wise statistics
for k in range(classes_num):
# Average precision
avg_precision = metrics.average_precision_score(
target[:, k], output[:, k], average=None)
# AUC
auc = metrics.roc_auc_score(target[:, k], output[:, k], average=None)
# Precisions, recalls
(precisions, recalls, thresholds) = metrics.precision_recall_curve(
target[:, k], output[:, k])
# FPR, TPR
(fpr, tpr, thresholds) = metrics.roc_curve(target[:, k], output[:, k])
save_every_steps = 1000 # Sample statistics to reduce size
        stat = {'precisions': precisions[0::save_every_steps],
                'recalls': recalls[0::save_every_steps],
                'AP': avg_precision,
                'fpr': fpr[0::save_every_steps],
                'fnr': 1. - tpr[0::save_every_steps],
                'auc': auc}
        stats.append(stat)
return stats
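# e.g. for multi-label scores/targets of shape (samples, classes)
# (hypothetical arrays):
# per_class = calculate_stats(scores, labels)
# mAP = np.mean([s['AP'] for s in per_class])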
| [
"numpy.sqrt",
"scipy.stats.norm",
"sklearn.metrics.average_precision_score",
"csv.writer",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.roc_auc_score",
"scipy.stats.append",
"sklearn.metrics.roc_curve"
] | [((1933, 1945), 'scipy.stats.norm', 'stats.norm', ([], {}), '()\n', (1943, 1945), False, 'from scipy import stats\n'), ((704, 745), 'csv.writer', 'csv.writer', (['self.log_file'], {'delimiter': '"""\t"""'}), "(self.log_file, delimiter='\\t')\n", (714, 745), False, 'import csv\n'), ((1987, 1999), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1994, 1999), True, 'import numpy as np\n'), ((2458, 2531), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['target[:, k]', 'output[:, k]'], {'average': 'None'}), '(target[:, k], output[:, k], average=None)\n', (2489, 2531), False, 'from sklearn import metrics\n'), ((2574, 2637), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['target[:, k]', 'output[:, k]'], {'average': 'None'}), '(target[:, k], output[:, k], average=None)\n', (2595, 2637), False, 'from sklearn import metrics\n'), ((2713, 2771), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['target[:, k]', 'output[:, k]'], {}), '(target[:, k], output[:, k])\n', (2743, 2771), False, 'from sklearn import metrics\n'), ((2838, 2883), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['target[:, k]', 'output[:, k]'], {}), '(target[:, k], output[:, k])\n', (2855, 2883), False, 'from sklearn import metrics\n'), ((3252, 3270), 'scipy.stats.append', 'stats.append', (['dict'], {}), '(dict)\n', (3264, 3270), False, 'from scipy import stats\n')] |