code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from numpy import dot, linalg, average, array
import heapq
from math import trunc
class NLargest(list):
    """A list-backed min-heap that retains only the ``size`` largest elements.

    The heap invariant keeps the smallest retained element at index 0, so a
    candidate only needs to be compared against ``self[0]`` once at capacity.
    """

    def __init__(self, size):
        # Maximum number of elements this container may hold.
        self.__size = size

    def add(self, element):
        """Offer ``element``; keep it only if it ranks among the largest.

        Bug fix: the original fell through to ``heappush`` even when the
        container was full and ``element`` was not larger than the current
        minimum, growing the list past its nominal capacity.
        """
        if len(self) < self.__size:
            # Below capacity: always accept.
            heapq.heappush(self, element)
        elif self[0] < element:
            # At capacity: displace the current minimum only for a larger element.
            heapq.heapreplace(self, element)
class Recommendation:
    """User-based collaborative filtering over a ``{user: {item: rating}}`` map.

    Similarity between two users is the cosine of their rating vectors taken
    over the union of their rated items; recommendations for a user are the
    similarity-weighted ratings of the nearest neighbors for items the user
    has not rated yet.
    """

    def __init__(self, data, numRec, numNeigh):
        # data: mapping user -> {item: rating}
        self.__data = data
        # Number of recommendations returned per user.
        self.__numRec = numRec
        # Number of nearest neighbors considered per user.
        self.__numNeighbors = numNeigh

    def __cosDistance(self, vector1, vector2):
        """Cosine similarity of two rating vectors, truncated to 4 decimals."""
        preferences1 = array(vector1)
        preferences2 = array(vector2)
        result = dot(preferences1, preferences2) / (linalg.norm(preferences1) * linalg.norm(preferences2))
        # Round first, then truncate, so near-equal similarities compare consistently.
        return trunc(round(result, 4) * 10000) / 10000

    def __calculateDistances(self, user1, user2):
        """Return ``(similarity, user2)`` for the two users' rating vectors."""
        vector1 = []
        vector2 = []
        currUserKeys = self.__data[user1].keys()
        # Build dense vectors over the union of both users' items,
        # filling unrated items with 0.
        for item in set().union(currUserKeys, self.__data[user2].keys()):
            vector1.append(self.__data[user1].get(item, 0))
            vector2.append(self.__data[user2].get(item, 0))
        distance = self.__cosDistance(vector1, vector2)
        return (distance, user2)

    def __neighbors(self, userId):
        """Collect the N most similar users to ``userId`` (excluding itself)."""
        distances = NLargest(self.__numNeighbors)
        for user in self.__data:
            if user == userId:
                continue
            distances.add(self.__calculateDistances(userId, user))
        return distances

    def __compare(self, user, neighbor, total, recommendations):
        """Accumulate ``neighbor``'s ratings for items ``user`` has not rated.

        Each rating is weighted by the neighbor's share of the total
        similarity; running sums are truncated to 4 decimals in place.
        """
        distance, neighborId = neighbor
        # Fall back to a uniform weight when all similarities sum to zero.
        weight = distance / total if total != 0 else 1 / self.__numNeighbors
        userItems = self.__data[user]
        neighborItems = self.__data[neighborId]
        for item in neighborItems:
            if item in userItems:
                continue
            if item not in recommendations:
                recommendations[item] = trunc(neighborItems[item] * weight * 10000) / 10000
            else:
                recommendations[item] = trunc((recommendations[item] + neighborItems[item] * weight) * 10000) / 10000

    def recommend(self, userId):
        """Return up to ``numRec`` item ids recommended for ``userId``."""
        neighbors = self.__neighbors(userId)
        total = sum(i for i, j in neighbors)
        recommendations = {}
        for neighbor in neighbors:
            self.__compare(userId, neighbor, total, recommendations)
        recList = [(score, item) for item, score in recommendations.items()]
        # heapq.nlargest accepts any iterable, so the previous heapify (and the
        # leftover debug print) were unnecessary.
        return [rec[1] for rec in heapq.nlargest(self.__numRec, recList)]

    def allRecommendations(self):
        """Map every user to their recommendation list."""
        return {user: self.recommend(user) for user in self.__data}
| [
"heapq.heappush",
"heapq.heapify",
"heapq.nlargest",
"heapq.heapreplace",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"math.trunc"
] | [((297, 326), 'heapq.heappush', 'heapq.heappush', (['self', 'element'], {}), '(self, element)\n', (311, 326), False, 'import heapq\n'), ((546, 560), 'numpy.array', 'array', (['vector1'], {}), '(vector1)\n', (551, 560), False, 'from numpy import dot, linalg, average, array\n'), ((580, 594), 'numpy.array', 'array', (['vector2'], {}), '(vector2)\n', (585, 594), False, 'from numpy import dot, linalg, average, array\n'), ((2406, 2428), 'heapq.heapify', 'heapq.heapify', (['recList'], {}), '(recList)\n', (2419, 2428), False, 'import heapq\n'), ((246, 278), 'heapq.heapreplace', 'heapq.heapreplace', (['self', 'element'], {}), '(self, element)\n', (263, 278), False, 'import heapq\n'), ((608, 639), 'numpy.dot', 'dot', (['preferences1', 'preferences2'], {}), '(preferences1, preferences2)\n', (611, 639), False, 'from numpy import dot, linalg, average, array\n'), ((643, 668), 'numpy.linalg.norm', 'linalg.norm', (['preferences1'], {}), '(preferences1)\n', (654, 668), False, 'from numpy import dot, linalg, average, array\n'), ((671, 696), 'numpy.linalg.norm', 'linalg.norm', (['preferences2'], {}), '(preferences2)\n', (682, 696), False, 'from numpy import dot, linalg, average, array\n'), ((2511, 2549), 'heapq.nlargest', 'heapq.nlargest', (['self.__numRec', 'recList'], {}), '(self.__numRec, recList)\n', (2525, 2549), False, 'import heapq\n'), ((1897, 1940), 'math.trunc', 'trunc', (['(neighborItems[item] * weight * 10000)'], {}), '(neighborItems[item] * weight * 10000)\n', (1902, 1940), False, 'from math import trunc\n'), ((1994, 2063), 'math.trunc', 'trunc', (['((recommendations[item] + neighborItems[item] * weight) * 10000)'], {}), '((recommendations[item] + neighborItems[item] * weight) * 10000)\n', (1999, 2063), False, 'from math import trunc\n')] |
import argparse
import logging
import random
import numpy as np
import torch
from networks.meta_learner import MetaLearner
# Seed every RNG in use (python stdlib, numpy, torch) for reproducible runs.
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
class InitiateTraining(object):
    """Thin wrapper that configures a ``MetaLearner`` from parsed CLI args."""

    def __init__(self, args):
        # Copy the CLI options onto the instance.  The mapping records where
        # the local attribute name differs from the argparse destination.
        option_names = {
            'dataset': 'dataset',
            'data_path': 'data_path',
            'num_tasks': 'num_tasks',
            'num_instances': 'num_instances',
            'meta_bs': 'meta_batch',
            'base_bs': 'base_batch',
            'meta_lr': 'meta_lr',
            'base_lr': 'base_lr',
            'epochs': 'epochs',
            'base_updates': 'base_updates',
            'experiment': 'experiment',
        }
        for attr, opt in option_names.items():
            setattr(self, attr, getattr(args, opt))
        # Hand every setting over to the meta-learner up front.
        self.meta_learner = MetaLearner(
            dataset=self.dataset,
            data_path=self.data_path,
            num_tasks=self.num_tasks,
            num_instances=self.num_instances,
            meta_batch=self.meta_bs,
            meta_lr=self.meta_lr,
            base_batch=self.base_bs,
            base_lr=self.base_lr,
            meta_updates=self.epochs,
            base_updates=self.base_updates,
            experiment=self.experiment,
        )

    def start_training(self):
        """Run the meta-training loop."""
        self.meta_learner.train()
def _build_arg_parser():
    """Assemble the command-line interface for the training entry point."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', help='Name of the dataset', default='WorldExpo', type=str)
    parser.add_argument('-trp', '--data_path', help='Path of the dataset', required=True, type=str)
    parser.add_argument('-nt', '--num_tasks', help='Number of tasks for training', default=10, type=int)
    parser.add_argument('-ni', '--num_instances', help='Number of instances per task for training', default=5, type=int)
    parser.add_argument('-mb', '--meta_batch', help='Batch size for meta network', default=32, type=int)
    parser.add_argument('-bb', '--base_batch', help='Batch size for base network', default=1, type=int)
    parser.add_argument('-mlr', '--meta_lr', help='Meta learning rate', default=1e-5, type=float)
    parser.add_argument('-blr', '--base_lr', help='Base learning rate', default=1e-5, type=float)
    parser.add_argument('-e', '--epochs', help='Number of training epochs', default=15000, type=int)
    parser.add_argument('-bu', '--base_updates', help='Iterations for base network to train', default=1, type=int)
    parser.add_argument('-exp', '--experiment', help='Experiment number', default=0, type=int)
    parser.add_argument('-log', '--log_name', help='Name of logging file', type=str, required=True)
    return parser


if __name__ == '__main__':
    args = _build_arg_parser().parse_args()
    # Log to the file named on the command line.
    logging.basicConfig(filename=args.log_name, level=logging.INFO)
    logging.info('Started training')
    InitiateTraining(args).start_training()
    logging.info('Finished training')
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.basicConfig",
"torch.manual_seed",
"logging.info",
"random.seed",
"networks.meta_learner.MetaLearner"
] | [((126, 140), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (137, 140), False, 'import random\n'), ((141, 158), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (155, 158), True, 'import numpy as np\n'), ((159, 179), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (176, 179), False, 'import torch\n'), ((1275, 1300), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1298, 1300), False, 'import argparse\n'), ((2581, 2644), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'args.log_name', 'level': 'logging.INFO'}), '(filename=args.log_name, level=logging.INFO)\n', (2600, 2644), False, 'import logging\n'), ((2649, 2681), 'logging.info', 'logging.info', (['"""Started training"""'], {}), "('Started training')\n", (2661, 2681), False, 'import logging\n'), ((2744, 2777), 'logging.info', 'logging.info', (['"""Finished training"""'], {}), "('Finished training')\n", (2756, 2777), False, 'import logging\n'), ((708, 1025), 'networks.meta_learner.MetaLearner', 'MetaLearner', ([], {'dataset': 'self.dataset', 'data_path': 'self.data_path', 'num_tasks': 'self.num_tasks', 'num_instances': 'self.num_instances', 'meta_batch': 'self.meta_bs', 'meta_lr': 'self.meta_lr', 'base_batch': 'self.base_bs', 'base_lr': 'self.base_lr', 'meta_updates': 'self.epochs', 'base_updates': 'self.base_updates', 'experiment': 'self.experiment'}), '(dataset=self.dataset, data_path=self.data_path, num_tasks=self.\n num_tasks, num_instances=self.num_instances, meta_batch=self.meta_bs,\n meta_lr=self.meta_lr, base_batch=self.base_bs, base_lr=self.base_lr,\n meta_updates=self.epochs, base_updates=self.base_updates, experiment=\n self.experiment)\n', (719, 1025), False, 'from networks.meta_learner import MetaLearner\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from test_softmax_op import stable_softmax
import paddle.fluid.core as core
class TestSequenceSoftmaxOp(OpTest):
    """Checks sequence_softmax against a per-sequence reference softmax."""

    def setUp(self):
        self.op_type = "sequence_softmax"
        self.use_cudnn = False
        self.init_op_type()

        # 11 values split into sequences of lengths 4, 1, 3, 3 by the LoD.
        x = np.random.uniform(0.1, 1, (11, 1)).astype("float32")
        lod = [[4, 1, 3, 3]]
        out = np.zeros((11, 1)).astype("float32")
        start = 0
        for length in lod[0]:
            # Softmax is applied independently within each sequence segment.
            segment = x[start:start + length, :].reshape(1, length)
            out[start:start + length, :] = stable_softmax(segment).reshape(length, 1)
            start += length

        self.inputs = {"X": (x, lod)}
        self.outputs = {"Out": out}
        self.attrs = {'use_cudnn': self.use_cudnn, }

    def init_op_type(self):
        # Hook for subclasses to flip flags before inputs are built.
        pass

    def test_check_output(self):
        if not self.use_cudnn:
            self.check_output()
        else:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)

    def test_check_grad(self):
        if not self.use_cudnn:
            self.check_grad(["X"], "Out", max_relative_error=0.01)
        else:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ["X"], "Out", max_relative_error=0.01)
# ----------------cudnn Sequencesoftmax----------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
                "core is not compiled with CUDA")
class TestSequenceSoftmaxCUDNNOp(TestSequenceSoftmaxOp):
    """Re-runs the sequence_softmax checks with the cuDNN code path enabled."""

    def init_op_type(self):
        # Overridden hook: flip the flag before setUp builds inputs/attrs.
        self.use_cudnn = True
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
"unittest.main",
"numpy.random.uniform",
"paddle.fluid.core.CUDAPlace",
"test_softmax_op.stable_softmax",
"numpy.zeros",
"paddle.fluid.core.is_compiled_with_cuda"
] | [((2330, 2345), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2343, 2345), False, 'import unittest\n'), ((2101, 2129), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (2127, 2129), True, 'import paddle.fluid.core as core\n'), ((1236, 1257), 'test_softmax_op.stable_softmax', 'stable_softmax', (['sub_x'], {}), '(sub_x)\n', (1250, 1257), False, 'from test_softmax_op import stable_softmax\n'), ((1619, 1636), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (1633, 1636), True, 'import paddle.fluid.core as core\n'), ((1821, 1838), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (1835, 1838), True, 'import paddle.fluid.core as core\n'), ((925, 959), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(1)', '(11, 1)'], {}), '(0.1, 1, (11, 1))\n', (942, 959), True, 'import numpy as np\n'), ((1022, 1039), 'numpy.zeros', 'np.zeros', (['(11, 1)'], {}), '((11, 1))\n', (1030, 1039), True, 'import numpy as np\n')] |
"""
Straight-ray 2D travel-time tomography (i.e., does not consider reflection or
refraction)
**Solver**
* :class:`~fatiando.seismic.srtomo.SRTomo`: Data misfit class that runs the
tomography.
**Functions**
* :func:`~fatiando.seismic.srtomo.slowness2vel`: Safely convert slowness to
velocity (avoids zero division)
**Examples**
----
"""
from __future__ import division
import numpy
import scipy.sparse
from ..inversion.base import Misfit
from ..utils import safe_dot
from . import ttime2d
class SRTomo(Misfit):
    """
    2D travel-time straight-ray tomography.

    Use the :meth:`~fatiando.seismic.srtomo.SRTomo.fit` method to run the
    tomography and produce a velocity estimate. The estimate is stored in the
    ``estimate_`` attribute.

    Generally requires regularization, like
    :class:`~fatiando.inversion.regularization.Damping` or
    :class:`~fatiando.inversion.regularization.Smoothness2D`.

    Parameters:

    * ttimes : array
        Array with the travel-times of the straight seismic rays.
    * srcs : list of lists
        List of the [x, y] positions of the sources.
    * recs : list of lists
        List of the [x, y] positions of the receivers.
    * mesh : :class:`~fatiando.mesher.SquareMesh` or compatible
        The mesh where the inversion (tomography) will take place.

    The ith travel-time is the time between the ith element in *srcs* and the
    ith element in *recs*.

    Examples:

    Using simple synthetic data:

    >>> from fatiando.mesher import Square, SquareMesh
    >>> from fatiando.seismic import ttime2d
    >>> # One source was recorded at 3 receivers.
    >>> # The medium has 2 velocities: 2 and 5
    >>> model = [Square([0, 10, 0, 5], {'vp':2}),
    ...          Square([0, 10, 5, 10], {'vp':5})]
    >>> src = (5, 0)
    >>> srcs = [src, src, src]
    >>> recs = [(0, 0), (5, 10), (10, 0)]
    >>> # Calculate the synthetic travel-times
    >>> ttimes = ttime2d.straight(model, 'vp', srcs, recs)
    >>> print ttimes
    [ 2.5  3.5  2.5]
    >>> # Make a mesh to represent the two blocks
    >>> mesh = SquareMesh((0, 10, 0, 10), shape=(2, 1))
    >>> # Run the tomography
    >>> tomo = SRTomo(ttimes, srcs, recs, mesh)
    >>> tomo.fit().estimate_
    array([ 2.,  5.])

    Using the steepest descent method to solve (no linear systems):

    >>> # Use steepest descent to solve this (requires an initial guess)
    >>> tomo.config(method='steepest', initial=[0, 0]).fit().estimate_
    array([ 2.,  5.])

    .. note::

        A simple way to plot the results is to use the ``addprop`` method of
        the mesh and then pass the mesh to :func:`fatiando.vis.map.squaremesh`.

    """

    def __init__(self, ttimes, srcs, recs, mesh):
        super(SRTomo, self).__init__(
            data=ttimes,
            positional=dict(srcs=srcs, recs=recs),
            model=dict(mesh=mesh),
            nparams=mesh.size, islinear=True)

    def _get_jacobian(self, p):
        """
        Build the Jacobian (sensitivity) matrix using the travel-time data
        stored.
        """
        srcs, recs = self.positional['srcs'], self.positional['recs']
        i, j, v = [], [], []
        for k, c in enumerate(self.model['mesh']):
            # Travel-time of each ray through cell ``c`` at unit velocity,
            # i.e. the length of the ray path inside that cell.
            column = ttime2d.straight([c], '', srcs, recs,
                                      velocity=1.)
            nonzero = numpy.flatnonzero(column)
            # Only keep the rays that actually cross this cell, so the
            # Jacobian can be assembled as a sparse COO matrix below.
            i.extend(nonzero)
            j.extend(k * numpy.ones_like(nonzero))
            v.extend(column[nonzero])
        shape = (self.ndata, self.nparams)
        return scipy.sparse.coo_matrix((v, (i, j)), shape).tocsr()

    def _get_predicted(self, p):
        # Predicted travel-times: (sparse) Jacobian times the slowness vector.
        pred = safe_dot(self.jacobian(p), p)
        if len(pred.shape) > 1:
            # safe_dot may return a 2D matrix; flatten to a 1D array.
            pred = numpy.array(pred.T).ravel()
        return pred

    def fit(self):
        """
        Solve the tomography for the velocity of each cell.

        Actually solves for the slowness to make the inverse problem linear.
        The ``estimate_`` attribute holds the estimated velocities and ``p_``
        the respective slownesses.

        See the docstring of :class:`~fatiando.seismic.srtomo.SRTomo` for
        examples.
        """
        super(SRTomo, self).fit()
        # ``p_`` (set by the base class) holds slowness; expose velocity as
        # the public estimate, zeroing out near-zero slowness safely.
        self._estimate = slowness2vel(self.p_, tol=10 ** -8)
        return self
def slowness2vel(slowness, tol=10 ** (-8)):
    """
    Safely convert slowness to velocity.

    Entries with slowness below *tol* are mapped to zero velocity instead of
    triggering a division by (nearly) zero.

    Parameters:

    * slowness : array
        The slowness values
    * tol : float
        Slowness < tol will be set to 0 velocity

    Returns:

    * velocity : array
        The converted velocities

    Examples:

    >>> import numpy as np
    >>> slow = np.array([1, 2, 0.000001, 4])
    >>> slowness2vel(slow, tol=0.00001)
    array([ 1.  ,  0.5 ,  0.  ,  0.25])

    """
    velocity = numpy.array(slowness)
    near_zero = slowness < tol
    velocity[near_zero] = 0
    velocity[~near_zero] = 1. / slowness[~near_zero]
    return velocity
| [
"numpy.flatnonzero",
"numpy.array",
"numpy.ones_like"
] | [((4827, 4848), 'numpy.array', 'numpy.array', (['slowness'], {}), '(slowness)\n', (4838, 4848), False, 'import numpy\n'), ((3348, 3373), 'numpy.flatnonzero', 'numpy.flatnonzero', (['column'], {}), '(column)\n', (3365, 3373), False, 'import numpy\n'), ((3429, 3453), 'numpy.ones_like', 'numpy.ones_like', (['nonzero'], {}), '(nonzero)\n', (3444, 3453), False, 'import numpy\n'), ((3733, 3752), 'numpy.array', 'numpy.array', (['pred.T'], {}), '(pred.T)\n', (3744, 3752), False, 'import numpy\n')] |
import copy
import textwrap
import astropy.constants as const
import astropy.units as u
import numpy as np
import xarray as xr
from scipy import interpolate
import psipy.visualization as viz
from psipy.util.decorators import add_common_docstring
__all__ = ['Variable']
# Some docstrings that are used more than once
quad_mesh_link = ':class:`~matplotlib.collections.QuadMesh`'
# TODO: fix this to ':class:`~matplotlib.animation.FuncAnimation`'
animation_link = 'animation'
returns_doc = textwrap.indent(f"""
{quad_mesh_link} or {animation_link}
If a timestep is specified, the {quad_mesh_link} of the plot is returned.
Otherwise an {animation_link} is returned.
""", ' ')
class Variable:
    """
    A single scalar variable.

    This class primarily contains methods for plotting data. It can be created
    with any `xarray.DataArray` that has ``['theta', 'phi', 'r', 'time']``
    fields.

    Parameters
    ----------
    data : xarray.Dataset
        Variable data.
    name : str
        Variable name.
    unit : astropy.units.Quantity
        Variable unit.
    """
    def __init__(self, data, name, unit):
        # Convert from xarray Dataset to DataArray
        self._data = data[name]
        # Sort the data once now for any interpolation later
        self._data = self._data.sortby(['phi', 'theta', 'r', 'time'])
        self.name = name
        self._unit = unit

    def __str__(self):
        return textwrap.dedent(f'''
        Variable
        --------
        Name: {self.name}
        Grid size: {len(self.phi_coords), len(self.theta_coords), len(self.r_coords)} (phi, theta, r)
        Timesteps: {len(self.time_coords)}
        ''')

    @property
    def data(self):
        """
        `xarray.DataArray` with the data.
        """
        return self._data

    @property
    def unit(self):
        """
        Units of the scalar data.
        """
        return self._unit

    @unit.setter
    def unit(self, new_unit):
        # This line will error if units aren't compatible
        conversion = float(1 * self._unit / new_unit)
        self._data *= conversion
        self._unit = new_unit

    @property
    def r_coords(self):
        """
        Radial coordinate values.
        """
        return self._data.coords['r'].values

    @r_coords.setter
    def r_coords(self, coords):
        self._data.coords['r'] = coords

    @property
    def theta_coords(self):
        """
        Latitude coordinate values.
        """
        return self._data.coords['theta'].values

    @property
    def phi_coords(self):
        """
        Longitude coordinate values.
        """
        return self._data.coords['phi'].values

    @property
    def time_coords(self):
        """
        Timestep coordinate values.
        """
        return self._data.coords['time'].values

    @property
    def n_timesteps(self):
        """
        Number of timesteps.
        """
        return len(self.time_coords)

    def radial_normalized(self, radial_exponent):
        r"""
        Return a radially normalised copy of this variable.

        Multiplies the variable by :math:`(r / r_{\odot})^{\gamma}`,
        where :math:`\gamma` = ``radial_exponent`` is the given exponent.

        Parameters
        ----------
        radial_exponent : float

        Returns
        -------
        Variable
        """
        r = self.data.coords['r']
        rsun_au = float(u.AU / const.R_sun)
        data = self.data * (r * rsun_au)**radial_exponent
        units = self.unit * (u.AU)**radial_exponent
        name = self.name + f' $r^{radial_exponent}$'
        return Variable(xr.Dataset({name: data}), name, units)

    # Methods for radial cuts
    @add_common_docstring(returns_doc=returns_doc)
    def plot_radial_cut(self, r_idx, t_idx=None, ax=None, **kwargs):
        """
        Plot a radial cut.

        Parameters
        ----------
        r_idx : int
            Radial index at which to slice the data.
        t_idx : int, optional
            Time index at which to slice the data. If not given, an anmiation
            will be created across all time indices.
        ax : matplolit.axes.Axes, optional
            axes on which to plot. Defaults to current axes if not specified.
        kwargs :
            Additional keyword arguments are passed to
            `xarray.plot.pcolormesh`.

        Returns
        -------
        {returns_doc}
        """
        r_slice = self.data.isel(r=r_idx)
        time_slice = r_slice.isel(time=t_idx or 0)
        # Setup axes
        ax = viz.setup_radial_ax(ax)
        # Set colorbar string
        kwargs = self._set_cbar_label(kwargs, self.unit.to_string('latex'))
        quad_mesh = time_slice.plot(x='phi', y='theta', ax=ax, **kwargs)
        # Plot formatting
        r = r_slice['r'].values
        ax.set_title(f'{self.name}, r={r:.2f}' + r'$R_{\odot}$')
        viz.format_radial_ax(ax)
        if t_idx is not None or self.n_timesteps == 1:
            return quad_mesh
        else:
            return viz.animate_time(ax, r_slice, quad_mesh)

    def contour_radial_cut(self, r_idx, levels, t_idx=0, ax=None, **kwargs):
        """
        Plot contours on a radial cut.

        Parameters
        ----------
        r_idx : int
            Radial index at which to slice the data.
        levels : list
            List of levels to contour.
        t_idx : int, optional
            Time index at which to slice the data.
        ax : matplolit.axes.Axes, optional
            axes on which to plot. Defaults to current axes if not specified.
        kwargs :
            Additional keyword arguments are passed to `xarray.plot.contour`.
        """
        ax = viz.setup_radial_ax(ax)
        sliced = self.data.isel(r=r_idx, time=t_idx)
        # Need to save a copy of the title to reset it later, since xarray
        # tries to set it's own title that we don't want
        title = ax.get_title()
        xr.plot.contour(sliced, x='phi', y='theta', ax=ax,
                        levels=levels, **kwargs)
        ax.set_title(title)
        viz.format_radial_ax(ax)

    @add_common_docstring(returns_doc=returns_doc)
    def plot_phi_cut(self, phi_idx, t_idx=None, ax=None, **kwargs):
        """
        Plot a phi cut.

        Parameters
        ----------
        phi_idx : int
            Index at which to slice the data.
        t_idx : int, optional
            Time index at which to slice the data. If not given, an anmiation
            will be created across all time indices.
        ax : matplolit.axes.Axes, optional
            axes on which to plot. Defaults to current axes if not specified.
        kwargs :
            Additional keyword arguments are passed to
            `xarray.plot.pcolormesh`.

        Returns
        -------
        {returns_doc}
        """
        phi_slice = self.data.isel(phi=phi_idx)
        time_slice = phi_slice.isel(time=t_idx or 0)
        ax = viz.setup_polar_ax(ax)
        kwargs = self._set_cbar_label(kwargs, self.unit.to_string('latex'))
        # Take slice of data and plot
        quad_mesh = time_slice.plot(x='theta', y='r', ax=ax, **kwargs)
        viz.format_polar_ax(ax)
        phi = np.rad2deg(time_slice['phi'].values)
        ax.set_title(f'{self.name}, ' + r'$\phi$= ' + f'{phi:.2f}' +
                     r'$^{\circ}$')
        if t_idx is not None or self.n_timesteps == 1:
            return quad_mesh
        else:
            return viz.animate_time(ax, phi_slice, quad_mesh)

    def contour_phi_cut(self, i, levels, t_idx=0, ax=None, **kwargs):
        """
        Plot contours on a phi cut.

        Parameters
        ----------
        i : int
            Index at which to slice the data.
        levels : list
            List of levels to contour.
        t_idx : int, optional
            Time index at which to slice the data.
        ax : matplolit.axes.Axes, optional
            axes on which to plot. Defaults to current axes if not specified.
        kwargs :
            Additional keyword arguments are passed to `xarray.plot.contour`.
        """
        ax = viz.setup_polar_ax(ax)
        sliced = self.data.isel(phi=i, time=t_idx)
        # Need to save a copy of the title to reset it later, since xarray
        # tries to set it's own title that we don't want
        title = ax.get_title()
        xr.plot.contour(sliced, x='theta', y='r', ax=ax,
                        levels=levels, **kwargs)
        viz.format_polar_ax(ax)
        ax.set_title(title)

    @property
    def _equator_theta_idx(self):
        """
        The theta index of the solar equator.
        """
        return (self.data.shape[1] - 1) // 2

    # Methods for equatorial cuts
    @add_common_docstring(returns_doc=returns_doc)
    def plot_equatorial_cut(self, t_idx=None, ax=None, **kwargs):
        """
        Plot an equatorial cut.

        Parameters
        ----------
        ax : matplolit.axes.Axes, optional
            axes on which to plot. Defaults to current axes if not specified.
        t_idx : int, optional
            Time index at which to slice the data. If not given, an anmiation
            will be created across all time indices.
        kwargs :
            Additional keyword arguments are passed to
            `xarray.plot.pcolormesh`.

        Returns
        -------
        {returns_doc}
        """
        theta_slice = self.data.isel(theta=self._equator_theta_idx)
        time_slice = theta_slice.isel(time=t_idx or 0)
        ax = viz.setup_polar_ax(ax)
        kwargs = self._set_cbar_label(kwargs, self.unit.to_string('latex'))
        # Take slice of data and plot
        quad_mesh = time_slice.plot(x='phi', y='r', ax=ax, **kwargs)
        viz.format_equatorial_ax(ax)
        ax.set_title(f'{self.name}, equatorial plane')
        if t_idx is not None or self.n_timesteps == 1:
            return quad_mesh
        else:
            return viz.animate_time(ax, theta_slice, quad_mesh)

    def contour_equatorial_cut(self, levels, t_idx=0, ax=None, **kwargs):
        """
        Plot contours on an equatorial cut.

        Parameters
        ----------
        levels : list
            List of levels to contour.
        ax : matplolit.axes.Axes, optional
            axes on which to plot. Defaults to current axes if not specified.
        t_idx : int, optional
            Time index at which to slice the data.
        kwargs :
            Additional keyword arguments are passed to `xarray.plot.contour`.
        """
        ax = viz.setup_polar_ax(ax)
        sliced = self.data.isel(theta=self._equator_theta_idx, time=t_idx)
        # Need to save a copy of the title to reset it later, since xarray
        # tries to set it's own title that we don't want
        title = ax.get_title()
        xr.plot.contour(sliced, x='phi', y='r', ax=ax,
                        levels=levels, **kwargs)
        viz.format_equatorial_ax(ax)
        ax.set_title(title)

    @staticmethod
    def _set_cbar_label(kwargs, label):
        """
        Set the colobar label with units.
        """
        # Copy kwargs to prevent modifying them inplace
        kwargs = copy.deepcopy(kwargs)
        # Set the colobar label with units
        cbar_kwargs = kwargs.pop('cbar_kwargs', {})
        cbar_kwargs['label'] = cbar_kwargs.pop('label', label)
        kwargs['cbar_kwargs'] = cbar_kwargs
        return kwargs

    @u.quantity_input
    def sample_at_coords(self, lon: u.deg, lat: u.deg, r: u.m, t=None):
        """
        Sample this variable along a 1D trajectory of coordinates.

        Parameters
        ----------
        lon : astropy.units.Quantity
            Longitudes.
        lat : astropy.units.Quantity
            Latitudes.
        r : astropy.units.Quantity
            Radial distances.
        t : array-like, optional
            Timsteps. If the variable only has a single timstep, this argument
            is not required.

        Returns
        -------
        astropy.units.Quantity
            The sampled data.

        Notes
        -----
        Linear interpolation is used to interpoalte between cells. See the
        docstring of `scipy.interpolate.interpn` for more information.
        """
        points = [self.data.coords[dim].values for dim in
                  ['phi', 'theta', 'r', 'time']]
        values = self.data.values
        # Check that coordinates are increasing.
        # Bug fix: the checks previously tested points[1..3] (theta, r, time)
        # while reporting Longitude/Latitude/Radial, so longitude was never
        # checked and the error messages pointed at the wrong coordinate.
        if not np.all(np.diff(points[0]) >= 0):
            raise RuntimeError(
                'Longitude coordinates are not monotonically increasing')
        if not np.all(np.diff(points[1]) >= 0):
            raise RuntimeError(
                'Latitude coordinates are not monotonically increasing')
        if not np.all(np.diff(points[2]) > 0):
            raise RuntimeError(
                'Radial coordinates are not monotonically increasing')

        # Pad phi points so it's possible to interpolate all the way from
        # 0 to 360 deg
        points[0] = np.append(points[0], points[0][0] + 2 * np.pi)
        values = np.append(values, values[0:1, :, :, :], axis=0)

        if len(points[3]) == 1:
            # Only one timestep; drop the time axis entirely.
            xi = np.column_stack([lon.to_value(u.rad),
                             lat.to_value(u.rad),
                             r.to_value(const.R_sun)])
            values = values[:, :, :, 0]
            points = points[:-1]
        else:
            # Bug fix: interpn requires the xi columns to follow the *points*
            # axis order, which is (phi, theta, r, time) — time goes last,
            # not first.
            xi = np.column_stack([lon.to_value(u.rad),
                             lat.to_value(u.rad),
                             r.to_value(const.R_sun),
                             t])

        values_x = interpolate.interpn(points, values, xi)
        return values_x * self._unit
| [
"psipy.visualization.animate_time",
"copy.deepcopy",
"scipy.interpolate.interpn",
"psipy.visualization.setup_polar_ax",
"psipy.visualization.setup_radial_ax",
"textwrap.indent",
"psipy.visualization.format_equatorial_ax",
"xarray.Dataset",
"numpy.rad2deg",
"numpy.append",
"numpy.diff",
"psipy.... | [((493, 702), 'textwrap.indent', 'textwrap.indent', (['f"""\n{quad_mesh_link} or {animation_link}\n If a timestep is specified, the {quad_mesh_link} of the plot is returned.\n Otherwise an {animation_link} is returned.\n"""', '""" """'], {}), '(\n f"""\n{quad_mesh_link} or {animation_link}\n If a timestep is specified, the {quad_mesh_link} of the plot is returned.\n Otherwise an {animation_link} is returned.\n"""\n , \' \')\n', (508, 702), False, 'import textwrap\n'), ((3713, 3758), 'psipy.util.decorators.add_common_docstring', 'add_common_docstring', ([], {'returns_doc': 'returns_doc'}), '(returns_doc=returns_doc)\n', (3733, 3758), False, 'from psipy.util.decorators import add_common_docstring\n'), ((6123, 6168), 'psipy.util.decorators.add_common_docstring', 'add_common_docstring', ([], {'returns_doc': 'returns_doc'}), '(returns_doc=returns_doc)\n', (6143, 6168), False, 'from psipy.util.decorators import add_common_docstring\n'), ((8722, 8767), 'psipy.util.decorators.add_common_docstring', 'add_common_docstring', ([], {'returns_doc': 'returns_doc'}), '(returns_doc=returns_doc)\n', (8742, 8767), False, 'from psipy.util.decorators import add_common_docstring\n'), ((4566, 4589), 'psipy.visualization.setup_radial_ax', 'viz.setup_radial_ax', (['ax'], {}), '(ax)\n', (4585, 4589), True, 'import psipy.visualization as viz\n'), ((4900, 4924), 'psipy.visualization.format_radial_ax', 'viz.format_radial_ax', (['ax'], {}), '(ax)\n', (4920, 4924), True, 'import psipy.visualization as viz\n'), ((5708, 5731), 'psipy.visualization.setup_radial_ax', 'viz.setup_radial_ax', (['ax'], {}), '(ax)\n', (5727, 5731), True, 'import psipy.visualization as viz\n'), ((5956, 6031), 'xarray.plot.contour', 'xr.plot.contour', (['sliced'], {'x': '"""phi"""', 'y': '"""theta"""', 'ax': 'ax', 'levels': 'levels'}), "(sliced, x='phi', y='theta', ax=ax, levels=levels, **kwargs)\n", (5971, 6031), True, 'import xarray as xr\n'), ((6092, 6116), 'psipy.visualization.format_radial_ax', 
'viz.format_radial_ax', (['ax'], {}), '(ax)\n', (6112, 6116), True, 'import psipy.visualization as viz\n'), ((6954, 6976), 'psipy.visualization.setup_polar_ax', 'viz.setup_polar_ax', (['ax'], {}), '(ax)\n', (6972, 6976), True, 'import psipy.visualization as viz\n'), ((7170, 7193), 'psipy.visualization.format_polar_ax', 'viz.format_polar_ax', (['ax'], {}), '(ax)\n', (7189, 7193), True, 'import psipy.visualization as viz\n'), ((7209, 7245), 'numpy.rad2deg', 'np.rad2deg', (["time_slice['phi'].values"], {}), "(time_slice['phi'].values)\n", (7219, 7245), True, 'import numpy as np\n'), ((8115, 8137), 'psipy.visualization.setup_polar_ax', 'viz.setup_polar_ax', (['ax'], {}), '(ax)\n', (8133, 8137), True, 'import psipy.visualization as viz\n'), ((8360, 8433), 'xarray.plot.contour', 'xr.plot.contour', (['sliced'], {'x': '"""theta"""', 'y': '"""r"""', 'ax': 'ax', 'levels': 'levels'}), "(sliced, x='theta', y='r', ax=ax, levels=levels, **kwargs)\n", (8375, 8433), True, 'import xarray as xr\n'), ((8466, 8489), 'psipy.visualization.format_polar_ax', 'viz.format_polar_ax', (['ax'], {}), '(ax)\n', (8485, 8489), True, 'import psipy.visualization as viz\n'), ((9513, 9535), 'psipy.visualization.setup_polar_ax', 'viz.setup_polar_ax', (['ax'], {}), '(ax)\n', (9531, 9535), True, 'import psipy.visualization as viz\n'), ((9727, 9755), 'psipy.visualization.format_equatorial_ax', 'viz.format_equatorial_ax', (['ax'], {}), '(ax)\n', (9751, 9755), True, 'import psipy.visualization as viz\n'), ((10528, 10550), 'psipy.visualization.setup_polar_ax', 'viz.setup_polar_ax', (['ax'], {}), '(ax)\n', (10546, 10550), True, 'import psipy.visualization as viz\n'), ((10797, 10868), 'xarray.plot.contour', 'xr.plot.contour', (['sliced'], {'x': '"""phi"""', 'y': '"""r"""', 'ax': 'ax', 'levels': 'levels'}), "(sliced, x='phi', y='r', ax=ax, levels=levels, **kwargs)\n", (10812, 10868), True, 'import xarray as xr\n'), ((10901, 10929), 'psipy.visualization.format_equatorial_ax', 'viz.format_equatorial_ax', (['ax'], 
{}), '(ax)\n', (10925, 10929), True, 'import psipy.visualization as viz\n'), ((11156, 11177), 'copy.deepcopy', 'copy.deepcopy', (['kwargs'], {}), '(kwargs)\n', (11169, 11177), False, 'import copy\n'), ((12988, 13034), 'numpy.append', 'np.append', (['points[0]', '(points[0][0] + 2 * np.pi)'], {}), '(points[0], points[0][0] + 2 * np.pi)\n', (12997, 13034), True, 'import numpy as np\n'), ((13052, 13099), 'numpy.append', 'np.append', (['values', 'values[0:1, :, :, :]'], {'axis': '(0)'}), '(values, values[0:1, :, :, :], axis=0)\n', (13061, 13099), True, 'import numpy as np\n'), ((13649, 13688), 'scipy.interpolate.interpn', 'interpolate.interpn', (['points', 'values', 'xi'], {}), '(points, values, xi)\n', (13668, 13688), False, 'from scipy import interpolate\n'), ((3638, 3662), 'xarray.Dataset', 'xr.Dataset', (['{name: data}'], {}), '({name: data})\n', (3648, 3662), True, 'import xarray as xr\n'), ((5043, 5083), 'psipy.visualization.animate_time', 'viz.animate_time', (['ax', 'r_slice', 'quad_mesh'], {}), '(ax, r_slice, quad_mesh)\n', (5059, 5083), True, 'import psipy.visualization as viz\n'), ((7469, 7511), 'psipy.visualization.animate_time', 'viz.animate_time', (['ax', 'phi_slice', 'quad_mesh'], {}), '(ax, phi_slice, quad_mesh)\n', (7485, 7511), True, 'import psipy.visualization as viz\n'), ((9930, 9974), 'psipy.visualization.animate_time', 'viz.animate_time', (['ax', 'theta_slice', 'quad_mesh'], {}), '(ax, theta_slice, quad_mesh)\n', (9946, 9974), True, 'import psipy.visualization as viz\n'), ((12435, 12453), 'numpy.diff', 'np.diff', (['points[1]'], {}), '(points[1])\n', (12442, 12453), True, 'import numpy as np\n'), ((12589, 12607), 'numpy.diff', 'np.diff', (['points[2]'], {}), '(points[2])\n', (12596, 12607), True, 'import numpy as np\n'), ((12742, 12760), 'numpy.diff', 'np.diff', (['points[3]'], {}), '(points[3])\n', (12749, 12760), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.linking import PointND, link, Hash_table
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
# Directory next to this module that holds test data files.
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
# Call lambda function for a fresh copy each time.
unit_steps = lambda: [[PointND(t, (x, 0))] for t, x in enumerate(range(5))]
np.random.seed(0)
# Fixed pseudo-random 1D walk (seeded above) shared by several tests.
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.diff(random_x).max()
random_walk_legacy = lambda: [[PointND(t, (x, 5))]
                              for t, x in enumerate(random_x)]
def hash_generator(dims, box_size):
    """Return a zero-argument factory that builds a fresh Hash_table
    with the given dimensions and box size on every call."""
    def _make():
        return Hash_table(dims, box_size)
    return _make
def _skip_if_no_numba():
    """Skip the calling test (via nose.SkipTest) when numba is unavailable."""
    if NUMBA_AVAILABLE:
        return
    raise nose.SkipTest('numba not installed. Skipping.')
def random_walk(N):
    """Return a 1D random walk of length N: the cumulative sum of N
    standard-normal steps (uses the global numpy RNG state)."""
    return np.random.randn(N).cumsum()
def contracting_grid():
    """Two frames with a grid of 441 points.

    In the second frame the points contract, so that the outermost ring
    coincides with the second-outermost ring of the first frame. This is
    a way to challenge (and/or stump) a subnet solver.
    """
    grid_x, grid_y = np.mgrid[-10:11, -10:11]
    frame0 = pd.DataFrame(dict(x=grid_x.flatten(), y=grid_y.flatten(),
                               frame=0))
    frame1 = frame0.copy()
    frame1.frame = 1
    frame1.x = frame1.x * 0.9
    frame1.y = frame1.y * 0.9
    combined = pd.concat([frame0, frame1], ignore_index=True)
    # BTree hashing doesn't allow negative coordinates, so shift the grid.
    combined.x += 100
    combined.y += 100
    return combined
class CommonTrackingTests(object):
    """Mixin of linking tests shared by every linker/neighbor-strategy
    combination. Subclasses set ``self.linker_opts`` in ``setUp``; the
    ``link``/``link_df``/``link_df_iter`` helpers below merge those options
    into each call.
    """
    do_diagnostics = False  # Don't ask for diagnostic info from linker
    def test_one_trivial_stepper(self):
        # One 1D stepper
        N = 5
        f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
        expected = f.copy()
        expected['particle'] = np.zeros(N)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
        assert_frame_equal(actual_iter, expected)
        if self.do_diagnostics:
            assert 'diag_search_range' in self.diag.columns
            # Except for first frame, all particles should have been labeled
            # with a search_range
            assert not any(self.diag['diag_search_range'][
                               actual_iter.frame > 0].isnull())
    def test_two_isolated_steppers(self):
        N = 5
        Y = 25
        # Begin second feature one frame later than the first, so the particle labeling (0, 1) is
        # established and not arbitrary.
        a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
        f = pd.concat([a, b])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Sort rows by frame (normal use)
        actual = self.link_df(f.sort('frame'), 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Shuffle rows (crazy!)
        np.random.seed(0)
        f1 = f.reset_index(drop=True)
        f1.reindex(np.random.permutation(f1.index))
        actual = self.link_df(f1, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
    def test_two_isolated_steppers_one_gapped(self):
        N = 5
        Y = 25
        # Begin second feature one frame later than the first,
        # so the particle labeling (0, 1) is established and not arbitrary.
        a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                       'frame': np.arange(N)})
        a = a.drop(3).reset_index(drop=True)
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
                       'frame': np.arange(1, N)})
        f = pd.concat([a, b])
        expected = f.copy()
        expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        expected.reset_index(drop=True, inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # link_df_iter() tests not performed, because hash_size is
        # not knowable from the first frame alone.
        # Sort rows by frame (normal use)
        actual = self.link_df(f.sort('frame'), 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Shuffle rows (crazy!)
        np.random.seed(0)
        f1 = f.reset_index(drop=True)
        f1.reindex(np.random.permutation(f1.index))
        actual = self.link_df(f1, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
    def test_isolated_continuous_random_walks(self):
        # Two 2D random walks
        np.random.seed(0)
        N = 30
        Y = 250
        M = 20 # margin, because negative values raise OutOfHash
        a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N), 'frame': np.arange(N)})
        b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1), 'frame': np.arange(1, N)})
        f = pd.concat([a, b])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(2*M, Y + 2*M))
        assert_frame_equal(actual_iter, expected)
        # Many 2D random walks
        np.random.seed(0)
        initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
        import itertools
        c = itertools.count()
        def walk(x, y):
            i = next(c)
            return DataFrame({'x': x + random_walk(N - i),
                              'y': y + random_walk(N - i),
                              'frame': np.arange(i, N)})
        f = pd.concat([walk(*pos) for pos in initial_positions])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
        expected.sort(['particle', 'frame'], inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(200 + M, 200 + M))
        assert_frame_equal(actual_iter, expected)
    def test_start_at_frame_other_than_zero(self):
        # One 1D stepper
        N = 5
        FIRST_FRAME = 3
        f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                       'frame': FIRST_FRAME + np.arange(N)})
        expected = f.copy()
        expected['particle'] = np.zeros(N)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual = self.link_df_iter(f, 5, hash_size=(6, 2))
        assert_frame_equal(actual, expected)
    def test_blank_frame_no_memory(self):
        # One 1D stepper
        N = 5
        f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                       'frame': [0, 1, 2, 4, 5]})
        expected = f.copy()
        expected['particle'] = np.zeros(N)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual = self.link_df_iter(f, 5, hash_size=(10, 10))
        assert_frame_equal(actual, expected)
        # This doesn't error, but we might wish it would
        # give the particle a new ID after the gap. It just
        # ignores the missing frame.
    def test_real_data_that_causes_duplicate_bug(self):
        filename = 'reproduce_duplicate_track_assignment.df'
        f = pd.read_pickle(os.path.join(path, filename))
        # Not all parameters reproduce it, but these do
        self.link_df(f, 8, 2, verify_integrity=True)
    def test_search_range(self):
        t = self.link(unit_steps(), 1.1, hash_generator((10, 10), 1))
        assert len(t) == 1 # One track
        t_short = self.link(unit_steps(), 0.9, hash_generator((10, 10), 1))
        assert len(t_short) == len(unit_steps()) # Each step is a separate track.
        t = self.link(random_walk_legacy(), max_disp + 0.1,
                      hash_generator((10, 10), 1))
        assert len(t) == 1 # One track
        t_short = self.link(random_walk_legacy(), max_disp - 0.1,
                            hash_generator((10, 10), 1))
        assert len(t_short) > 1 # Multiple tracks
    def test_box_size(self):
        """No matter what the box size, there should be one track, and it should
        contain all the points."""
        for box_size in [0.1, 1, 10]:
            t1 = self.link(unit_steps(), 1.1, hash_generator((10, 10), box_size))
            t2 = self.link(random_walk_legacy(), max_disp + 1,
                           hash_generator((10, 10), box_size))
            assert len(t1) == 1
            assert len(t2) == 1
            assert len(t1[0].points) == len(unit_steps())
            assert len(t2[0].points) == len(random_walk_legacy())
    def test_easy_tracking(self):
        """A grid of particles that all move one step in x per frame."""
        level_count = 5
        p_count = 16
        levels = []
        for j in range(level_count):
            level = []
            for k in np.arange(p_count) * 2:
                level.append(PointND(j, (j, k)))
            levels.append(level)
        hash_generator = lambda: Hash_table((level_count + 1,
                                             p_count * 2 + 1), .5)
        tracks = self.link(levels, 1.5, hash_generator)
        assert len(tracks) == p_count
        for t in tracks:
            x, y = zip(*[p.pos for p in t])
            dx = np.diff(x)
            dy = np.diff(y)
            assert np.sum(dx) == level_count - 1
            assert np.sum(dy) == 0
    def test_copy(self):
        """Check inplace/copy behavior of link_df, link_df_iter"""
        # One 1D stepper
        N = 5
        f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
        f_inplace = f.copy()
        expected = f.copy()
        expected['particle'] = np.zeros(N)
        # Should add particle column in-place
        # UNLESS diagnostics are enabled
        actual = self.link_df(f_inplace, 5)
        assert_frame_equal(actual, expected)
        if self.do_diagnostics:
            assert 'particle' not in f_inplace.columns
        else:
            assert_frame_equal(actual, f_inplace)
        # Should copy
        actual = self.link_df(f, 5, copy_features=True)
        assert_frame_equal(actual, expected)
        assert 'particle' not in f.columns
        # Should copy
        actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
        assert_frame_equal(actual_iter, expected)
        assert 'particle' not in f.columns
    @nose.tools.raises(tp.SubnetOversizeException)
    def test_oversize_fail(self):
        self.link_df(contracting_grid(), 1)
    @nose.tools.raises(tp.SubnetOversizeException)
    def test_adaptive_fail(self):
        """Check recursion limit"""
        self.link_df(contracting_grid(), 1, adaptive_stop=0.92)
    def link(self, *args, **kwargs):
        """Call tp.link() with this class's linker options merged in."""
        kwargs.update(self.linker_opts)
        return tp.link(*args, **kwargs)
    def link_df(self, *args, **kwargs):
        """Call tp.link_df() with this class's linker options merged in."""
        kwargs.update(self.linker_opts)
        kwargs['diagnostics'] = self.do_diagnostics
        return tp.link_df(*args, **kwargs)
    def link_df_iter(self, *args, **kwargs):
        """Call tp.link_df_iter() frame-by-frame and reassemble one sorted
        DataFrame of results."""
        kwargs.update(self.linker_opts)
        kwargs['diagnostics'] = self.do_diagnostics
        args = list(args)
        features = args.pop(0)
        res = pd.concat(tp.link_df_iter(
            (df for fr, df in features.groupby('frame')), *args, **kwargs))
        return res.sort(['particle', 'frame']).reset_index(drop=True)
class TestOnce(unittest.TestCase):
    """Simple API tests that need only run against one linking engine."""
    def setUp(self):
        count = 5
        self.features = DataFrame({'x': np.arange(count),
                                   'y': np.ones(count),
                                   'frame': np.arange(count)})
    def test_t_column(self):
        """link_df/link_df_iter should honor a custom frame-number column."""
        f = self.features.copy()
        t_col = 'arbitrary name'
        renamed = list(f.columns)
        renamed[renamed.index('frame')] = t_col
        f.columns = renamed
        # Smoke tests: these calls should simply not raise.
        tp.link_df(f, 5, t_column=t_col, verify_integrity=True)
        frames = (g for _, g in f.groupby(t_col))
        list(tp.link_df_iter(frames, 5, t_column=t_col, verify_integrity=True))
    @nose.tools.raises(ValueError)
    def test_check_iter(self):
        """link_df_iter() should raise a useful error when handed a single
        DataFrame instead of an iterable of them."""
        list(tp.link_df_iter(self.features.copy(), 5))
class SubnetNeededTests(CommonTrackingTests):
    """Tests that assume a best-effort subnet linker (i.e. not "drop")."""
    def test_two_nearby_steppers(self):
        """Two steppers close enough to form subnets must still be linked
        into two distinct trajectories."""
        N = 5
        Y = 2
        # Begin second feature one frame later than the first, so the particle labeling (0, 1) is
        # established and not arbitrary.
        a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
        f = pd.concat([a, b])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Sort rows by frame (normal use)
        actual = self.link_df(f.sort('frame'), 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Shuffle rows (crazy!)
        np.random.seed(0)
        f1 = f.reset_index(drop=True)
        f1.reindex(np.random.permutation(f1.index))
        actual = self.link_df(f1, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        if self.do_diagnostics:
            assert 'diag_subnet' in self.diag.columns
            assert 'diag_subnet_size' in self.diag.columns
            # Except for frame in which they appear, all particles should have
            # been labeled with a search_range
            assert not any(self.diag['diag_search_range'][
                               actual_iter.frame > 1].isnull())
            # The number of loop iterations is reported by the numba linker only
            if self.linker_opts['link_strategy'] == 'numba':
                assert 'diag_subnet_iterations' in self.diag.columns
    def test_two_nearby_steppers_one_gapped(self):
        N = 5
        Y = 2
        # Begin second feature one frame later than the first, so the particle labeling (0, 1) is
        # established and not arbitrary.
        a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
        a = a.drop(3).reset_index(drop=True)
        f = pd.concat([a, b])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        expected.reset_index(drop=True, inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Sort rows by frame (normal use)
        actual = self.link_df(f.sort('frame'), 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        # Shuffle rows (crazy!)
        np.random.seed(0)
        f1 = f.reset_index(drop=True)
        f1.reindex(np.random.permutation(f1.index))
        actual = self.link_df(f1, 5)
        assert_frame_equal(actual, expected)
        actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
    def test_nearby_continuous_random_walks(self):
        # Two 2D random walks
        np.random.seed(0)
        N = 30
        Y = 250
        M = 20 # margin, because negative values raise OutOfHash
        a = DataFrame({'x': M + random_walk(N),
                       'y': M + random_walk(N),
                       'frame': np.arange(N)})
        b = DataFrame({'x': M + random_walk(N - 1),
                       'y': M + Y + random_walk(N - 1),
                       'frame': np.arange(1, N)})
        f = pd.concat([a, b])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual = self.link_df_iter(f, 5, hash_size=(2*M, 2*M + Y))
        assert_frame_equal(actual, expected)
        # Several 2D random walks
        np.random.seed(0)
        initial_positions = [(10, 11), (10, 18), (14, 15), (20, 21), (13, 13),
                             (10, 10), (17, 19)]
        import itertools
        c = itertools.count()
        def walk(x, y):
            i = next(c)
            return DataFrame({'x': x + random_walk(N - i),
                              'y': y + random_walk(N - i),
                              'frame': np.arange(i, N)})
        f = pd.concat([walk(*pos) for pos in initial_positions])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
        expected.sort(['particle', 'frame'], inplace=True)
        actual = self.link_df(f, 5)
        assert_frame_equal(actual, expected)
        actual = self.link_df_iter(f, 5, hash_size=(2*M, 2*M))
        assert_frame_equal(actual, expected)
        # Shuffle rows (crazy!)
        np.random.seed(0)
        f1 = f.reset_index(drop=True)
        f1.reindex(np.random.permutation(f1.index))
        actual = self.link_df(f1, 5)
        assert_frame_equal(actual, expected)
        actual = self.link_df_iter(f1, 5, hash_size=(2*M, 2*M))
        assert_frame_equal(actual, expected)
    def test_quadrature_distances(self):
        """A simple test to check whether the subnet linker adds
        distances in quadrature (as in Crocker-Grier)."""
        def subnet_test(epsilon):
            """Returns 2 features in 2 frames, which represent a special
            case when the subnet linker adds distances in quadrature. With
            epsilon=0, subnet linking is degenerate. Therefore
            linking should differ for positive and negative epsilon."""
            return pd.DataFrame([(0, 10, 11), (0, 10, 8),
                                 (1, 9, 10), (1, 12, 10 + epsilon)],
                                 columns=['frame', 'x', 'y'])
        trneg = self.link_df(subnet_test(0.01), 5, retain_index=True)
        trpos = self.link_df(subnet_test(-0.01), 5, retain_index=True)
        assert not np.allclose(trneg.particle.values, trpos.particle.values)
    def test_memory(self):
        """A unit-stepping trajectory and a random walk are observed
        simultaneously. The random walk is missing from one observation."""
        a = [p[0] for p in unit_steps()]
        b = [p[0] for p in random_walk_legacy()]
        # b[2] is intentionally omitted below.
        gapped = lambda: deepcopy([[a[0], b[0]], [a[1], b[1]], [a[2]],
                                   [a[3], b[3]], [a[4], b[4]]])
        safe_disp = 1 + random_x.max() - random_x.min() # Definitely large enough
        t0 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=0)
        assert len(t0) == 3, len(t0)
        t2 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=2)
        assert len(t2) == 2, len(t2)
    def test_memory_removal(self):
        """BUG: A particle remains in memory after its Track is resumed, leaving two
        copies that can independently pick up destinations, leaving two Points in the
        same Track in a single level."""
        levels = []
        levels.append([PointND(0, [1, 1]), PointND(0, [4, 1])]) # two points
        levels.append([PointND(1, [1, 1])]) # one vanishes, but is remembered
        levels.append([PointND(2, [1, 1]), PointND(2, [2, 1])]) # resume Track
        levels.append([PointND(3, [1, 1]), PointND(3, [2, 1]), PointND(3, [4, 1])])
        t = self.link(levels, 5, hash_generator((10, 10), 1), memory=2)
        assert len(t) == 3, len(t)
    def test_memory_with_late_appearance(self):
        a = [p[0] for p in unit_steps()]
        b = [p[0] for p in random_walk_legacy()]
        gapped = lambda: deepcopy([[a[0]], [a[1], b[1]], [a[2]],
                                   [a[3]], [a[4], b[4]]])
        safe_disp = 1 + random_x.max() - random_x.min() # large enough
        t0 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=1)
        assert len(t0) == 3, len(t0)
        t2 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=4)
        assert len(t2) == 2, len(t2)
    def test_memory_on_one_gap(self):
        """With memory=1, a stepper with one missing observation keeps a
        single particle ID across the gap."""
        N = 5
        Y = 2
        # Begin second feature one frame later than the first, so the particle labeling (0, 1) is
        # established and not arbitrary.
        a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
        b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
        a = a.drop(3).reset_index(drop=True)
        f = pd.concat([a, b])
        expected = f.copy().reset_index(drop=True)
        expected['particle'] = np.concatenate([np.array([0, 0, 0, 0]), np.ones(N - 1)])
        expected.sort(['particle', 'frame'], inplace=True)
        expected.reset_index(drop=True, inplace=True)
        actual = self.link_df(f, 5, memory=1)
        assert_frame_equal(actual, expected)
        if self.do_diagnostics:
            assert 'diag_remembered' in self.diag.columns
        actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50), memory=1)
        assert_frame_equal(actual_iter, expected)
        if self.do_diagnostics:
            assert 'diag_remembered' in self.diag.columns
        # Sort rows by frame (normal use)
        actual = self.link_df(f.sort('frame'), 5, memory=1)
        assert_frame_equal(actual, expected)
        if self.do_diagnostics:
            assert 'diag_remembered' in self.diag.columns
        actual_iter = self.link_df_iter(f.sort('frame'), 5,
                                        memory=1, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        if self.do_diagnostics:
            assert 'diag_remembered' in self.diag.columns
        # Shuffle rows (crazy!)
        np.random.seed(0)
        f1 = f.reset_index(drop=True)
        f1.reindex(np.random.permutation(f1.index))
        actual = self.link_df(f1, 5, memory=1)
        assert_frame_equal(actual, expected)
        if self.do_diagnostics:
            assert 'diag_remembered' in self.diag.columns
        actual_iter = self.link_df_iter(f1, 5, memory=1, hash_size=(50, 50))
        assert_frame_equal(actual_iter, expected)
        if self.do_diagnostics:
            assert 'diag_remembered' in self.diag.columns
    def test_pathological_tracking(self):
        """A sheared grid where naive nearest-neighbor pairing would
        mislabel the particles."""
        level_count = 5
        p_count = 16
        levels = []
        shift = 1
        for j in range(level_count):
            level = []
            for k in np.arange(p_count) * 2:
                level.append(PointND(k // 2, (j, k + j * shift)))
            levels.append(level)
        hash_generator = lambda: Hash_table((level_count + 1,
                                             p_count*2 + level_count*shift + 1),
                                             .5)
        tracks = self.link(levels, 8, hash_generator)
        assert len(tracks) == p_count, len(tracks)
class DiagnosticsTests(CommonTrackingTests):
    """Mixin that turns on diagnostic output from the linker.

    Diagnostic columns are stripped from the linker's result and stashed
    in ``self.diag`` so that individual tests may inspect them, while the
    common assertions keep seeing plain, diagnostics-free frames.
    """
    do_diagnostics = True
    def _strip_diag(self, df):
        """Stash the 'diag_*' columns of df in self.diag; return df without them.
        """
        self.diag = df.reindex(
            columns=[c for c in df.columns if c.startswith('diag_')])
        return tp.strip_diagnostics(df)
    def link_df(self, *args, **kwargs):
        linked = super(DiagnosticsTests, self).link_df(*args, **kwargs)
        return self._strip_diag(linked)
    def link_df_iter(self, *args, **kwargs):
        linked = super(DiagnosticsTests, self).link_df_iter(*args, **kwargs)
        stripped = self._strip_diag(linked)
        # pd.concat() can mess with the column order if not all columns
        # are present in all DataFrames, so enforce it here.
        return stripped.reindex(columns=['frame', 'x', 'y', 'particle'])
class NumbaOnlyTests(SubnetNeededTests):
    """Tests that are unbearably slow without a fast subnet linker."""
    def test_adaptive_range(self):
        """Adaptive search: shrink search_range until the subnet is solvable,
        dropping the particles that cannot be linked within the limit."""
        cg = contracting_grid()
        # Allow 5 applications of the step
        tracks = self.link_df(cg, 1, adaptive_step=0.8, adaptive_stop=0.32)
        # Transform back to origin
        tracks.x -= 100
        tracks.y -= 100
        assert len(cg) == len(tracks)
        tr0 = tracks[tracks.frame == 0].set_index('particle')
        tr1 = tracks[tracks.frame == 1].set_index('particle')
        only0 = list(set(tr0.index) - set(tr1.index))
        only1 = list(set(tr1.index) - set(tr0.index))
        # From the first frame, the outermost particles should have been lost.
        assert all((tr0.x.ix[only0].abs() > 9.5) | (tr0.y.ix[only0].abs() > 9.5))
        # There should be new tracks in the second frame, corresponding to the
        # middle radii.
        assert all((tr1.x.ix[only1].abs() == 4.5) | (tr1.y.ix[only1].abs() == 4.5))
        if self.do_diagnostics:
            # We use this opportunity to check for diagnostic data
            # made by the numba linker only.
            assert 'diag_subnet_iterations' in self.diag.columns
class TestKDTreeWithDropLink(CommonTrackingTests, unittest.TestCase):
    """Common tests with the 'drop' subnet strategy, which breaks
    trajectories instead of solving subnets."""
    def setUp(self):
        self.linker_opts = {'link_strategy': 'drop',
                            'neighbor_strategy': 'KDTree'}
    def test_drop_link(self):
        """A new particle appearing in frame 2 of a 1D stepper creates a
        subnet; with link_strategy='drop' and a large enough search_range
        the trajectory must be broken rather than solved."""
        N = 2
        stepper = DataFrame({'x': np.arange(N), 'y': np.ones(N),
                             'frame': np.arange(N)})
        f = stepper.append(DataFrame({'x': [3], 'y': [1], 'frame': [1]}),
                           ignore_index=True)
        expected_without_subnet = f.copy()
        expected_without_subnet['particle'] = [0, 0, 1]
        # With a small search_range no subnet forms and the labeling is
        # deterministic, so an exact comparison is possible.
        without_subnet = self.link_df(f, 1.5, retain_index=True)
        assert_frame_equal(without_subnet, expected_without_subnet,
                           check_dtype=False)
        # The linker assigns new particle IDs in arbitrary order, so with a
        # large search_range we just check that 2 new trajectories appear.
        with_subnet = self.link_df(f, 5, retain_index=True)
        assert set(with_subnet.particle) == {0, 1, 2}
class TestBTreeWithRecursiveLink(SubnetNeededTests, unittest.TestCase):
    """Recursive pure-Python subnet solver with the BTree strategy."""
    def setUp(self):
        self.linker_opts = {'link_strategy': 'recursive',
                            'neighbor_strategy': 'BTree'}
class TestBTreeWithNonrecursiveLink(SubnetNeededTests, unittest.TestCase):
    """Nonrecursive pure-Python subnet solver with the BTree strategy."""
    def setUp(self):
        self.linker_opts = {'link_strategy': 'nonrecursive',
                            'neighbor_strategy': 'BTree'}
class TestBTreeWithNonrecursiveLinkDiag(DiagnosticsTests, TestBTreeWithNonrecursiveLink):
    """Nonrecursive BTree tests, re-run with linker diagnostics enabled."""
    pass
class TestKDTreeWithRecursiveLink(SubnetNeededTests, unittest.TestCase):
    """Recursive pure-Python subnet solver with the KDTree strategy."""
    def setUp(self):
        self.linker_opts = {'link_strategy': 'recursive',
                            'neighbor_strategy': 'KDTree'}
class TestKDTreeWithRecursiveLinkDiag(DiagnosticsTests, TestKDTreeWithRecursiveLink):
    """Recursive KDTree tests, re-run with linker diagnostics enabled."""
    pass
class TestKDTreeWithNonrecursiveLink(SubnetNeededTests, unittest.TestCase):
    """Nonrecursive pure-Python subnet solver with the KDTree strategy."""
    def setUp(self):
        self.linker_opts = {'link_strategy': 'nonrecursive',
                            'neighbor_strategy': 'KDTree'}
class TestKDTreeWithNumbaLink(NumbaOnlyTests, unittest.TestCase):
    """Numba-accelerated subnet solver with the KDTree strategy."""
    def setUp(self):
        # Skip the whole class when numba is not installed.
        _skip_if_no_numba()
        self.linker_opts = {'link_strategy': 'numba',
                            'neighbor_strategy': 'KDTree'}
class TestKDTreeWithNumbaLinkDiag(DiagnosticsTests, TestKDTreeWithNumbaLink):
    """Numba KDTree tests, re-run with linker diagnostics enabled."""
    pass
class TestBTreeWithNumbaLink(NumbaOnlyTests, unittest.TestCase):
    """Numba-accelerated subnet solver with the BTree strategy."""
    def setUp(self):
        # Skip the whole class when numba is not installed.
        _skip_if_no_numba()
        self.linker_opts = {'link_strategy': 'numba',
                            'neighbor_strategy': 'BTree'}
if __name__ == '__main__':
    import nose
    # Run this module's tests verbosely (-vvs), stopping at the first
    # failure (-x) and dropping into pdb on errors and failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| [
"numpy.random.seed",
"numpy.sum",
"numpy.allclose",
"numpy.ones",
"trackpy.strip_diagnostics",
"trackpy.linking.PointND",
"numpy.arange",
"os.path.join",
"pandas.DataFrame",
"os.path.abspath",
"numpy.random.randn",
"pandas.concat",
"copy.deepcopy",
"pandas.util.testing.assert_frame_equal",... | [((691, 720), 'trackpy.utils.make_pandas_strict', 'tp.utils.make_pandas_strict', ([], {}), '()\n', (718, 720), True, 'import trackpy as tp\n'), ((780, 806), 'os.path.join', 'os.path.join', (['path', '"""data"""'], {}), "(path, 'data')\n", (792, 806), False, 'import os\n'), ((937, 954), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (951, 954), True, 'import numpy as np\n'), ((746, 771), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (761, 771), False, 'import os\n'), ((1987, 2029), 'pandas.concat', 'pd.concat', (['[pts0, pts1]'], {'ignore_index': '(True)'}), '([pts0, pts1], ignore_index=True)\n', (1996, 2029), True, 'import pandas as pd\n'), ((12153, 12198), 'nose.tools.raises', 'nose.tools.raises', (['tp.SubnetOversizeException'], {}), '(tp.SubnetOversizeException)\n', (12170, 12198), False, 'import nose\n'), ((12283, 12328), 'nose.tools.raises', 'nose.tools.raises', (['tp.SubnetOversizeException'], {}), '(tp.SubnetOversizeException)\n', (12300, 12328), False, 'import nose\n'), ((13812, 13841), 'nose.tools.raises', 'nose.tools.raises', (['ValueError'], {}), '(ValueError)\n', (13829, 13841), False, 'import nose\n'), ((30741, 30828), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (30755, 30828), False, 'import nose\n'), ((966, 984), 'numpy.random.randn', 'np.random.randn', (['(5)'], {}), '(5)\n', (981, 984), True, 'import numpy as np\n'), ((1045, 1062), 'numpy.diff', 'np.diff', (['random_x'], {}), '(random_x)\n', (1052, 1062), True, 'import numpy as np\n'), ((1241, 1267), 'trackpy.linking.Hash_table', 'Hash_table', (['dims', 'box_size'], {}), '(dims, box_size)\n', (1251, 1267), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((1337, 1384), 'nose.SkipTest', 'nose.SkipTest', (['"""numba not installed. 
Skipping."""'], {}), "('numba not installed. Skipping.')\n", (1350, 1384), False, 'import nose\n'), ((1428, 1446), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (1443, 1446), True, 'import numpy as np\n'), ((2471, 2482), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2479, 2482), True, 'import numpy as np\n'), ((2527, 2563), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (2545, 2563), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((2637, 2678), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (2655, 2678), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((3408, 3425), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (3417, 3425), True, 'import pandas as pd\n'), ((3657, 3693), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (3675, 3693), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((3768, 3809), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (3786, 3809), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((3911, 3947), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (3929, 3947), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((4036, 4077), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (4054, 4077), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, 
assert_almost_equal\n'), ((4119, 4136), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4133, 4136), True, 'import numpy as np\n'), ((4272, 4308), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4290, 4308), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((4384, 4425), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (4402, 4425), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((4932, 4949), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (4941, 4949), True, 'import pandas as pd\n'), ((5223, 5259), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5241, 5259), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((5334, 5375), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (5352, 5375), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((5595, 5631), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5613, 5631), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((5720, 5761), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (5738, 5761), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((5803, 5820), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5817, 5820), True, 'import numpy as np\n'), ((5956, 5992), 'pandas.util.testing.assert_frame_equal', 
'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5974, 5992), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((6068, 6109), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (6086, 6109), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((6202, 6219), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6216, 6219), True, 'import numpy as np\n'), ((6537, 6554), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (6546, 6554), True, 'import pandas as pd\n'), ((6786, 6822), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (6804, 6822), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((6903, 6944), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (6921, 6944), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((6985, 7002), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6999, 7002), True, 'import numpy as np\n'), ((7117, 7134), 'itertools.count', 'itertools.count', ([], {}), '()\n', (7132, 7134), False, 'import itertools\n'), ((7683, 7719), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (7701, 7719), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((7804, 7845), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (7822, 7845), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((8140, 8151), 
'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (8148, 8151), True, 'import numpy as np\n'), ((8196, 8232), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (8214, 8232), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((8300, 8336), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (8318, 8336), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((8586, 8597), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (8594, 8597), True, 'import numpy as np\n'), ((8642, 8678), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (8660, 8678), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((8748, 8784), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (8766, 8784), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((11459, 11470), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (11467, 11470), True, 'import numpy as np\n'), ((11611, 11647), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (11629, 11647), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((11886, 11922), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (11904, 11922), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((12062, 12103), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (12080, 
12103), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((12556, 12580), 'trackpy.link', 'tp.link', (['*args'], {}), '(*args, **kwargs)\n', (12563, 12580), True, 'import trackpy as tp\n'), ((12729, 12756), 'trackpy.link_df', 'tp.link_df', (['*args'], {}), '(*args, **kwargs)\n', (12739, 12756), True, 'import trackpy as tp\n'), ((13599, 13653), 'trackpy.link_df', 'tp.link_df', (['f', '(5)'], {'t_column': 'name', 'verify_integrity': '(True)'}), '(f, 5, t_column=name, verify_integrity=True)\n', (13609, 13653), True, 'import trackpy as tp\n'), ((14565, 14582), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (14574, 14582), True, 'import pandas as pd\n'), ((14814, 14850), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (14832, 14850), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((14925, 14966), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (14943, 14966), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((15068, 15104), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (15086, 15104), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((15193, 15234), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (15211, 15234), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((15276, 15293), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (15290, 15293), True, 'import numpy as np\n'), ((15429, 15465), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 
'expected'], {}), '(actual, expected)\n', (15447, 15465), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((15541, 15582), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (15559, 15582), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((16645, 16662), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (16654, 16662), True, 'import pandas as pd\n'), ((16959, 16995), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (16977, 16995), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((17070, 17111), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (17088, 17111), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((17213, 17249), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (17231, 17249), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((17338, 17379), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (17356, 17379), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((17421, 17438), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (17435, 17438), True, 'import numpy as np\n'), ((17574, 17610), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (17592, 17610), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((17686, 
17727), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (17704, 17727), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((17818, 17835), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (17832, 17835), True, 'import numpy as np\n'), ((18245, 18262), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (18254, 18262), True, 'import pandas as pd\n'), ((18494, 18530), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (18512, 18530), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((18606, 18642), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (18624, 18642), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((18686, 18703), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (18700, 18703), True, 'import numpy as np\n'), ((18869, 18886), 'itertools.count', 'itertools.count', ([], {}), '()\n', (18884, 18886), False, 'import itertools\n'), ((19434, 19470), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (19452, 19470), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((19542, 19578), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (19560, 19578), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((19620, 19637), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (19634, 19637), True, 'import numpy as np\n'), ((19773, 19809), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', 
(['actual', 'expected'], {}), '(actual, expected)\n', (19791, 19809), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((19882, 19918), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (19900, 19918), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((23280, 23297), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (23289, 23297), True, 'import pandas as pd\n'), ((23604, 23640), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (23622, 23640), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((23815, 23856), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (23833, 23856), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((24058, 24094), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (24076, 24094), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((24323, 24364), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (24341, 24364), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((24496, 24513), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (24510, 24513), True, 'import numpy as np\n'), ((24659, 24695), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (24677, 24695), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), 
((24871, 24912), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual_iter', 'expected'], {}), '(actual_iter, expected)\n', (24889, 24912), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((26130, 26154), 'trackpy.strip_diagnostics', 'tp.strip_diagnostics', (['df'], {}), '(df)\n', (26150, 26154), True, 'import trackpy as tp\n'), ((28876, 28961), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['without_subnet', 'f_expected_without_subnet'], {'check_dtype': '(False)'}), '(without_subnet, f_expected_without_subnet, check_dtype=False\n )\n', (28894, 28961), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((883, 901), 'trackpy.linking.PointND', 'PointND', (['t', '(x, 0)'], {}), '(t, (x, 0))\n', (890, 901), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((1100, 1118), 'trackpy.linking.PointND', 'PointND', (['t', '(x, 5)'], {}), '(t, (x, 5))\n', (1107, 1118), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((4194, 4225), 'numpy.random.permutation', 'np.random.permutation', (['f1.index'], {}), '(f1.index)\n', (4215, 4225), True, 'import numpy as np\n'), ((5878, 5909), 'numpy.random.permutation', 'np.random.permutation', (['f1.index'], {}), '(f1.index)\n', (5899, 5909), True, 'import numpy as np\n'), ((9084, 9112), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (9096, 9112), False, 'import os\n'), ((10743, 10794), 'trackpy.linking.Hash_table', 'Hash_table', (['(level_count + 1, p_count * 2 + 1)', '(0.5)'], {}), '((level_count + 1, p_count * 2 + 1), 0.5)\n', (10753, 10794), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((11028, 11038), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (11035, 11038), True, 'import numpy as np\n'), ((11056, 11066), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (11063, 11066), True, 'import numpy as 
np\n'), ((11761, 11798), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['actual', 'f_inplace'], {}), '(actual, f_inplace)\n', (11779, 11798), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_almost_equal\n'), ((13740, 13804), 'trackpy.link_df_iter', 'tp.link_df_iter', (['f_iter', '(5)'], {'t_column': 'name', 'verify_integrity': '(True)'}), '(f_iter, 5, t_column=name, verify_integrity=True)\n', (13755, 13804), True, 'import trackpy as tp\n'), ((15351, 15382), 'numpy.random.permutation', 'np.random.permutation', (['f1.index'], {}), '(f1.index)\n', (15372, 15382), True, 'import numpy as np\n'), ((17496, 17527), 'numpy.random.permutation', 'np.random.permutation', (['f1.index'], {}), '(f1.index)\n', (17517, 17527), True, 'import numpy as np\n'), ((19695, 19726), 'numpy.random.permutation', 'np.random.permutation', (['f1.index'], {}), '(f1.index)\n', (19716, 19726), True, 'import numpy as np\n'), ((20420, 20527), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 10, 11), (0, 10, 8), (1, 9, 10), (1, 12, 10 + epsilon)]'], {'columns': "['frame', 'x', 'y']"}), "([(0, 10, 11), (0, 10, 8), (1, 9, 10), (1, 12, 10 + epsilon)],\n columns=['frame', 'x', 'y'])\n", (20432, 20527), True, 'import pandas as pd\n'), ((20742, 20799), 'numpy.allclose', 'np.allclose', (['trneg.particle.values', 'trpos.particle.values'], {}), '(trneg.particle.values, trpos.particle.values)\n', (20753, 20799), True, 'import numpy as np\n'), ((21135, 21209), 'copy.deepcopy', 'deepcopy', (['[[a[0], b[0]], [a[1], b[1]], [a[2]], [a[3], b[3]], [a[4], b[4]]]'], {}), '([[a[0], b[0]], [a[1], b[1]], [a[2]], [a[3], b[3]], [a[4], b[4]]])\n', (21143, 21209), False, 'from copy import deepcopy\n'), ((22427, 22489), 'copy.deepcopy', 'deepcopy', (['[[a[0]], [a[1], b[1]], [a[2]], [a[3]], [a[4], b[4]]]'], {}), '([[a[0]], [a[1], b[1]], [a[2]], [a[3]], [a[4], b[4]]])\n', (22435, 22489), False, 'from copy import deepcopy\n'), ((24571, 24602), 'numpy.random.permutation', 
'np.random.permutation', (['f1.index'], {}), '(f1.index)\n', (24592, 24602), True, 'import numpy as np\n'), ((25368, 25441), 'trackpy.linking.Hash_table', 'Hash_table', (['(level_count + 1, p_count * 2 + level_count * shift + 1)', '(0.5)'], {}), '((level_count + 1, p_count * 2 + level_count * shift + 1), 0.5)\n', (25378, 25441), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((28436, 28481), 'pandas.DataFrame', 'DataFrame', (["{'x': [3], 'y': [1], 'frame': [1]}"], {}), "({'x': [3], 'y': [1], 'frame': [1]})\n", (28445, 28481), False, 'from pandas import DataFrame, Series\n'), ((2357, 2369), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2366, 2369), True, 'import numpy as np\n'), ((2376, 2386), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (2383, 2386), True, 'import numpy as np\n'), ((2397, 2409), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2406, 2409), True, 'import numpy as np\n'), ((3244, 3256), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3253, 3256), True, 'import numpy as np\n'), ((3263, 3273), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (3270, 3273), True, 'import numpy as np\n'), ((3284, 3296), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3293, 3296), True, 'import numpy as np\n'), ((3327, 3342), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (3336, 3342), True, 'import numpy as np\n'), ((3378, 3393), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (3387, 3393), True, 'import numpy as np\n'), ((3524, 3535), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3532, 3535), True, 'import numpy as np\n'), ((3537, 3551), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (3544, 3551), True, 'import numpy as np\n'), ((4677, 4689), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4686, 4689), True, 'import numpy as np\n'), ((4696, 4706), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (4703, 4706), True, 'import numpy as np\n'), ((4740, 4752), 'numpy.arange', 'np.arange', (['N'], 
{}), '(N)\n', (4749, 4752), True, 'import numpy as np\n'), ((4828, 4843), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (4837, 4843), True, 'import numpy as np\n'), ((4902, 4917), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (4911, 4917), True, 'import numpy as np\n'), ((5025, 5047), 'numpy.array', 'np.array', (['[0, 0, 0, 2]'], {}), '([0, 0, 0, 2])\n', (5033, 5047), True, 'import numpy as np\n'), ((5049, 5063), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (5056, 5063), True, 'import numpy as np\n'), ((6398, 6410), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (6407, 6410), True, 'import numpy as np\n'), ((6507, 6522), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (6516, 6522), True, 'import numpy as np\n'), ((6653, 6664), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6661, 6664), True, 'import numpy as np\n'), ((6666, 6680), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (6673, 6680), True, 'import numpy as np\n'), ((7989, 8001), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (7998, 8001), True, 'import numpy as np\n'), ((8008, 8018), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (8015, 8018), True, 'import numpy as np\n'), ((8447, 8459), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (8456, 8459), True, 'import numpy as np\n'), ((8466, 8476), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (8473, 8476), True, 'import numpy as np\n'), ((10603, 10621), 'numpy.arange', 'np.arange', (['p_count'], {}), '(p_count)\n', (10612, 10621), True, 'import numpy as np\n'), ((11091, 11101), 'numpy.sum', 'np.sum', (['dx'], {}), '(dx)\n', (11097, 11101), True, 'import numpy as np\n'), ((11140, 11150), 'numpy.sum', 'np.sum', (['dy'], {}), '(dy)\n', (11146, 11150), True, 'import numpy as np\n'), ((11316, 11328), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (11325, 11328), True, 'import numpy as np\n'), ((11335, 11345), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11342, 11345), True, 
'import numpy as np\n'), ((11356, 11368), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (11365, 11368), True, 'import numpy as np\n'), ((13295, 13307), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13304, 13307), True, 'import numpy as np\n'), ((13314, 13324), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (13321, 13324), True, 'import numpy as np\n'), ((13335, 13347), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13344, 13347), True, 'import numpy as np\n'), ((14401, 14413), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (14410, 14413), True, 'import numpy as np\n'), ((14420, 14430), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (14427, 14430), True, 'import numpy as np\n'), ((14441, 14453), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (14450, 14453), True, 'import numpy as np\n'), ((14484, 14499), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (14493, 14499), True, 'import numpy as np\n'), ((14535, 14550), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (14544, 14550), True, 'import numpy as np\n'), ((14681, 14692), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (14689, 14692), True, 'import numpy as np\n'), ((14694, 14708), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (14701, 14708), True, 'import numpy as np\n'), ((16436, 16448), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (16445, 16448), True, 'import numpy as np\n'), ((16455, 16465), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (16462, 16465), True, 'import numpy as np\n'), ((16476, 16488), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (16485, 16488), True, 'import numpy as np\n'), ((16519, 16534), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (16528, 16534), True, 'import numpy as np\n'), ((16570, 16585), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (16579, 16585), True, 'import numpy as np\n'), ((16761, 16783), 'numpy.array', 'np.array', (['[0, 0, 0, 2]'], {}), '([0, 0, 0, 2])\n', (16769, 
16783), True, 'import numpy as np\n'), ((16785, 16799), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (16792, 16799), True, 'import numpy as np\n'), ((18060, 18072), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (18069, 18072), True, 'import numpy as np\n'), ((18215, 18230), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (18224, 18230), True, 'import numpy as np\n'), ((18361, 18372), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (18369, 18372), True, 'import numpy as np\n'), ((18374, 18388), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (18381, 18388), True, 'import numpy as np\n'), ((21859, 21877), 'trackpy.linking.PointND', 'PointND', (['(0)', '[1, 1]'], {}), '(0, [1, 1])\n', (21866, 21877), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((21879, 21897), 'trackpy.linking.PointND', 'PointND', (['(0)', '[4, 1]'], {}), '(0, [4, 1])\n', (21886, 21897), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((21937, 21955), 'trackpy.linking.PointND', 'PointND', (['(1)', '[1, 1]'], {}), '(1, [1, 1])\n', (21944, 21955), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((22016, 22034), 'trackpy.linking.PointND', 'PointND', (['(2)', '[1, 1]'], {}), '(2, [1, 1])\n', (22023, 22034), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((22036, 22054), 'trackpy.linking.PointND', 'PointND', (['(2)', '[2, 1]'], {}), '(2, [2, 1])\n', (22043, 22054), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((22095, 22113), 'trackpy.linking.PointND', 'PointND', (['(3)', '[1, 1]'], {}), '(3, [1, 1])\n', (22102, 22113), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((22115, 22133), 'trackpy.linking.PointND', 'PointND', (['(3)', '[2, 1]'], {}), '(3, [2, 1])\n', (22122, 22133), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((22135, 22153), 'trackpy.linking.PointND', 'PointND', (['(3)', '[4, 1]'], {}), '(3, [4, 1])\n', (22142, 
22153), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((23071, 23083), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (23080, 23083), True, 'import numpy as np\n'), ((23090, 23100), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (23097, 23100), True, 'import numpy as np\n'), ((23111, 23123), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (23120, 23123), True, 'import numpy as np\n'), ((23154, 23169), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (23163, 23169), True, 'import numpy as np\n'), ((23205, 23220), 'numpy.arange', 'np.arange', (['(1)', 'N'], {}), '(1, N)\n', (23214, 23220), True, 'import numpy as np\n'), ((23396, 23418), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (23404, 23418), True, 'import numpy as np\n'), ((23420, 23434), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (23427, 23434), True, 'import numpy as np\n'), ((25211, 25229), 'numpy.arange', 'np.arange', (['p_count'], {}), '(p_count)\n', (25220, 25229), True, 'import numpy as np\n'), ((28350, 28362), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (28359, 28362), True, 'import numpy as np\n'), ((28369, 28379), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (28376, 28379), True, 'import numpy as np\n'), ((28390, 28402), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (28399, 28402), True, 'import numpy as np\n'), ((3353, 3367), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (3360, 3367), True, 'import numpy as np\n'), ((4854, 4868), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (4861, 4868), True, 'import numpy as np\n'), ((7341, 7356), 'numpy.arange', 'np.arange', (['i', 'N'], {}), '(i, N)\n', (7350, 7356), True, 'import numpy as np\n'), ((7524, 7538), 'numpy.ones', 'np.ones', (['(N - i)'], {}), '(N - i)\n', (7531, 7538), True, 'import numpy as np\n'), ((8066, 8078), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (8075, 8078), True, 'import numpy as np\n'), ((10656, 10674), 
'trackpy.linking.PointND', 'PointND', (['j', '(j, k)'], {}), '(j, (j, k))\n', (10663, 10674), False, 'from trackpy.linking import PointND, link, Hash_table\n'), ((14510, 14524), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (14517, 14524), True, 'import numpy as np\n'), ((16545, 16559), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (16552, 16559), True, 'import numpy as np\n'), ((19092, 19107), 'numpy.arange', 'np.arange', (['i', 'N'], {}), '(i, N)\n', (19101, 19107), True, 'import numpy as np\n'), ((19275, 19289), 'numpy.ones', 'np.ones', (['(N - i)'], {}), '(N - i)\n', (19282, 19289), True, 'import numpy as np\n'), ((23180, 23194), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (23187, 23194), True, 'import numpy as np\n'), ((25264, 25299), 'trackpy.linking.PointND', 'PointND', (['(k // 2)', '(j, k + j * shift)'], {}), '(k // 2, (j, k + j * shift))\n', (25271, 25299), False, 'from trackpy.linking import PointND, link, Hash_table\n')] |
import pandas as pd
import sys
from typing import List
import numpy as np
def csv_to_arff(csv_file_name: str, arff_file_name: str, title: str) -> None:
    """Read a CSV file and write its ARFF translation to *arff_file_name*.

    *title* becomes the @relation name of the generated ARFF file.
    """
    frame = pd.read_csv(csv_file_name)
    lines = df_to_arff(frame, title)
    with open(arff_file_name, "w") as out:
        # One ARFF line per list entry, newline-terminated.
        out.writelines(f"{line}\n" for line in lines)
def df_to_arff(data_frame: pd.DataFrame, title: str) -> List[str]:
    """Return every line of the ARFF file: header, blank separator, then data."""
    lines = df_to_arff_header(data_frame, title)
    lines.append("")
    lines.extend(df_to_arff_data(data_frame))
    return lines
def df_to_arff_data(df: pd.DataFrame) -> List[str]:
    """Return the lines of the '@data' section of the ARFF file.

    Each DataFrame row becomes one comma-joined line of stringified values.
    """
    rows = ["@data"]
    for idx in range(len(df)):
        # iloc (rather than iterrows) keeps the original cell values intact.
        values = df.iloc[idx].tolist()
        rows.append(",".join(map(str, values)))
    return rows
def df_to_arff_header(df: pd.DataFrame, title: str) -> List[str]:
    """Return the list of lines corresponding to the header part of the
    ARFF file: '@relation <title>', a blank line, then one '@attribute'
    line per column.

    Fixes over the previous version: no shadowing of the builtin ``type``,
    no explicit ``__str__()`` call (f-string formatting already applies
    ``str``), and the stray pre-declared ``any`` annotation is gone.
    Output is byte-identical to before.
    """
    result = [f"@relation {title}", ""]
    for att, dtype in zip(df.columns, df.dtypes):
        if np.issubdtype(dtype, np.number):
            # NOTE(review): the ARFF spec spells this 'numeric'; kept as-is
            # to preserve the file format consumers already expect.
            type_repr = "NUMERICAL"
        elif "datetime64" == dtype:
            # NOTE(review): pandas datetime columns are 'datetime64[ns]',
            # which may not compare equal to the unit-less string — confirm.
            type_repr = "DATE-TIME"
        else:
            # Nominal attribute: set of observed values. Set iteration order
            # is nondeterministic, so the rendered order may vary per run.
            type_repr = set(df[att].tolist())
        result.append(f"@attribute {att} {type_repr}")
    return result
if __name__ == '__main__':
    # CLI usage: script.py <input.csv> <output.arff> <relation-title>
    source_csv = sys.argv[1]
    target_arff = sys.argv[2]
    relation_title = sys.argv[3]
    csv_to_arff(source_csv, target_arff, relation_title)
| [
"pandas.read_csv",
"numpy.issubdtype"
] | [((223, 249), 'pandas.read_csv', 'pd.read_csv', (['csv_file_name'], {}), '(csv_file_name)\n', (234, 249), True, 'import pandas as pd\n'), ((1446, 1476), 'numpy.issubdtype', 'np.issubdtype', (['type', 'np.number'], {}), '(type, np.number)\n', (1459, 1476), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch import autograd
from torch import jit
import math
import pdb
class NBeatsNet(nn.Module):
    """N-BEATS style network: a list of stacks, each a list of blocks,
    applied residually in forward() (each block's backcast is subtracted
    from the running input while forecasts are accumulated)."""
    # Stack-type identifiers. NOTE(review): select_block() below ignores the
    # type and always returns GenericBlock, so these only affect logging.
    SEASONALITY_BLOCK = 'seasonality'
    TREND_BLOCK = 'trend'
    GENERIC_BLOCK = 'generic'
    def __init__(self,
                 stack_types=(TREND_BLOCK, SEASONALITY_BLOCK),
                 nb_blocks_per_stack=1,
                 target_size=5,
                 input_size=10,
                 thetas_dims=(4, 8),
                 share_weights_in_stack=False,
                 hidden_layer_units=17,
                 classes=[],  # NOTE(review): mutable default, shared across instances
                 model_type='alpha'):
        super(NBeatsNet, self).__init__()
        self.classes = classes
        self.leads = []
        self.target_size = target_size
        self.input_size = input_size
        self.hidden_layer_units = hidden_layer_units
        self.nb_blocks_per_stack = nb_blocks_per_stack
        self.share_weights_in_stack = share_weights_in_stack
        self.stack_types = stack_types
        # Plain list of block-lists; NOT registered as submodules, so
        # nn.Module machinery (e.g. .to(), state_dict) will not see them.
        self.stacks = []
        self.thetas_dim = thetas_dims
        # NOTE(review): assigning to self.parameters shadows
        # nn.Module.parameters(); optimizers must be given this attribute
        # (rebound to a ParameterList below) explicitly — confirm callers.
        self.parameters = []
        if model_type == 'alpha':
            linear_input_size = 353 * input_size
        else:
            # Clamp the multiplier at 6 for wide inputs.
            self.linea_multiplier = input_size
            if input_size > 6:
                self.linea_multiplier = 6
            linear_input_size = input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.linea_multiplier
        # NOTE(review): linear_input_size computed above is never used;
        # fc_linear is sized from len(classes) instead — presumably legacy.
        self.fc_linear = nn.Linear(353 * len(classes), len(classes))
        print(f'| N-Beats')
        for stack_id in range(len(self.stack_types)):
            self.stacks.append(self.create_stack(stack_id))
        self.parameters = nn.ParameterList(self.parameters)
    def create_stack(self, stack_id):
        """Build one stack: nb_blocks_per_stack blocks, optionally sharing
        weights (the same block object is reused when sharing)."""
        stack_type = self.stack_types[stack_id]
        print(f'| -- Stack {stack_type.title()} (#{stack_id}) (share_weights_in_stack={self.share_weights_in_stack})')
        blocks = []
        for block_id in range(self.nb_blocks_per_stack):
            block_init = NBeatsNet.select_block(stack_type)
            if self.share_weights_in_stack and block_id != 0:
                block = blocks[-1]  # pick up the last one when we share weights.
            else:
                block = block_init(self.hidden_layer_units, self.thetas_dim[stack_id], self.input_size,
                                   self.target_size, classes=len(self.classes))
                # Blocks are not submodules, so collect their params manually.
                self.parameters.extend(block.parameters())
            print(f'     | -- {block}')
            blocks.append(block)
        return blocks
    @staticmethod
    def select_block(block_type):
        # Every stack type maps to GenericBlock (trend/seasonality unused).
        return GenericBlock
    def forward(self, backcast):
        """Doubly-residual pass; returns (residual backcast, summed forecast).

        NOTE(review): forecast is initialized with backcast's shape (not
        target_size) and forced onto the GPU via .cuda() — CPU-only use
        will fail here.
        """
        forecast = torch.zeros(size=backcast.shape).cuda()
        for stack_id in range(len(self.stacks)):
            for block_id in range(len(self.stacks[stack_id])):
                b, f = self.stacks[stack_id][block_id](backcast)
                backcast = backcast - b
                forecast = forecast + f
        return backcast, forecast
def linspace(backcast_length, forecast_length):
    """Split an evenly spaced grid over [-backcast_length, forecast_length]
    into its backcast and forecast halves (lengths backcast_length and
    forecast_length respectively)."""
    grid = np.linspace(-backcast_length, forecast_length,
                       backcast_length + forecast_length)
    return grid[:backcast_length], grid[backcast_length:]
class Block(nn.Module):
    """Fully connected trunk shared by all N-BEATS blocks; subclasses map
    the trunk output through theta heads to backcast/forecast."""

    def __init__(self, units, thetas_dim, backcast_length=10, forecast_length=5, share_thetas=False, classes=16):
        super(Block, self).__init__()
        self.units = units
        self.thetas_dim = thetas_dim
        self.backcast_length = backcast_length
        self.forecast_length = forecast_length
        self.share_thetas = share_thetas
        # Four-layer FC trunk; creation order kept stable so RNG-dependent
        # weight initialization matches the previous implementation.
        self.fc1 = nn.Linear(backcast_length, units)
        self.fc2 = nn.Linear(units, units)
        self.fc3 = nn.Linear(units, units)
        self.fc4 = nn.Linear(units, units)
        self.backcast_linspace, self.forecast_linspace = linspace(backcast_length, forecast_length)
        self.classes = classes
        if share_thetas:
            # A single projection serves as both theta heads.
            self.theta_f_fc = self.theta_b_fc = nn.Linear(units, thetas_dim)
        else:
            self.theta_b_fc = nn.Linear(units, thetas_dim)
            self.theta_f_fc = nn.Linear(units, thetas_dim)

    def forward(self, x):
        # ReLU after each of the four trunk layers.
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            x = F.relu(layer(x))
        return x

    def __str__(self):
        block_type = type(self).__name__
        return f'{block_type}(units={self.units}, thetas_dim={self.thetas_dim}, ' \
               f'backcast_length={self.backcast_length}, forecast_length={self.forecast_length}, ' \
               f'share_thetas={self.share_thetas}) at @{id(self)}'
class GenericBlock(Block):
    """Generic N-BEATS block: learned linear maps from theta vectors to
    backcast and forecast outputs (section 3.3 of the N-BEATS paper)."""

    def __init__(self, units, thetas_dim, backcast_length=10, forecast_length=5, classes=16):
        super(GenericBlock, self).__init__(units, thetas_dim, backcast_length, forecast_length, classes=classes)
        self.backcast_fc = nn.Linear(thetas_dim, backcast_length)
        # NOTE(review): the forecast head is sized to backcast_length (not
        # forecast_length), matching how NBeatsNet.forward accumulates the
        # forecast at input shape — confirm intentional.
        self.forecast_fc = nn.Linear(thetas_dim, backcast_length)

    def forward(self, x):
        hidden = super(GenericBlock, self).forward(x)
        theta_b = F.relu(self.theta_b_fc(hidden))
        theta_f = F.relu(self.theta_f_fc(hidden))  # size thetas_dim
        return self.backcast_fc(theta_b), self.forecast_fc(theta_f)
class Nbeats_alpha(nn.Module):
    """Two parallel generic N-BEATS stacks (raw RR series + wavelet
    features) whose forecasts are concatenated and classified by a small
    MLP head.

    Fix: ``classes=[]`` mutable default replaced by ``None`` sentinel
    (backward compatible; a fresh list is created per instance). The two
    identical NBeatsNet configurations are built from one shared kwargs
    dict instead of being duplicated.
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 classes=None,
                 model_type='alpha'):
        super(Nbeats_alpha, self).__init__()
        # Avoid the shared-mutable-default pitfall.
        classes = [] if classes is None else classes
        self.num_classes = num_classes  # number of classes
        self.num_layers = num_layers    # number of blocks per stack
        self.input_size = input_size    # input size
        self.hidden_size = hidden_size  # hidden layer units
        self.seq_length = seq_length    # sequence length
        self.model_type = model_type
        self.classes = classes
        self.relu = nn.ReLU()
        # Identical configuration for both branches.
        stack_kwargs = dict(stack_types=[NBeatsNet.GENERIC_BLOCK],
                            nb_blocks_per_stack=self.num_layers,
                            target_size=num_classes,
                            input_size=input_size,
                            thetas_dims=(32, 32),
                            classes=self.classes,
                            hidden_layer_units=self.hidden_size)
        self.nbeats_alpha1 = NBeatsNet(**stack_kwargs)
        self.nbeats_alpha2 = NBeatsNet(**stack_kwargs)
        # 541 = concatenated forecast width per input row — TODO confirm.
        self.fc_1 = nn.Linear(self.input_size * 541, 128)  # fully connected 1
        self.fc = nn.Linear(128, num_classes)  # fully connected last layer

    def forward(self, rr_x, rr_wavelets):
        """Run both branches, merge their forecasts, classify."""
        _, forecast1 = self.nbeats_alpha1(rr_x)
        _, forecast2 = self.nbeats_alpha2(rr_wavelets)
        merged = torch.flatten(torch.hstack((forecast1, forecast2)), start_dim=1)
        out = self.relu(self.fc_1(merged))
        return self.fc(out)  # final output (logits)
class Nbeats_beta(nn.Module):
    """Single-branch N-BEATS classifier over PCA features.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * ``self.input_size`` is now always initialized — the original only set
        it inside the ``input_size > 6`` branch, so smaller inputs raised
        AttributeError when building the NBeatsNet. Sibling classes
        (LSTM_ECG, GRU_ECG_BETA) initialize it unconditionally.
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 classes=None,
                 model_type='beta'):
        super(Nbeats_beta, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # blocks per N-BEATS stack
        self.hidden_size = hidden_size  # hidden layer width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.relu = nn.ReLU()
        self.linea_multiplier = input_size
        self.input_size = input_size  # fix: always defined (see docstring)
        if input_size > 6:
            # Cap the fan-in multiplier and feed the network one feature at
            # a time for wide inputs (mirrors the sibling *_BETA classes).
            self.linea_multiplier = 6
            self.input_size = 1
        self.nbeats_beta = NBeatsNet(stack_types=[NBeatsNet.GENERIC_BLOCK],
                                     nb_blocks_per_stack=self.num_layers,
                                     target_size=num_classes,
                                     input_size=self.input_size,
                                     thetas_dims=(32, 32),
                                     classes=self.classes,
                                     hidden_layer_units=self.hidden_size)
        self.fc = nn.Linear(input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.linea_multiplier,
                            num_classes)  # fully connected last layer

    def forward(self, pca_features):
        # NBeatsNet returns (backcast, forecast); only the forecast is used.
        _, output_beta = self.nbeats_beta(pca_features)
        tmp = torch.squeeze(output_beta)
        out = self.relu(tmp)
        out = self.fc(out)  # final output
        return out
class LSTM_ECG(nn.Module):
    """LSTM classifier for ECG features.

    ``model_type='alpha'`` fuses two LSTM branches (RR series + wavelet
    features); any other value ('beta') runs a single LSTM over the second
    input only.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial hidden/cell states are allocated on the *input tensor's*
        device instead of hard-coded ``cuda:0``, so the model also runs on
        CPU or another GPU; behavior with ``cuda:0`` inputs is identical
        (the original crashed on any other device anyway).
      * Dropped the deprecated ``autograd.Variable`` wrapper (a no-op since
        PyTorch 0.4).
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='alpha',
                 classes=None):
        super(LSTM_ECG, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # stacked LSTM layers
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # LSTM hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| LSTM_ECG')
        self.lstm_alpha1 = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                                   num_layers=num_layers, batch_first=True, bidirectional=False)
        if model_type == 'alpha':
            self.lstm_alpha2 = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                                       num_layers=num_layers, batch_first=True, bidirectional=False)
            # 541 is presumably the combined sequence length of the two
            # branches after hstack — TODO confirm against the data pipeline.
            self.fc_1 = nn.Linear(hidden_size * 541, 128)  # fully connected 1
            self.fc = nn.Linear(128, num_classes)  # fully connected last layer
        else:
            self.linea_multiplier = input_size
            if input_size > 6:
                self.linea_multiplier = 6
                self.input_size = 1
            # NOTE(review): intentionally replaces the LSTM created above,
            # mirroring the original implementation.
            self.lstm_alpha1 = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size,
                                       num_layers=self.num_layers, batch_first=True, bidirectional=False)
            self.fc = nn.Linear(
                input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.linea_multiplier, num_classes)
        self.relu = nn.ReLU()

    def forward(self, rr_x, rr_wavelets):
        # Zero initial states, allocated on the same device as the input.
        state_shape = (self.num_layers * self.when_bidirectional, rr_x.size(0), self.hidden_size)
        h_0 = torch.zeros(*state_shape, device=rr_x.device)  # hidden state
        c_0 = torch.zeros(*state_shape, device=rr_x.device)  # internal state
        if self.model_type == 'alpha':
            h_1 = torch.zeros(*state_shape, device=rr_x.device)  # hidden state
            c_1 = torch.zeros(*state_shape, device=rr_x.device)  # internal state
            output_alpha1, (hn_alpha1, cn) = self.lstm_alpha1(rr_x, (h_0, c_0))
            output_alpha2, (hn_alpha2, cn) = self.lstm_alpha2(rr_wavelets, (h_1, c_1))
            # Concatenate branch outputs along time, then flatten per sample.
            tmp = torch.hstack((output_alpha1, output_alpha2))
            tmp = torch.flatten(tmp, start_dim=1)
            out = self.fc_1(tmp)  # first Dense
            out = self.relu(out)
            out = self.fc(out)  # final output
            return out
        else:
            output_beta, (hn_beta, cn) = self.lstm_alpha1(rr_wavelets, (h_0, c_0))
            out = torch.squeeze(output_beta)
            out = self.relu(out)
            out = self.fc(out)  # final output
            return out
class GRU_ECG_ALPHA(nn.Module):
    """Two-branch GRU classifier: one GRU over the RR series and one over
    wavelet features; outputs are concatenated and classified by an MLP head.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial hidden states are allocated on the input tensor's device
        instead of hard-coded ``cuda:0`` (the original crashed on any other
        device); behavior with ``cuda:0`` inputs is identical.
      * Dropped the deprecated ``autograd.Variable`` wrapper (no-op since
        PyTorch 0.4).
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='alpha',
                 classes=None):
        super(GRU_ECG_ALPHA, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # stacked GRU layers
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # GRU hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| GRU_ECG_ALPHA')
        self.gru_alpha1 = nn.GRU(input_size=input_size, hidden_size=hidden_size,
                                 num_layers=num_layers, batch_first=True, bidirectional=False)
        self.gru_alpha2 = nn.GRU(input_size=input_size, hidden_size=hidden_size,
                                 num_layers=num_layers, batch_first=True, bidirectional=False)
        # 541 is presumably the combined sequence length of the two branches
        # after hstack — TODO confirm against the data pipeline.
        self.fc_1 = nn.Linear(hidden_size * 541, 128)  # fully connected 1
        self.fc = nn.Linear(128, num_classes)  # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, rr_x, rr_wavelets):
        # Zero initial states, allocated on the same device as the input.
        state_shape = (self.num_layers * self.when_bidirectional, rr_x.size(0), self.hidden_size)
        h_0 = torch.zeros(*state_shape, device=rr_x.device)  # hidden state
        h_1 = torch.zeros(*state_shape, device=rr_x.device)  # hidden state
        output_alpha1, hn_alpha1 = self.gru_alpha1(rr_x, h_0)
        output_alpha2, hn_alpha2 = self.gru_alpha2(rr_wavelets, h_1)
        # Concatenate branch outputs along time, then flatten per sample.
        tmp = torch.hstack((output_alpha1, output_alpha2))
        tmp = torch.flatten(tmp, start_dim=1)
        out = self.fc_1(tmp)  # first Dense
        out = self.relu(out)
        out = self.fc(out)  # final output
        return out
class GRU_ECG_BETA(nn.Module):
    """Single-branch GRU classifier over PCA features.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial hidden state is allocated on the input tensor's device
        instead of hard-coded ``cuda:0`` (the original crashed on any other
        device); behavior with ``cuda:0`` inputs is identical.
      * Dropped the deprecated ``autograd.Variable`` wrapper (no-op since
        PyTorch 0.4).
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='alpha',
                 classes=None):
        super(GRU_ECG_BETA, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # stacked GRU layers
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # GRU hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| GRU_ECG_BETA')
        self.linea_multiplier = input_size
        if input_size > 6:
            # Cap the fan-in multiplier and feed one feature per timestep
            # for wide inputs (mirrors the sibling *_BETA classes).
            self.linea_multiplier = 6
            self.input_size = 1
        self.gru_beta = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size,
                               num_layers=self.num_layers, batch_first=True, bidirectional=False)
        self.fc = nn.Linear(
            (input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.linea_multiplier) * hidden_size,
            num_classes)  # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, pca_features):
        # Zero initial state, allocated on the same device as the input.
        h_0 = torch.zeros(self.num_layers * self.when_bidirectional, pca_features.size(0), self.hidden_size,
                          device=pca_features.device)  # hidden state
        output_beta, hn_beta = self.gru_beta(pca_features, h_0)
        # Flatten (batch, seq, hidden) -> (batch, seq * hidden) for the head.
        out = torch.flatten(output_beta, start_dim=1)
        out = self.relu(out)
        out = self.fc(out)  # final output
        return out
class JitLSTMCell(nn.Module):
    """LSTM cell with peephole connections, unrolled over a full
    (batch, sequence, feature) input.

    Fix vs. original: the loop computed the new state ``(hy, cy)`` each
    timestep but never wrote it back to ``(hx, cx)``, so every step was
    computed from the *initial* state and the returned final state was the
    initial state — the recurrence never advanced. The state is now
    propagated step to step and the true final state is returned.
    """

    def __init__(self, input_size, hidden_size):
        super(JitLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Gate weights packed as 4*H rows: input/forget/candidate/output.
        self.weight_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
        self.bias_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
        self.bias_hh = nn.Parameter(torch.Tensor(4 * hidden_size))
        # Diagonal peephole weights (cell state -> gates).
        self.weight_ch_i = nn.Parameter(torch.Tensor(hidden_size))
        self.weight_ch_f = nn.Parameter(torch.Tensor(hidden_size))
        self.weight_ch_o = nn.Parameter(torch.Tensor(hidden_size))
        self.reset_parameter()

    def reset_parameter(self):
        # Uniform init in [-1/sqrt(H), 1/sqrt(H)], as in torch's own LSTM.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            nn.init.uniform_(weight, -stdv, stdv)

    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        batch_size, sequence_size, input_size = input.size()
        hidden_seq = []
        hx, cx = state
        for t in range(sequence_size):
            inp = input[:, t, :]
            # All four gate pre-activations in one pass.
            xh = (torch.mm(inp, self.weight_ih.t()) + self.bias_ih + torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
            i, f, _c, o = xh.chunk(4, 1)
            i = torch.sigmoid(i + (self.weight_ch_i * cx))  # input gate peeks at c
            f = torch.sigmoid(f + (self.weight_ch_f * cx))  # forget gate peeks at c
            _c = torch.tanh(_c)
            cy = (f * cx) + (i * _c)
            o = torch.sigmoid(o + (self.weight_ch_o * cy))  # output gate peeks at new c
            hy = o * torch.tanh(cy)
            # Bug fix: carry the new state into the next timestep.
            hx, cx = hy, cy
            hidden_seq.append(hy.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)
        # (sequence, batch, feature) -> (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()
        return hidden_seq, (hx, cx)
class LSTMPeephole_ALPHA(nn.Module):
    """Two-branch peephole-LSTM (JitLSTMCell) classifier: one branch over the
    RR series, one over wavelet features, fused by an MLP head.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial states are allocated on the input tensors' devices instead
        of hard-coded ``cuda:0`` (the original crashed on any other device);
        behavior with ``cuda:0`` inputs is identical.
      * Dropped the deprecated ``autograd.Variable`` wrapper (no-op since
        PyTorch 0.4).
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='alpha',
                 classes=None):
        super(LSTMPeephole_ALPHA, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # kept for interface parity
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # cell hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| LSTM_PEEPHOLE_ALPHA')
        self.lstmpeephole_alpha1 = JitLSTMCell(input_size=input_size, hidden_size=hidden_size)
        self.lstmpeephole_alpha2 = JitLSTMCell(input_size=input_size, hidden_size=hidden_size)
        # 541 is presumably the combined sequence length of the two branches
        # after hstack — TODO confirm against the data pipeline.
        self.fc_1 = nn.Linear(hidden_size * 541, 128)  # fully connected 1
        self.fc = nn.Linear(128, num_classes)  # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, rr_x, rr_wavelets):
        # Zero initial states (batch, hidden), allocated where the inputs live.
        h_0 = torch.zeros(rr_x.size(0), self.hidden_size, device=rr_x.device)  # hidden state
        c_0 = torch.zeros(rr_x.size(0), self.hidden_size, device=rr_x.device)  # internal state
        h_1 = torch.zeros(rr_wavelets.size(0), self.hidden_size, device=rr_wavelets.device)  # hidden state
        c_1 = torch.zeros(rr_wavelets.size(0), self.hidden_size, device=rr_wavelets.device)  # internal state
        oa1, _ = self.lstmpeephole_alpha1(rr_x, (h_0, c_0))
        oa2, _ = self.lstmpeephole_alpha2(rr_wavelets, (h_1, c_1))
        tmp = torch.hstack((oa1, oa2))
        tmp = torch.flatten(tmp, start_dim=1)
        out = self.fc_1(tmp)  # first Dense
        # Free the large intermediates eagerly (kept from the original).
        del tmp, h_0, c_0, h_1, c_1
        out = self.relu(out)
        out = self.fc(out)  # final output
        return out
class LSTMPeephole_BETA(nn.Module):
    """Single-branch peephole-LSTM (JitLSTMCell) classifier over PCA features.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial states are allocated on the input tensor's device instead of
        hard-coded ``cuda:0`` (the original crashed on any other device);
        behavior with ``cuda:0`` inputs is identical.
      * Dropped the deprecated ``autograd.Variable`` wrapper (no-op since
        PyTorch 0.4).
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='beta',
                 classes=None):
        super(LSTMPeephole_BETA, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # kept for interface parity
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # cell hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| LSTM_PEEPHOLE_BETA')
        self.linea_multiplier = input_size
        if input_size > 6:
            # Cap the fan-in multiplier and feed one feature per timestep
            # for wide inputs (mirrors the sibling *_BETA classes).
            self.linea_multiplier = 6
            self.input_size = 1
        self.lstmpeephole_beta = JitLSTMCell(input_size=self.input_size, hidden_size=hidden_size)
        self.fc = nn.Linear(
            (input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.linea_multiplier) * hidden_size,
            num_classes)  # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, pca_features):
        # Zero initial states (batch, hidden), allocated where the input lives.
        h_0 = torch.zeros(pca_features.size(0), self.hidden_size, device=pca_features.device)  # hidden state
        c_0 = torch.zeros(pca_features.size(0), self.hidden_size, device=pca_features.device)  # internal state
        oa1, _ = self.lstmpeephole_beta(pca_features, (h_0, c_0))
        out = torch.flatten(oa1, start_dim=1)
        out = self.relu(out)
        out = self.fc(out)  # final output
        return out
class CustomLSTMPeephole(nn.Module):
    """Single-layer LSTM over a (batch, sequence, feature) input, optionally
    with peephole connections.

    Parameter roles (standard LSTM convention):
      * ``W`` (input_size, 4*hidden) — input-to-hidden weights
      * ``U`` (hidden, 4*hidden)     — hidden/cell-to-hidden weights

    Fix vs. original: the gate computation multiplied ``x_t`` by ``U`` and
    the state by ``W`` — transposed with respect to the declared shapes, so
    any configuration with ``input_size != hidden_size`` crashed with a shape
    mismatch. The matrices are now used according to their shapes.
    """

    def __init__(self, input_size, hidden_size, peephole=True):
        super().__init__()
        self.input_sz = input_size
        self.hidden_size = hidden_size
        self.peephole = peephole
        self.W = nn.Parameter(torch.Tensor(input_size, hidden_size * 4))
        self.U = nn.Parameter(torch.Tensor(hidden_size, hidden_size * 4))
        self.bias = nn.Parameter(torch.Tensor(hidden_size * 4))
        self.init_weights()

    def init_weights(self):
        # Uniform init in [-1/sqrt(H), 1/sqrt(H)], as in torch's own LSTM.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, x,
                init_states):
        """Assumes x is of shape (batch, sequence, feature)"""
        bs, seq_sz, _ = x.size()
        hidden_seq = []
        h_t, c_t = init_states
        HS = self.hidden_size
        for t in range(seq_sz):
            x_t = x[:, t, :]
            # batch the computations into a single matrix multiplication
            if self.peephole:
                # Peephole variant: the gates read the cell state directly.
                gates = (torch.mm(x_t, self.W) + torch.mm(c_t, self.U) + self.bias)
            else:
                gates = x_t @ self.W + h_t @ self.U + self.bias
            g_t = torch.tanh(gates[:, HS * 2:HS * 3])
            i_t, f_t, o_t = (
                torch.sigmoid(gates[:, :HS]),  # input
                torch.sigmoid(gates[:, HS:HS * 2]),  # forget
                torch.sigmoid(gates[:, HS * 3:]),  # output
            )
            if self.peephole:
                # NOTE(review): non-standard cell update kept from the
                # original (candidate = sigmoid of the input projection,
                # and h = tanh(o * c)); confirm this is intentional.
                c_t = f_t * c_t + i_t * torch.sigmoid(x_t @ self.W + self.bias)[:, HS * 2:HS * 3]
                h_t = torch.tanh(o_t * c_t)
            else:
                c_t = f_t * c_t + i_t * g_t
                h_t = o_t * torch.tanh(c_t)
            hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)
        # reshape from shape (sequence, batch, feature) to (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()
        return hidden_seq, (h_t, c_t)
class CustomLSTMPeephole_ALPHA(nn.Module):
    """Two-branch peephole-LSTM (CustomLSTMPeephole) classifier: one branch
    over the RR series, one over wavelet features, fused by an MLP head.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial states are allocated on the input tensors' devices instead
        of hard-coded ``cuda:0`` (the original crashed on any other device);
        behavior with ``cuda:0`` inputs is identical.
      * Dropped the deprecated ``autograd.Variable`` wrapper (no-op since
        PyTorch 0.4) and an unused ``output = []`` local.
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='alpha',
                 classes=None):
        super(CustomLSTMPeephole_ALPHA, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # kept for interface parity
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # cell hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| LSTM_PEEPHOLE_ALPHA')
        self.lstmpeephole_alpha1 = CustomLSTMPeephole(input_size=input_size, hidden_size=hidden_size)
        self.lstmpeephole_alpha2 = CustomLSTMPeephole(input_size=input_size, hidden_size=hidden_size)
        # 541 is presumably the combined sequence length of the two branches
        # after hstack — TODO confirm against the data pipeline.
        self.fc_1 = nn.Linear(hidden_size * 541, 128)  # fully connected 1
        self.fc = nn.Linear(128, num_classes)  # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, rr_x, rr_wavelets):
        # Zero initial states (batch, hidden), allocated where the inputs live.
        h_0 = torch.zeros(rr_x.size(0), self.hidden_size, device=rr_x.device)  # hidden state
        c_0 = torch.zeros(rr_x.size(0), self.hidden_size, device=rr_x.device)  # internal state
        h_1 = torch.zeros(rr_wavelets.size(0), self.hidden_size, device=rr_wavelets.device)  # hidden state
        c_1 = torch.zeros(rr_wavelets.size(0), self.hidden_size, device=rr_wavelets.device)  # internal state
        oa1, (h_0, c_0) = self.lstmpeephole_alpha1(rr_x, (h_0, c_0))
        oa2, (h_1, c_1) = self.lstmpeephole_alpha2(rr_wavelets, (h_1, c_1))
        tmp = torch.hstack((oa1, oa2))
        tmp = torch.flatten(tmp, start_dim=1)
        out = self.fc_1(tmp)  # first Dense
        out = self.relu(out)
        out = self.fc(out)  # final output
        return out
class CustomLSTMPeephole_BETA(nn.Module):
    """Single-branch peephole-LSTM (CustomLSTMPeephole) classifier over PCA
    features.

    Fixes vs. original:
      * ``classes`` default changed from the mutable ``[]`` to ``None``
        (shared-mutable-default pitfall); no behavior change for callers.
      * Initial states are allocated on the input tensor's device instead of
        hard-coded ``cuda:0`` (the original crashed on any other device);
        behavior with ``cuda:0`` inputs is identical.
      * Dropped the deprecated ``autograd.Variable`` wrapper (no-op since
        PyTorch 0.4).
    """

    def __init__(self,
                 input_size,
                 num_classes,
                 hidden_size,
                 num_layers,
                 seq_length,
                 model_type='beta',
                 classes=None):
        super(CustomLSTMPeephole_BETA, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.num_layers = num_layers  # kept for interface parity
        self.input_size = input_size  # per-timestep feature count
        self.hidden_size = hidden_size  # cell hidden width
        self.seq_length = seq_length  # sequence length (kept for callers)
        self.model_type = model_type
        self.classes = [] if classes is None else classes
        self.sigmoid = nn.Sigmoid()
        self.when_bidirectional = 1  # if bidirectional = True, then it has to be equal to 2
        print(f'| LSTM_PEEPHOLE_BETA')
        self.linea_multiplier = input_size
        if input_size > 6:
            # Cap the fan-in multiplier and feed one feature per timestep
            # for wide inputs (mirrors the sibling *_BETA classes).
            self.linea_multiplier = 6
            self.input_size = 1
        self.lstmpeephole_beta = CustomLSTMPeephole(input_size=self.input_size, hidden_size=self.hidden_size)
        self.fc = nn.Linear(
            (input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.linea_multiplier) * self.hidden_size,
            num_classes)  # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, pca_features):
        # Zero initial states (batch, hidden), allocated where the input lives.
        h_0 = torch.zeros(pca_features.size(0), self.hidden_size, device=pca_features.device)  # hidden state
        c_0 = torch.zeros(pca_features.size(0), self.hidden_size, device=pca_features.device)  # internal state
        oa1, (h_0, c_0) = self.lstmpeephole_beta(pca_features, (h_0, c_0))
        out = torch.flatten(oa1, start_dim=1)
        out = self.relu(out)
        out = self.fc(out)  # final output
        return out
class BlendMLP(nn.Module):
    """Late-fusion ensemble: runs two sub-models and blends their class
    scores with a small linear head; if the two outputs disagree in shape,
    falls back to branch A's output unchanged."""

    def __init__(self, modelA, modelB, classes):
        super(BlendMLP, self).__init__()
        self.modelA = modelA
        self.modelB = modelB
        self.classes = classes
        # Fuses the concatenated per-class scores of both branches.
        self.linear = nn.Linear(2 * len(classes), len(classes))

    def forward(self, rr_x, rr_wavelets, pca_features):
        x1 = self.modelA(rr_x, rr_wavelets)
        x2 = self.modelB(pca_features)  # FOR NBEATS and GRU
        # Guard: blend only when both branches produced compatible outputs.
        if x1.shape != x2.shape:
            return x1
        fused = torch.cat((x1, x2), dim=1)
        return self.linear(F.relu(fused))
| [
"torch.nn.init.uniform_",
"torch.cat",
"torch.mm",
"torch.device",
"torch.flatten",
"torch.hstack",
"torch.squeeze",
"torch.Tensor",
"torch.nn.ParameterList",
"numpy.linspace",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.relu",
"torch.nn.LSTM",
"torch.nn.GRU",
"math.sqrt",
... | [((3130, 3215), 'numpy.linspace', 'np.linspace', (['(-backcast_length)', 'forecast_length', '(backcast_length + forecast_length)'], {}), '(-backcast_length, forecast_length, backcast_length +\n forecast_length)\n', (3141, 3215), True, 'import numpy as np\n'), ((1720, 1753), 'torch.nn.ParameterList', 'nn.ParameterList', (['self.parameters'], {}), '(self.parameters)\n', (1736, 1753), False, 'from torch import nn\n'), ((3708, 3741), 'torch.nn.Linear', 'nn.Linear', (['backcast_length', 'units'], {}), '(backcast_length, units)\n', (3717, 3741), False, 'from torch import nn\n'), ((3761, 3784), 'torch.nn.Linear', 'nn.Linear', (['units', 'units'], {}), '(units, units)\n', (3770, 3784), False, 'from torch import nn\n'), ((3804, 3827), 'torch.nn.Linear', 'nn.Linear', (['units', 'units'], {}), '(units, units)\n', (3813, 3827), False, 'from torch import nn\n'), ((3847, 3870), 'torch.nn.Linear', 'nn.Linear', (['units', 'units'], {}), '(units, units)\n', (3856, 3870), False, 'from torch import nn\n'), ((4991, 5029), 'torch.nn.Linear', 'nn.Linear', (['thetas_dim', 'backcast_length'], {}), '(thetas_dim, backcast_length)\n', (5000, 5029), False, 'from torch import nn\n'), ((5057, 5095), 'torch.nn.Linear', 'nn.Linear', (['thetas_dim', 'backcast_length'], {}), '(thetas_dim, backcast_length)\n', (5066, 5095), False, 'from torch import nn\n'), ((6159, 6168), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6166, 6168), False, 'from torch import nn\n'), ((7143, 7180), 'torch.nn.Linear', 'nn.Linear', (['(self.input_size * 541)', '(128)'], {}), '(self.input_size * 541, 128)\n', (7152, 7180), False, 'from torch import nn\n'), ((7241, 7268), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'num_classes'], {}), '(128, num_classes)\n', (7250, 7268), False, 'from torch import nn\n'), ((7562, 7606), 'torch.hstack', 'torch.hstack', (['(output_alpha1, output_alpha2)'], {}), '((output_alpha1, output_alpha2))\n', (7574, 7606), False, 'import torch\n'), ((7621, 7652), 'torch.flatten', 'torch.flatten', 
(['tmp'], {'start_dim': '(1)'}), '(tmp, start_dim=1)\n', (7634, 7652), False, 'import torch\n'), ((8426, 8435), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8433, 8435), False, 'from torch import nn\n'), ((9123, 9239), 'torch.nn.Linear', 'nn.Linear', (['(input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.\n linea_multiplier)', 'num_classes'], {}), '(input_size * self.linea_multiplier + 363 * self.linea_multiplier +\n self.linea_multiplier, num_classes)\n', (9132, 9239), False, 'from torch import nn\n'), ((9490, 9516), 'torch.squeeze', 'torch.squeeze', (['output_beta'], {}), '(output_beta)\n', (9503, 9516), False, 'import torch\n'), ((10294, 10306), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (10304, 10306), False, 'from torch import nn\n'), ((10624, 10746), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'batch_first': '(True)', 'bidirectional': '(False)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, batch_first=True, bidirectional=False)\n', (10631, 10746), False, 'from torch import nn\n'), ((11787, 11796), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11794, 11796), False, 'from torch import nn\n'), ((14779, 14791), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (14789, 14791), False, 'from torch import nn\n'), ((15113, 15234), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'batch_first': '(True)', 'bidirectional': '(False)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, batch_first=True, bidirectional=False)\n', (15119, 15234), False, 'from torch import nn\n'), ((15289, 15410), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'batch_first': '(True)', 'bidirectional': '(False)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, 
batch_first=True, bidirectional=False)\n', (15295, 15410), False, 'from torch import nn\n'), ((15460, 15493), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 541)', '(128)'], {}), '(hidden_size * 541, 128)\n', (15469, 15493), False, 'from torch import nn\n'), ((15554, 15581), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'num_classes'], {}), '(128, num_classes)\n', (15563, 15581), False, 'from torch import nn\n'), ((15632, 15641), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15639, 15641), False, 'from torch import nn\n'), ((16347, 16391), 'torch.hstack', 'torch.hstack', (['(output_alpha1, output_alpha2)'], {}), '((output_alpha1, output_alpha2))\n', (16359, 16391), False, 'import torch\n'), ((16406, 16437), 'torch.flatten', 'torch.flatten', (['tmp'], {'start_dim': '(1)'}), '(tmp, start_dim=1)\n', (16419, 16437), False, 'import torch\n'), ((17268, 17280), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (17278, 17280), False, 'from torch import nn\n'), ((17792, 17928), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'self.input_size', 'hidden_size': 'self.hidden_size', 'num_layers': 'self.num_layers', 'batch_first': '(True)', 'bidirectional': '(False)'}), '(input_size=self.input_size, hidden_size=self.hidden_size, num_layers\n =self.num_layers, batch_first=True, bidirectional=False)\n', (17798, 17928), False, 'from torch import nn\n'), ((18021, 18153), 'torch.nn.Linear', 'nn.Linear', (['((input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.\n linea_multiplier) * hidden_size)', 'num_classes'], {}), '((input_size * self.linea_multiplier + 363 * self.linea_multiplier +\n self.linea_multiplier) * hidden_size, num_classes)\n', (18030, 18153), False, 'from torch import nn\n'), ((18195, 18204), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (18202, 18204), False, 'from torch import nn\n'), ((18577, 18616), 'torch.flatten', 'torch.flatten', (['output_beta'], {'start_dim': '(1)'}), '(output_beta, start_dim=1)\n', (18590, 18616), False, 'import torch\n'), 
((20476, 20504), 'torch.cat', 'torch.cat', (['hidden_seq'], {'dim': '(0)'}), '(hidden_seq, dim=0)\n', (20485, 20504), False, 'import torch\n'), ((21301, 21313), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (21311, 21313), False, 'from torch import nn\n'), ((21827, 21860), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 541)', '(128)'], {}), '(hidden_size * 541, 128)\n', (21836, 21860), False, 'from torch import nn\n'), ((21921, 21948), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'num_classes'], {}), '(128, num_classes)\n', (21930, 21948), False, 'from torch import nn\n'), ((21999, 22008), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (22006, 22008), False, 'from torch import nn\n'), ((23082, 23106), 'torch.hstack', 'torch.hstack', (['(oa1, oa2)'], {}), '((oa1, oa2))\n', (23094, 23106), False, 'import torch\n'), ((23121, 23152), 'torch.flatten', 'torch.flatten', (['tmp'], {'start_dim': '(1)'}), '(tmp, start_dim=1)\n', (23134, 23152), False, 'import torch\n'), ((24027, 24039), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (24037, 24039), False, 'from torch import nn\n'), ((24652, 24784), 'torch.nn.Linear', 'nn.Linear', (['((input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.\n linea_multiplier) * hidden_size)', 'num_classes'], {}), '((input_size * self.linea_multiplier + 363 * self.linea_multiplier +\n self.linea_multiplier) * hidden_size, num_classes)\n', (24661, 24784), False, 'from torch import nn\n'), ((24826, 24835), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (24833, 24835), False, 'from torch import nn\n'), ((25407, 25438), 'torch.flatten', 'torch.flatten', (['oa1'], {'start_dim': '(1)'}), '(oa1, start_dim=1)\n', (25420, 25438), False, 'import torch\n'), ((27379, 27407), 'torch.cat', 'torch.cat', (['hidden_seq'], {'dim': '(0)'}), '(hidden_seq, dim=0)\n', (27388, 27407), False, 'import torch\n'), ((28305, 28317), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (28315, 28317), False, 'from torch import nn\n'), ((28845, 
28878), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 541)', '(128)'], {}), '(hidden_size * 541, 128)\n', (28854, 28878), False, 'from torch import nn\n'), ((28939, 28966), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'num_classes'], {}), '(128, num_classes)\n', (28948, 28966), False, 'from torch import nn\n'), ((29017, 29026), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (29024, 29026), False, 'from torch import nn\n'), ((30035, 30059), 'torch.hstack', 'torch.hstack', (['(oa1, oa2)'], {}), '((oa1, oa2))\n', (30047, 30059), False, 'import torch\n'), ((30074, 30105), 'torch.flatten', 'torch.flatten', (['tmp'], {'start_dim': '(1)'}), '(tmp, start_dim=1)\n', (30087, 30105), False, 'import torch\n'), ((30958, 30970), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (30968, 30970), False, 'from torch import nn\n'), ((31426, 31563), 'torch.nn.Linear', 'nn.Linear', (['((input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.\n linea_multiplier) * self.hidden_size)', 'num_classes'], {}), '((input_size * self.linea_multiplier + 363 * self.linea_multiplier +\n self.linea_multiplier) * self.hidden_size, num_classes)\n', (31435, 31563), False, 'from torch import nn\n'), ((31605, 31614), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (31612, 31614), False, 'from torch import nn\n'), ((32144, 32175), 'torch.flatten', 'torch.flatten', (['oa1'], {'start_dim': '(1)'}), '(oa1, start_dim=1)\n', (32157, 32175), False, 'import torch\n'), ((4076, 4104), 'torch.nn.Linear', 'nn.Linear', (['units', 'thetas_dim'], {}), '(units, thetas_dim)\n', (4085, 4104), False, 'from torch import nn\n'), ((4149, 4177), 'torch.nn.Linear', 'nn.Linear', (['units', 'thetas_dim'], {}), '(units, thetas_dim)\n', (4158, 4177), False, 'from torch import nn\n'), ((4208, 4236), 'torch.nn.Linear', 'nn.Linear', (['units', 'thetas_dim'], {}), '(units, thetas_dim)\n', (4217, 4236), False, 'from torch import nn\n'), ((10842, 10964), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 
'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'batch_first': '(True)', 'bidirectional': '(False)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, batch_first=True, bidirectional=False)\n', (10849, 10964), False, 'from torch import nn\n'), ((11024, 11057), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 541)', '(128)'], {}), '(hidden_size * 541, 128)\n', (11033, 11057), False, 'from torch import nn\n'), ((11122, 11149), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'num_classes'], {}), '(128, num_classes)\n', (11131, 11149), False, 'from torch import nn\n'), ((11442, 11578), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.input_size', 'hidden_size': 'self.hidden_size', 'num_layers': 'self.num_layers', 'batch_first': '(True)', 'bidirectional': '(False)'}), '(input_size=self.input_size, hidden_size=self.hidden_size,\n num_layers=self.num_layers, batch_first=True, bidirectional=False)\n', (11449, 11578), False, 'from torch import nn\n'), ((11636, 11752), 'torch.nn.Linear', 'nn.Linear', (['(input_size * self.linea_multiplier + 363 * self.linea_multiplier + self.\n linea_multiplier)', 'num_classes'], {}), '(input_size * self.linea_multiplier + 363 * self.linea_multiplier +\n self.linea_multiplier, num_classes)\n', (11645, 11752), False, 'from torch import nn\n'), ((13151, 13195), 'torch.hstack', 'torch.hstack', (['(output_alpha1, output_alpha2)'], {}), '((output_alpha1, output_alpha2))\n', (13163, 13195), False, 'import torch\n'), ((13214, 13245), 'torch.flatten', 'torch.flatten', (['tmp'], {'start_dim': '(1)'}), '(tmp, start_dim=1)\n', (13227, 13245), False, 'import torch\n'), ((13957, 13983), 'torch.squeeze', 'torch.squeeze', (['output_beta'], {}), '(output_beta)\n', (13970, 13983), False, 'import torch\n'), ((18990, 19031), 'torch.Tensor', 'torch.Tensor', (['(4 * hidden_size)', 'input_size'], {}), '(4 * hidden_size, input_size)\n', (19002, 19031), False, 'import torch\n'), ((19071, 19113), 'torch.Tensor', 'torch.Tensor', 
(['(4 * hidden_size)', 'hidden_size'], {}), '(4 * hidden_size, hidden_size)\n', (19083, 19113), False, 'import torch\n'), ((19152, 19181), 'torch.Tensor', 'torch.Tensor', (['(4 * hidden_size)'], {}), '(4 * hidden_size)\n', (19164, 19181), False, 'import torch\n'), ((19219, 19248), 'torch.Tensor', 'torch.Tensor', (['(4 * hidden_size)'], {}), '(4 * hidden_size)\n', (19231, 19248), False, 'import torch\n'), ((19291, 19316), 'torch.Tensor', 'torch.Tensor', (['hidden_size'], {}), '(hidden_size)\n', (19303, 19316), False, 'import torch\n'), ((19358, 19383), 'torch.Tensor', 'torch.Tensor', (['hidden_size'], {}), '(hidden_size)\n', (19370, 19383), False, 'import torch\n'), ((19425, 19450), 'torch.Tensor', 'torch.Tensor', (['hidden_size'], {}), '(hidden_size)\n', (19437, 19450), False, 'import torch\n'), ((19537, 19564), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (19546, 19564), False, 'import math\n'), ((19618, 19655), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['weight', '(-stdv)', 'stdv'], {}), '(weight, -stdv, stdv)\n', (19634, 19655), False, 'from torch import nn\n'), ((20139, 20179), 'torch.sigmoid', 'torch.sigmoid', (['(i + self.weight_ch_i * cx)'], {}), '(i + self.weight_ch_i * cx)\n', (20152, 20179), False, 'import torch\n'), ((20198, 20238), 'torch.sigmoid', 'torch.sigmoid', (['(f + self.weight_ch_f * cx)'], {}), '(f + self.weight_ch_f * cx)\n', (20211, 20238), False, 'import torch\n'), ((20258, 20272), 'torch.tanh', 'torch.tanh', (['_c'], {}), '(_c)\n', (20268, 20272), False, 'import torch\n'), ((20328, 20368), 'torch.sigmoid', 'torch.sigmoid', (['(o + self.weight_ch_o * cy)'], {}), '(o + self.weight_ch_o * cy)\n', (20341, 20368), False, 'import torch\n'), ((25805, 25846), 'torch.Tensor', 'torch.Tensor', (['input_size', '(hidden_size * 4)'], {}), '(input_size, hidden_size * 4)\n', (25817, 25846), False, 'import torch\n'), ((25878, 25920), 'torch.Tensor', 'torch.Tensor', (['hidden_size', '(hidden_size * 4)'], {}), 
'(hidden_size, hidden_size * 4)\n', (25890, 25920), False, 'import torch\n'), ((25955, 25984), 'torch.Tensor', 'torch.Tensor', (['(hidden_size * 4)'], {}), '(hidden_size * 4)\n', (25967, 25984), False, 'import torch\n'), ((26064, 26091), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (26073, 26091), False, 'import math\n'), ((32818, 32844), 'torch.cat', 'torch.cat', (['(x1, x2)'], {'dim': '(1)'}), '((x1, x2), dim=1)\n', (32827, 32844), False, 'import torch\n'), ((2732, 2764), 'torch.zeros', 'torch.zeros', ([], {'size': 'backcast.shape'}), '(size=backcast.shape)\n', (2743, 2764), False, 'import torch\n'), ((20392, 20406), 'torch.tanh', 'torch.tanh', (['cy'], {}), '(cy)\n', (20402, 20406), False, 'import torch\n'), ((26771, 26806), 'torch.tanh', 'torch.tanh', (['gates[:, HS * 2:HS * 3]'], {}), '(gates[:, HS * 2:HS * 3])\n', (26781, 26806), False, 'import torch\n'), ((26854, 26882), 'torch.sigmoid', 'torch.sigmoid', (['gates[:, :HS]'], {}), '(gates[:, :HS])\n', (26867, 26882), False, 'import torch\n'), ((26909, 26943), 'torch.sigmoid', 'torch.sigmoid', (['gates[:, HS:HS * 2]'], {}), '(gates[:, HS:HS * 2])\n', (26922, 26943), False, 'import torch\n'), ((26971, 27003), 'torch.sigmoid', 'torch.sigmoid', (['gates[:, HS * 3:]'], {}), '(gates[:, HS * 3:])\n', (26984, 27003), False, 'import torch\n'), ((27180, 27201), 'torch.tanh', 'torch.tanh', (['(o_t * c_t)'], {}), '(o_t * c_t)\n', (27190, 27201), False, 'import torch\n'), ((32875, 32886), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (32881, 32886), True, 'from torch.nn import functional as F\n'), ((15855, 15877), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (15867, 15877), False, 'import torch\n'), ((16066, 16088), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (16078, 16088), False, 'import torch\n'), ((18414, 18436), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (18426, 18436), False, 'import 
torch\n'), ((22179, 22201), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (22191, 22201), False, 'import torch\n'), ((22347, 22369), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (22359, 22369), False, 'import torch\n'), ((22524, 22546), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (22536, 22546), False, 'import torch\n'), ((22699, 22721), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (22711, 22721), False, 'import torch\n'), ((25009, 25031), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (25021, 25031), False, 'import torch\n'), ((25186, 25208), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (25198, 25208), False, 'import torch\n'), ((27292, 27307), 'torch.tanh', 'torch.tanh', (['c_t'], {}), '(c_t)\n', (27302, 27307), False, 'import torch\n'), ((29197, 29219), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (29209, 29219), False, 'import torch\n'), ((29365, 29387), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (29377, 29387), False, 'import torch\n'), ((29542, 29564), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (29554, 29564), False, 'import torch\n'), ((29717, 29739), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (29729, 29739), False, 'import torch\n'), ((31788, 31810), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (31800, 31810), False, 'import torch\n'), ((31965, 31987), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (31977, 31987), False, 'import torch\n'), ((12054, 12076), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (12066, 12076), False, 'import torch\n'), ((12270, 12292), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (12282, 12292), False, 'import torch\n'), ((12488, 12510), 'torch.device', 
'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (12500, 12510), False, 'import torch\n'), ((12704, 12726), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (12716, 12726), False, 'import torch\n'), ((13595, 13617), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (13607, 13617), False, 'import torch\n'), ((13811, 13833), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (13823, 13833), False, 'import torch\n'), ((26608, 26629), 'torch.mm', 'torch.mm', (['x_t', 'self.U'], {}), '(x_t, self.U)\n', (26616, 26629), False, 'import torch\n'), ((26632, 26653), 'torch.mm', 'torch.mm', (['c_t', 'self.W'], {}), '(c_t, self.W)\n', (26640, 26653), False, 'import torch\n'), ((27100, 27139), 'torch.sigmoid', 'torch.sigmoid', (['(x_t @ self.U + self.bias)'], {}), '(x_t @ self.U + self.bias)\n', (27113, 27139), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gin
import numpy as np
from slac.agents.slac.model_distribution_network import Bernoulli
from slac.agents.slac.model_distribution_network import Compressor
from slac.agents.slac.model_distribution_network import ConstantMultivariateNormalDiag
from slac.agents.slac.model_distribution_network import Decoder
from slac.agents.slac.model_distribution_network import MultivariateNormalDiag
from slac.agents.slac.model_distribution_network import Normal
from slac.utils import nest_utils
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.trajectories import time_step as ts
tfd = tfp.distributions
@gin.configurable
class SlacModelDistributionNetwork(tf.Module):
  """Equivalent to model_distribution_network.ModelDistributionNetwork.

  We keep the implementations separate to minimize cluttering the implementation
  of the main method.

  Two-level stochastic latent variable model (SLAC): a small latent z^1 and a
  larger latent z^2 evolve over time conditioned on actions; images are decoded
  from (z^1, z^2). All methods build TF graph ops (TF1-style; note the use of
  `shape[...].value`).
  """

  def __init__(self,
               observation_spec,
               action_spec,
               latent1_first_prior_distribution_ctor=ConstantMultivariateNormalDiag,
               latent1_prior_distribution_ctor=MultivariateNormalDiag,
               latent1_posterior_distribution_ctor=MultivariateNormalDiag,
               latent2_prior_distribution_ctor=MultivariateNormalDiag,
               latent2_posterior_distribution_ctor=MultivariateNormalDiag,
               base_depth=32,
               latent1_size=32,
               latent2_size=256,
               kl_analytic=True,
               skip_first_kl=False,
               sequential_latent1_prior=True,
               sequential_latent2_prior=True,
               sequential_latent1_posterior=True,
               sequential_latent2_posterior=True,
               model_reward=False,
               model_discount=False,
               decoder_stddev=np.sqrt(0.1, dtype=np.float32),
               reward_stddev=None,
               name=None):
    """Builds the prior/posterior networks, compressor, decoder and predictors.

    Args:
      observation_spec: spec of the image observations (stored, not used here).
      action_spec: spec of the actions (stored, not used here).
      latent1_first_prior_distribution_ctor: ctor for p(z_1^1).
      latent1_prior_distribution_ctor: ctor for p(z_{t+1}^1 | ...).
      latent1_posterior_distribution_ctor: ctor for q(z^1 | ...).
      latent2_prior_distribution_ctor: ctor for p(z^2 | ...).
      latent2_posterior_distribution_ctor: ctor for q(z^2 | ...).
      base_depth: channel multiplier; hidden layers use 8 * base_depth units.
      latent1_size: dimensionality of the first latent.
      latent2_size: dimensionality of the second latent.
      kl_analytic: if True, use analytic KL; otherwise a single-sample estimate.
      skip_first_kl: if True, drop the t=0 term from the KL sum.
      sequential_latent1_prior: if False, p(z_{t+1}^1) ignores history.
      sequential_latent2_prior: if False, p(z_{t+1}^2 | z_{t+1}^1) only.
      sequential_latent1_posterior: if False, q(z_{t+1}^1 | x_{t+1}) only.
      sequential_latent2_posterior: if False, q(z_{t+1}^2 | z_{t+1}^1) only.
      model_reward: if True, also model p(r_t | ...).
      model_discount: if True, also model p(d_t | ...).
      decoder_stddev: fixed stddev of the image decoder distribution.
      reward_stddev: stddev of the reward predictor (None -> learned/default).
      name: module name.
    """
    super(SlacModelDistributionNetwork, self).__init__(name=name)
    self.observation_spec = observation_spec
    self.action_spec = action_spec
    self.base_depth = base_depth
    self.latent1_size = latent1_size
    self.latent2_size = latent2_size
    self.kl_analytic = kl_analytic
    self.skip_first_kl = skip_first_kl
    self.model_reward = model_reward
    self.model_discount = model_discount

    # p(z_1^1)
    self.latent1_first_prior = latent1_first_prior_distribution_ctor(latent1_size)
    # p(z_1^2 | z_1^1)
    self.latent2_first_prior = latent2_prior_distribution_ctor(8 * base_depth, latent2_size)
    if sequential_latent1_prior:
      # p(z_{t+1}^1 | z_t^2, a_t)
      self.latent1_prior = latent1_prior_distribution_ctor(8 * base_depth, latent1_size)
    else:
      # p(z_{t+1}^1)
      self.latent1_prior = lambda prev_latent, prev_action: self.latent1_first_prior(prev_latent[..., 0])  # prev_latent is only used to determine the batch shape
    if sequential_latent2_prior:
      # p(z_{t+1}^2 | z_{t+1}^1, z_t^2, a_t)
      self.latent2_prior = latent2_prior_distribution_ctor(8 * base_depth, latent2_size)
    else:
      # p(z_{t+1}^2 | z_{t+1}^1)
      self.latent2_prior = lambda latent1, prev_latent2, prev_action: self.latent2_first_prior(latent1)

    # q(z_1^1 | x_1)
    self.latent1_first_posterior = latent1_posterior_distribution_ctor(8 * base_depth, latent1_size)
    # q(z_1^2 | z_1^1) = p(z_1^2 | z_1^1)
    if latent2_posterior_distribution_ctor == latent2_prior_distribution_ctor:
      self.latent2_first_posterior = self.latent2_first_prior  # share
    else:
      self.latent2_first_posterior = latent2_posterior_distribution_ctor(8 * base_depth, latent2_size)
    if sequential_latent1_posterior:
      # q(z_{t+1}^1 | x_{t+1}, z_t^2, a_t)
      self.latent1_posterior = latent1_posterior_distribution_ctor(8 * base_depth, latent1_size)
    else:
      # q(z_{t+1}^1 | x_{t+1})
      self.latent1_posterior = lambda feature, prev_latent2, prev_action: self.latent1_first_posterior(feature)
    if sequential_latent2_posterior:
      # q(z_{t+1}^2 | z_{t+1}^1, z_t^2, a_t) = p(z_{t+1}^2 | z_{t+1}^1, z_t^2, a_t)
      if latent2_posterior_distribution_ctor == latent2_prior_distribution_ctor:
        self.latent2_posterior = self.latent2_prior
      else:
        self.latent2_posterior = latent2_posterior_distribution_ctor(8 * base_depth, latent2_size)
    else:
      # q(z_{t+1}^2 | z_{t+1}^1) = p(z_{t+1}^2 | z_{t+1}^1)
      self.latent2_posterior = lambda latent1, prev_latent2, prev_action: self.latent2_first_posterior(latent1)

    # compresses x_t into a vector
    self.compressor = Compressor(base_depth, 8 * base_depth)
    # p(x_t | z_t^1, z_t^2)
    self.decoder = Decoder(base_depth, scale=decoder_stddev)
    if self.model_reward:
      # p(r_t | z_t^1, z_t^2, a_t, z_{t+1}^1, z_{t+1}^2)
      self.reward_predictor = Normal(8 * base_depth, scale=reward_stddev)
    else:
      self.reward_predictor = None
    if self.model_discount:
      # p(d_t | z_{t+1}^1, z_{t+1}^2)
      self.discount_predictor = Bernoulli(8 * base_depth)
    else:
      self.discount_predictor = None

  @property
  def state_size(self):
    """Size of the concatenated latent state (z^1, z^2)."""
    return self.latent1_size + self.latent2_size

  def compute_loss(self, images, actions, step_types, rewards=None, discounts=None, latent_posterior_samples_and_dists=None):
    """Computes the negative ELBO and a dict of diagnostic tensors.

    Args:
      images: image sequence; presumably [batch, time+1, ...] — the time axis
        is indexed like step_types. TODO confirm against caller.
      actions: action sequence, [batch, time, action_dim]-like.
      step_types: [batch, time+1] tf-agents StepType tensor; FIRST marks
        episode resets, at which the priors restart from t=1 statistics.
      rewards: required iff model_reward; [batch, time].
      discounts: required iff model_discount; [batch, time].
      latent_posterior_samples_and_dists: optional precomputed result of
        sample_posterior, to avoid recomputing it.

    Returns:
      (loss, outputs): loss is the scalar negative mean ELBO; outputs holds
      KL / likelihood summaries and posterior/prior/conditional-prior images
      for visualization.
    """
    sequence_length = step_types.shape[1].value - 1

    if latent_posterior_samples_and_dists is None:
      latent_posterior_samples_and_dists = self.sample_posterior(images, actions, step_types)
    (latent1_posterior_samples, latent2_posterior_samples), (latent1_posterior_dists, latent2_posterior_dists) = (
        latent_posterior_samples_and_dists)
    (latent1_prior_samples, latent2_prior_samples), _ = self.sample_prior_or_posterior(actions, step_types)  # for visualization
    (latent1_conditional_prior_samples, latent2_conditional_prior_samples), _ = self.sample_prior_or_posterior(
        actions, step_types, images=images[:, :1])  # for visualization. condition on first image only

    def where_and_concat(reset_masks, first_prior_tensors, after_first_prior_tensors):
      # At reset steps, substitute the t=1-style "first prior" statistics for
      # the sequential-prior statistics, then prepend the t=0 first prior.
      after_first_prior_tensors = tf.where(reset_masks[:, 1:], first_prior_tensors[:, 1:], after_first_prior_tensors)
      prior_tensors = tf.concat([first_prior_tensors[:, 0:1], after_first_prior_tensors], axis=1)
      return prior_tensors

    # t=0 always counts as a reset; later steps reset where StepType is FIRST.
    reset_masks = tf.concat([tf.ones_like(step_types[:, 0:1], dtype=tf.bool),
                             tf.equal(step_types[:, 1:], ts.StepType.FIRST)], axis=1)

    latent1_reset_masks = tf.tile(reset_masks[:, :, None], [1, 1, self.latent1_size])
    latent1_first_prior_dists = self.latent1_first_prior(step_types)
    # these distributions start at t=1 and the inputs are from t-1
    latent1_after_first_prior_dists = self.latent1_prior(
        latent2_posterior_samples[:, :sequence_length],
        actions[:, :sequence_length])
    latent1_prior_dists = nest_utils.map_distribution_structure(
        functools.partial(where_and_concat, latent1_reset_masks),
        latent1_first_prior_dists,
        latent1_after_first_prior_dists)

    latent2_reset_masks = tf.tile(reset_masks[:, :, None], [1, 1, self.latent2_size])
    latent2_first_prior_dists = self.latent2_first_prior(latent1_posterior_samples)
    # these distributions start at t=1 and the last 2 inputs are from t-1
    latent2_after_first_prior_dists = self.latent2_prior(
        latent1_posterior_samples[:, 1:sequence_length+1],
        latent2_posterior_samples[:, :sequence_length],
        actions[:, :sequence_length])
    latent2_prior_dists = nest_utils.map_distribution_structure(
        functools.partial(where_and_concat, latent2_reset_masks),
        latent2_first_prior_dists,
        latent2_after_first_prior_dists)

    outputs = {}

    if self.kl_analytic:
      latent1_kl_divergences = tfd.kl_divergence(latent1_posterior_dists, latent1_prior_dists)
    else:
      latent1_kl_divergences = (latent1_posterior_dists.log_prob(latent1_posterior_samples)
                                - latent1_prior_dists.log_prob(latent1_posterior_samples))
    if self.skip_first_kl:
      latent1_kl_divergences = latent1_kl_divergences[:, 1:]
    latent1_kl_divergences = tf.reduce_sum(latent1_kl_divergences, axis=1)
    outputs.update({
      'latent1_kl_divergence': tf.reduce_mean(latent1_kl_divergences),
    })
    if self.latent2_posterior == self.latent2_prior:
      # Shared posterior/prior => KL is exactly zero; keep it a scalar so the
      # reduce_sum below (which needs a time axis) is deliberately skipped.
      latent2_kl_divergences = 0.0
    else:
      if self.kl_analytic:
        latent2_kl_divergences = tfd.kl_divergence(latent2_posterior_dists, latent2_prior_dists)
      else:
        latent2_kl_divergences = (latent2_posterior_dists.log_prob(latent2_posterior_samples)
                                  - latent2_prior_dists.log_prob(latent2_posterior_samples))
      if self.skip_first_kl:
        latent2_kl_divergences = latent2_kl_divergences[:, 1:]
      latent2_kl_divergences = tf.reduce_sum(latent2_kl_divergences, axis=1)
    outputs.update({
      'latent2_kl_divergence': tf.reduce_mean(latent2_kl_divergences),
    })
    outputs.update({
      'kl_divergence': tf.reduce_mean(latent1_kl_divergences + latent2_kl_divergences),
    })

    likelihood_dists = self.decoder(latent1_posterior_samples, latent2_posterior_samples)
    likelihood_log_probs = likelihood_dists.log_prob(images)
    likelihood_log_probs = tf.reduce_sum(likelihood_log_probs, axis=1)
    reconstruction_error = tf.reduce_sum(tf.square(images - likelihood_dists.distribution.loc),
                                         axis=list(range(-len(likelihood_dists.event_shape), 0)))
    reconstruction_error = tf.reduce_sum(reconstruction_error, axis=1)
    outputs.update({
      'log_likelihood': tf.reduce_mean(likelihood_log_probs),
      'reconstruction_error': tf.reduce_mean(reconstruction_error),
    })

    # summed over the time dimension
    elbo = likelihood_log_probs - latent1_kl_divergences - latent2_kl_divergences

    if self.model_reward:
      reward_dists = self.reward_predictor(
          latent1_posterior_samples[:, :sequence_length],
          latent2_posterior_samples[:, :sequence_length],
          actions[:, :sequence_length],
          latent1_posterior_samples[:, 1:sequence_length + 1],
          latent2_posterior_samples[:, 1:sequence_length + 1])
      # Rewards at LAST steps are not modeled (no valid transition follows).
      reward_valid_mask = tf.cast(tf.not_equal(step_types[:, :sequence_length], ts.StepType.LAST), tf.float32)
      reward_log_probs = reward_dists.log_prob(rewards[:, :sequence_length])
      reward_log_probs = tf.reduce_sum(reward_log_probs * reward_valid_mask, axis=1)
      reward_reconstruction_error = tf.square(rewards[:, :sequence_length] - reward_dists.loc)
      reward_reconstruction_error = tf.reduce_sum(reward_reconstruction_error * reward_valid_mask, axis=1)
      outputs.update({
        'reward_log_likelihood': tf.reduce_mean(reward_log_probs),
        'reward_reconstruction_error': tf.reduce_mean(reward_reconstruction_error),
      })
      elbo += reward_log_probs

    if self.model_discount:
      discount_dists = self.discount_predictor(
          latent1_posterior_samples[:, 1:sequence_length + 1],
          latent2_posterior_samples[:, 1:sequence_length + 1])
      discount_log_probs = discount_dists.log_prob(discounts[:, :sequence_length])
      discount_log_probs = tf.reduce_sum(discount_log_probs, axis=1)
      discount_accuracy = tf.cast(
          tf.equal(tf.cast(discount_dists.mode(), tf.float32), discounts[:, :sequence_length]), tf.float32)
      discount_accuracy = tf.reduce_sum(discount_accuracy, axis=1)
      outputs.update({
        'discount_log_likelihood': tf.reduce_mean(discount_log_probs),
        'discount_accuracy': tf.reduce_mean(discount_accuracy),
      })
      elbo += discount_log_probs

    # average over the batch dimension
    loss = -tf.reduce_mean(elbo)

    posterior_images = likelihood_dists.mean()
    prior_images = self.decoder(latent1_prior_samples, latent2_prior_samples).mean()
    conditional_prior_images = self.decoder(latent1_conditional_prior_samples, latent2_conditional_prior_samples).mean()

    outputs.update({
      'elbo': tf.reduce_mean(elbo),
      'images': images,
      'posterior_images': posterior_images,
      'prior_images': prior_images,
      'conditional_prior_images': conditional_prior_images,
    })
    return loss, outputs

  def sample_prior_or_posterior(self, actions, step_types=None, images=None):
    """Samples from the prior, except for the first time steps in which conditioning images are given.

    Args:
      actions: [batch, time, ...] actions; truncated to sequence_length when
        step_types is given.
      step_types: optional [batch, time+1] StepType tensor; when None, a MID
        sequence is synthesized and sequence_length is taken from actions.
      images: optional images for the leading time steps; the posterior is
        used for exactly those steps, the prior afterwards.

    Returns:
      ((latent1_samples, latent2_samples), (latent1_dists, latent2_dists)),
      each samples tensor stacked on axis=1 (time); dists may be None when the
      per-step distribution structures cannot be stacked.
    """
    if step_types is None:
      batch_size = tf.shape(actions)[0]
      sequence_length = actions.shape[1].value  # should be statically defined
      step_types = tf.fill(
          [batch_size, sequence_length + 1], ts.StepType.MID)
    else:
      sequence_length = step_types.shape[1].value - 1
      actions = actions[:, :sequence_length]

    if images is not None:
      features = self.compressor(images)

    # swap batch and time axes
    actions = tf.transpose(actions, [1, 0, 2])
    step_types = tf.transpose(step_types, [1, 0])
    if images is not None:
      features = tf.transpose(features, [1, 0, 2])

    latent1_dists = []
    latent1_samples = []
    latent2_dists = []
    latent2_samples = []
    for t in range(sequence_length + 1):
      is_conditional = images is not None and (t < images.shape[1].value)
      if t == 0:
        if is_conditional:
          latent1_dist = self.latent1_first_posterior(features[t])
        else:
          latent1_dist = self.latent1_first_prior(step_types[t])  # step_types is only used to infer batch_size
        latent1_sample = latent1_dist.sample()
        if is_conditional:
          latent2_dist = self.latent2_first_posterior(latent1_sample)
        else:
          latent2_dist = self.latent2_first_prior(latent1_sample)
        latent2_sample = latent2_dist.sample()
      else:
        reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
        if is_conditional:
          latent1_first_dist = self.latent1_first_posterior(features[t])
          latent1_dist = self.latent1_posterior(features[t], latent2_samples[t-1], actions[t-1])
        else:
          latent1_first_dist = self.latent1_first_prior(step_types[t])
          latent1_dist = self.latent1_prior(latent2_samples[t-1], actions[t-1])
        # Where the episode resets, fall back to the first-step distribution.
        latent1_dist = nest_utils.map_distribution_structure(
            functools.partial(tf.where, reset_mask), latent1_first_dist, latent1_dist)
        latent1_sample = latent1_dist.sample()

        if is_conditional:
          latent2_first_dist = self.latent2_first_posterior(latent1_sample)
          latent2_dist = self.latent2_posterior(latent1_sample, latent2_samples[t-1], actions[t-1])
        else:
          latent2_first_dist = self.latent2_first_prior(latent1_sample)
          latent2_dist = self.latent2_prior(latent1_sample, latent2_samples[t-1], actions[t-1])
        latent2_dist = nest_utils.map_distribution_structure(
            functools.partial(tf.where, reset_mask), latent2_first_dist, latent2_dist)
        latent2_sample = latent2_dist.sample()

      latent1_dists.append(latent1_dist)
      latent1_samples.append(latent1_sample)
      latent2_dists.append(latent2_dist)
      latent2_samples.append(latent2_sample)

    # Stacking the per-step distributions can fail when prior and posterior
    # steps have different structures; in that case return None for the dists.
    # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
    try:
      latent1_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent1_dists)
    except Exception:
      latent1_dists = None
    latent1_samples = tf.stack(latent1_samples, axis=1)
    try:
      latent2_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent2_dists)
    except Exception:
      latent2_dists = None
    latent2_samples = tf.stack(latent2_samples, axis=1)
    return (latent1_samples, latent2_samples), (latent1_dists, latent2_dists)

  def sample_posterior(self, images, actions, step_types, features=None):
    """Samples the full posterior q(z^1, z^2 | x, a) over the sequence.

    Args:
      images: image sequence (ignored when features is given).
      actions: [batch, time, ...] actions.
      step_types: [batch, time+1] StepType tensor; FIRST triggers a reset to
        the first-step posterior.
      features: optional precomputed compressor features for images.

    Returns:
      ((latent1_samples, latent2_samples), (latent1_dists, latent2_dists)),
      all stacked on axis=1 (time).
    """
    sequence_length = step_types.shape[1].value - 1
    actions = actions[:, :sequence_length]

    if features is None:
      features = self.compressor(images)

    # swap batch and time axes
    features = tf.transpose(features, [1, 0, 2])
    actions = tf.transpose(actions, [1, 0, 2])
    step_types = tf.transpose(step_types, [1, 0])

    latent1_dists = []
    latent1_samples = []
    latent2_dists = []
    latent2_samples = []
    for t in range(sequence_length + 1):
      if t == 0:
        latent1_dist = self.latent1_first_posterior(features[t])
        latent1_sample = latent1_dist.sample()
        latent2_dist = self.latent2_first_posterior(latent1_sample)
        latent2_sample = latent2_dist.sample()
      else:
        prev_latent2_sample = latent2_samples[t-1]
        reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
        latent1_first_dist = self.latent1_first_posterior(features[t])
        latent1_dist = self.latent1_posterior(features[t], prev_latent2_sample, actions[t-1])
        # Where the episode resets, fall back to the first-step posterior.
        latent1_dist = nest_utils.map_distribution_structure(
            functools.partial(tf.where, reset_mask), latent1_first_dist, latent1_dist)
        latent1_sample = latent1_dist.sample()

        latent2_first_dist = self.latent2_first_posterior(latent1_sample)
        latent2_dist = self.latent2_posterior(latent1_sample, prev_latent2_sample, actions[t-1])
        latent2_dist = nest_utils.map_distribution_structure(
            functools.partial(tf.where, reset_mask), latent2_first_dist, latent2_dist)
        latent2_sample = latent2_dist.sample()

      latent1_dists.append(latent1_dist)
      latent1_samples.append(latent1_sample)
      latent2_dists.append(latent2_dist)
      latent2_samples.append(latent2_sample)

    latent1_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent1_dists)
    latent1_samples = tf.stack(latent1_samples, axis=1)
    latent2_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent2_dists)
    latent2_samples = tf.stack(latent2_samples, axis=1)
    return (latent1_samples, latent2_samples), (latent1_dists, latent2_dists)
@gin.configurable
class SimpleModelDistributionNetwork(tf.Module):
  """Single-latent variant of SlacModelDistributionNetwork.

  One latent z evolves over time conditioned on actions; images are decoded
  from z. All methods build TF graph ops (TF1-style; note `shape[...].value`).
  """

  def __init__(self,
               observation_spec,
               action_spec,
               base_depth=32,
               latent_size=256,
               kl_analytic=True,
               sequential_latent_prior=True,
               sequential_latent_posterior=True,
               model_reward=False,
               model_discount=False,
               decoder_stddev=np.sqrt(0.1, dtype=np.float32),
               reward_stddev=None,
               name=None):
    """Builds the prior/posterior networks, compressor, decoder and predictors.

    Args:
      observation_spec: spec of the image observations (stored, not used here).
      action_spec: spec of the actions (stored, not used here).
      base_depth: channel multiplier; hidden layers use 8 * base_depth units.
      latent_size: dimensionality of the latent.
      kl_analytic: if True, use analytic KL; otherwise a single-sample estimate.
      sequential_latent_prior: if False, p(z_{t+1}) ignores history.
      sequential_latent_posterior: if False, q(z_{t+1} | x_{t+1}) only.
      model_reward: if True, also model p(r_t | z_t, a_t, z_{t+1}).
      model_discount: if True, also model p(d_t | z_{t+1}).
      decoder_stddev: fixed stddev of the image decoder distribution.
      reward_stddev: stddev of the reward predictor (None -> learned/default).
      name: module name.
    """
    super(SimpleModelDistributionNetwork, self).__init__(name=name)
    self.observation_spec = observation_spec
    self.action_spec = action_spec
    self.base_depth = base_depth
    self.latent_size = latent_size
    self.kl_analytic = kl_analytic
    self.model_reward = model_reward
    self.model_discount = model_discount

    # p(z_1)
    self.latent_first_prior = ConstantMultivariateNormalDiag(latent_size)
    if sequential_latent_prior:
      # p(z_{t+1} | z_t, a_t)
      self.latent_prior = MultivariateNormalDiag(8 * base_depth, latent_size)
    else:
      # p(z_{t+1})
      self.latent_prior = lambda prev_latent, prev_action: self.latent_first_prior(prev_latent[..., 0])  # prev_latent is only used to determine the batch shape

    # q(z_1 | x_1)
    self.latent_first_posterior = MultivariateNormalDiag(8 * base_depth, latent_size)
    if sequential_latent_posterior:
      # q(z_{t+1} | x_{t+1}, z_t, a_t)
      self.latent_posterior = MultivariateNormalDiag(8 * base_depth, latent_size)
    else:
      # q(z_{t+1} | x_{t+1})
      self.latent_posterior = lambda feature, prev_latent, prev_action: self.latent_first_posterior(feature)

    # compresses x_t into a vector
    self.compressor = Compressor(base_depth, 8 * base_depth)
    # p(x_t | z_t)
    self.decoder = Decoder(base_depth, scale=decoder_stddev)
    if self.model_reward:
      # p(r_t | z_t, a_t, z_{t+1})
      self.reward_predictor = Normal(8 * base_depth, scale=reward_stddev)
    else:
      self.reward_predictor = None
    if self.model_discount:
      # p(d_t | z_{t+1})
      self.discount_predictor = Bernoulli(8 * base_depth)
    else:
      self.discount_predictor = None

  @property
  def state_size(self):
    """Size of the latent state."""
    return self.latent_size

  def compute_loss(self, images, actions, step_types, rewards=None, discounts=None, latent_posterior_samples_and_dists=None):
    """Computes the negative ELBO and a dict of diagnostic tensors.

    Args:
      images: image sequence; presumably [batch, time+1, ...] with the time
        axis indexed like step_types. TODO confirm against caller.
      actions: action sequence, [batch, time, action_dim]-like.
      step_types: [batch, time+1] tf-agents StepType tensor; FIRST marks
        episode resets, at which the prior restarts from t=1 statistics.
      rewards: required iff model_reward; [batch, time].
      discounts: required iff model_discount; [batch, time].
      latent_posterior_samples_and_dists: optional precomputed result of
        sample_posterior, to avoid recomputing it.

    Returns:
      (loss, outputs): loss is the scalar negative mean ELBO; outputs holds
      KL / likelihood summaries and posterior/prior/conditional-prior images
      for visualization.
    """
    sequence_length = step_types.shape[1].value - 1

    if latent_posterior_samples_and_dists is None:
      latent_posterior_samples_and_dists = self.sample_posterior(images, actions, step_types)
    latent_posterior_samples, latent_posterior_dists = latent_posterior_samples_and_dists
    latent_prior_samples, _ = self.sample_prior_or_posterior(actions, step_types)  # for visualization
    latent_conditional_prior_samples, _ = self.sample_prior_or_posterior(
        actions, step_types, images=images[:, :1])  # for visualization. condition on first image only

    def where_and_concat(reset_masks, first_prior_tensors, after_first_prior_tensors):
      # At reset steps, substitute the t=1-style "first prior" statistics for
      # the sequential-prior statistics, then prepend the t=0 first prior.
      after_first_prior_tensors = tf.where(reset_masks[:, 1:], first_prior_tensors[:, 1:], after_first_prior_tensors)
      prior_tensors = tf.concat([first_prior_tensors[:, 0:1], after_first_prior_tensors], axis=1)
      return prior_tensors

    # t=0 always counts as a reset; later steps reset where StepType is FIRST.
    reset_masks = tf.concat([tf.ones_like(step_types[:, 0:1], dtype=tf.bool),
                             tf.equal(step_types[:, 1:], ts.StepType.FIRST)], axis=1)

    latent_reset_masks = tf.tile(reset_masks[:, :, None], [1, 1, self.latent_size])
    latent_first_prior_dists = self.latent_first_prior(step_types)
    # these distributions start at t=1 and the inputs are from t-1
    latent_after_first_prior_dists = self.latent_prior(
        latent_posterior_samples[:, :sequence_length], actions[:, :sequence_length])
    latent_prior_dists = nest_utils.map_distribution_structure(
        functools.partial(where_and_concat, latent_reset_masks),
        latent_first_prior_dists,
        latent_after_first_prior_dists)

    outputs = {}

    if self.kl_analytic:
      latent_kl_divergences = tfd.kl_divergence(latent_posterior_dists, latent_prior_dists)
    else:
      latent_kl_divergences = (latent_posterior_dists.log_prob(latent_posterior_samples)
                               - latent_prior_dists.log_prob(latent_posterior_samples))
    latent_kl_divergences = tf.reduce_sum(latent_kl_divergences, axis=1)
    outputs.update({
      'latent_kl_divergence': tf.reduce_mean(latent_kl_divergences),
    })
    outputs.update({
      'kl_divergence': tf.reduce_mean(latent_kl_divergences),
    })

    likelihood_dists = self.decoder(latent_posterior_samples)
    likelihood_log_probs = likelihood_dists.log_prob(images)
    likelihood_log_probs = tf.reduce_sum(likelihood_log_probs, axis=1)
    reconstruction_error = tf.reduce_sum(tf.square(images - likelihood_dists.distribution.loc),
                                         axis=list(range(-len(likelihood_dists.event_shape), 0)))
    reconstruction_error = tf.reduce_sum(reconstruction_error, axis=1)
    outputs.update({
      'log_likelihood': tf.reduce_mean(likelihood_log_probs),
      'reconstruction_error': tf.reduce_mean(reconstruction_error),
    })

    # summed over the time dimension
    elbo = likelihood_log_probs - latent_kl_divergences

    if self.model_reward:
      reward_dists = self.reward_predictor(
          latent_posterior_samples[:, :sequence_length],
          actions[:, :sequence_length],
          latent_posterior_samples[:, 1:sequence_length + 1])
      # Rewards at LAST steps are not modeled (no valid transition follows).
      reward_valid_mask = tf.cast(tf.not_equal(step_types[:, :sequence_length], ts.StepType.LAST), tf.float32)
      reward_log_probs = reward_dists.log_prob(rewards[:, :sequence_length])
      reward_log_probs = tf.reduce_sum(reward_log_probs * reward_valid_mask, axis=1)
      reward_reconstruction_error = tf.square(rewards[:, :sequence_length] - reward_dists.loc)
      reward_reconstruction_error = tf.reduce_sum(reward_reconstruction_error * reward_valid_mask, axis=1)
      outputs.update({
        'reward_log_likelihood': tf.reduce_mean(reward_log_probs),
        'reward_reconstruction_error': tf.reduce_mean(reward_reconstruction_error),
      })
      elbo += reward_log_probs

    if self.model_discount:
      discount_dists = self.discount_predictor(
          latent_posterior_samples[:, 1:sequence_length + 1])
      discount_log_probs = discount_dists.log_prob(discounts[:, :sequence_length])
      discount_log_probs = tf.reduce_sum(discount_log_probs, axis=1)
      discount_accuracy = tf.cast(
          tf.equal(tf.cast(discount_dists.mode(), tf.float32), discounts[:, :sequence_length]), tf.float32)
      discount_accuracy = tf.reduce_sum(discount_accuracy, axis=1)
      outputs.update({
        'discount_log_likelihood': tf.reduce_mean(discount_log_probs),
        'discount_accuracy': tf.reduce_mean(discount_accuracy),
      })
      elbo += discount_log_probs

    # average over the batch dimension
    loss = -tf.reduce_mean(elbo)

    posterior_images = likelihood_dists.mean()
    prior_images = self.decoder(latent_prior_samples).mean()
    conditional_prior_images = self.decoder(latent_conditional_prior_samples).mean()

    outputs.update({
      'elbo': tf.reduce_mean(elbo),
      'images': images,
      'posterior_images': posterior_images,
      'prior_images': prior_images,
      'conditional_prior_images': conditional_prior_images,
    })
    return loss, outputs

  def sample_prior_or_posterior(self, actions, step_types=None, images=None):
    """Samples from the prior, except for the first time steps in which conditioning images are given.

    Args:
      actions: [batch, time, ...] actions; truncated to sequence_length when
        step_types is given.
      step_types: optional [batch, time+1] StepType tensor; when None, a MID
        sequence is synthesized and sequence_length is taken from actions.
      images: optional images for the leading time steps; the posterior is
        used for exactly those steps, the prior afterwards.

    Returns:
      (latent_samples, latent_dists), both stacked on axis=1 (time).
    """
    if step_types is None:
      batch_size = tf.shape(actions)[0]
      sequence_length = actions.shape[1].value  # should be statically defined
      step_types = tf.fill(
          [batch_size, sequence_length + 1], ts.StepType.MID)
    else:
      sequence_length = step_types.shape[1].value - 1
      actions = actions[:, :sequence_length]

    if images is not None:
      features = self.compressor(images)

    # swap batch and time axes
    actions = tf.transpose(actions, [1, 0, 2])
    step_types = tf.transpose(step_types, [1, 0])
    if images is not None:
      features = tf.transpose(features, [1, 0, 2])

    latent_dists = []
    latent_samples = []
    for t in range(sequence_length + 1):
      is_conditional = images is not None and (t < images.shape[1].value)
      if t == 0:
        if is_conditional:
          latent_dist = self.latent_first_posterior(features[t])
        else:
          latent_dist = self.latent_first_prior(step_types[t])  # step_types is only used to infer batch_size
        latent_sample = latent_dist.sample()
      else:
        reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
        if is_conditional:
          latent_first_dist = self.latent_first_posterior(features[t])
          latent_dist = self.latent_posterior(features[t], latent_samples[t-1], actions[t-1])
        else:
          latent_first_dist = self.latent_first_prior(step_types[t])
          latent_dist = self.latent_prior(latent_samples[t-1], actions[t-1])
        # Where the episode resets, fall back to the first-step distribution.
        latent_dist = nest_utils.map_distribution_structure(
            functools.partial(tf.where, reset_mask), latent_first_dist, latent_dist)
        latent_sample = latent_dist.sample()

      latent_dists.append(latent_dist)
      latent_samples.append(latent_sample)

    latent_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent_dists)
    latent_samples = tf.stack(latent_samples, axis=1)
    return latent_samples, latent_dists

  def sample_posterior(self, images, actions, step_types, features=None):
    """Samples the full posterior q(z | x, a) over the sequence.

    Args:
      images: image sequence (ignored when features is given).
      actions: [batch, time, ...] actions.
      step_types: [batch, time+1] StepType tensor; FIRST triggers a reset to
        the first-step posterior.
      features: optional precomputed compressor features for images.

    Returns:
      (latent_samples, latent_dists), both stacked on axis=1 (time).
    """
    sequence_length = step_types.shape[1].value - 1
    actions = actions[:, :sequence_length]

    if features is None:
      features = self.compressor(images)

    # swap batch and time axes
    features = tf.transpose(features, [1, 0, 2])
    actions = tf.transpose(actions, [1, 0, 2])
    step_types = tf.transpose(step_types, [1, 0])

    latent_dists = []
    latent_samples = []
    for t in range(sequence_length + 1):
      if t == 0:
        latent_dist = self.latent_first_posterior(features[t])
        latent_sample = latent_dist.sample()
      else:
        reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
        latent_first_dist = self.latent_first_posterior(features[t])
        latent_dist = self.latent_posterior(features[t], latent_samples[t-1], actions[t-1])
        # Where the episode resets, fall back to the first-step posterior.
        latent_dist = nest_utils.map_distribution_structure(
            functools.partial(tf.where, reset_mask), latent_first_dist, latent_dist)
        latent_sample = latent_dist.sample()

      latent_dists.append(latent_dist)
      latent_samples.append(latent_sample)

    latent_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent_dists)
    latent_samples = tf.stack(latent_samples, axis=1)
    return latent_samples, latent_dists
| [
"slac.agents.slac.model_distribution_network.Compressor",
"tensorflow.reduce_sum",
"slac.agents.slac.model_distribution_network.Decoder",
"slac.agents.slac.model_distribution_network.Bernoulli",
"tensorflow.not_equal",
"tensorflow.concat",
"slac.agents.slac.model_distribution_network.Normal",
"tensorf... | [((1922, 1952), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {'dtype': 'np.float32'}), '(0.1, dtype=np.float32)\n', (1929, 1952), True, 'import numpy as np\n'), ((4663, 4701), 'slac.agents.slac.model_distribution_network.Compressor', 'Compressor', (['base_depth', '(8 * base_depth)'], {}), '(base_depth, 8 * base_depth)\n', (4673, 4701), False, 'from slac.agents.slac.model_distribution_network import Compressor\n'), ((4749, 4790), 'slac.agents.slac.model_distribution_network.Decoder', 'Decoder', (['base_depth'], {'scale': 'decoder_stddev'}), '(base_depth, scale=decoder_stddev)\n', (4756, 4790), False, 'from slac.agents.slac.model_distribution_network import Decoder\n'), ((6602, 6661), 'tensorflow.tile', 'tf.tile', (['reset_masks[:, :, None]', '[1, 1, self.latent1_size]'], {}), '(reset_masks[:, :, None], [1, 1, self.latent1_size])\n', (6609, 6661), True, 'import tensorflow as tf\n'), ((7184, 7243), 'tensorflow.tile', 'tf.tile', (['reset_masks[:, :, None]', '[1, 1, self.latent2_size]'], {}), '(reset_masks[:, :, None], [1, 1, self.latent2_size])\n', (7191, 7243), True, 'import tensorflow as tf\n'), ((8269, 8314), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['latent1_kl_divergences'], {'axis': '(1)'}), '(latent1_kl_divergences, axis=1)\n', (8282, 8314), True, 'import tensorflow as tf\n'), ((9410, 9453), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['likelihood_log_probs'], {'axis': '(1)'}), '(likelihood_log_probs, axis=1)\n', (9423, 9453), True, 'import tensorflow as tf\n'), ((9675, 9718), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reconstruction_error'], {'axis': '(1)'}), '(reconstruction_error, axis=1)\n', (9688, 9718), True, 'import tensorflow as tf\n'), ((13029, 13061), 'tensorflow.transpose', 'tf.transpose', (['actions', '[1, 0, 2]'], {}), '(actions, [1, 0, 2])\n', (13041, 13061), True, 'import tensorflow as tf\n'), ((13079, 13111), 'tensorflow.transpose', 'tf.transpose', (['step_types', '[1, 0]'], {}), '(step_types, [1, 0])\n', (13091, 13111), True, 'import 
tensorflow as tf\n'), ((15478, 15511), 'tensorflow.stack', 'tf.stack', (['latent1_samples'], {'axis': '(1)'}), '(latent1_samples, axis=1)\n', (15486, 15511), True, 'import tensorflow as tf\n'), ((15690, 15723), 'tensorflow.stack', 'tf.stack', (['latent2_samples'], {'axis': '(1)'}), '(latent2_samples, axis=1)\n', (15698, 15723), True, 'import tensorflow as tf\n'), ((16086, 16119), 'tensorflow.transpose', 'tf.transpose', (['features', '[1, 0, 2]'], {}), '(features, [1, 0, 2])\n', (16098, 16119), True, 'import tensorflow as tf\n'), ((16134, 16166), 'tensorflow.transpose', 'tf.transpose', (['actions', '[1, 0, 2]'], {}), '(actions, [1, 0, 2])\n', (16146, 16166), True, 'import tensorflow as tf\n'), ((16184, 16216), 'tensorflow.transpose', 'tf.transpose', (['step_types', '[1, 0]'], {}), '(step_types, [1, 0])\n', (16196, 16216), True, 'import tensorflow as tf\n'), ((17756, 17789), 'tensorflow.stack', 'tf.stack', (['latent1_samples'], {'axis': '(1)'}), '(latent1_samples, axis=1)\n', (17764, 17789), True, 'import tensorflow as tf\n'), ((17918, 17951), 'tensorflow.stack', 'tf.stack', (['latent2_samples'], {'axis': '(1)'}), '(latent2_samples, axis=1)\n', (17926, 17951), True, 'import tensorflow as tf\n'), ((18473, 18503), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {'dtype': 'np.float32'}), '(0.1, dtype=np.float32)\n', (18480, 18503), True, 'import numpy as np\n'), ((18940, 18983), 'slac.agents.slac.model_distribution_network.ConstantMultivariateNormalDiag', 'ConstantMultivariateNormalDiag', (['latent_size'], {}), '(latent_size)\n', (18970, 18983), False, 'from slac.agents.slac.model_distribution_network import ConstantMultivariateNormalDiag\n'), ((19368, 19419), 'slac.agents.slac.model_distribution_network.MultivariateNormalDiag', 'MultivariateNormalDiag', (['(8 * base_depth)', 'latent_size'], {}), '(8 * base_depth, latent_size)\n', (19390, 19419), False, 'from slac.agents.slac.model_distribution_network import MultivariateNormalDiag\n'), ((19783, 19821), 
'slac.agents.slac.model_distribution_network.Compressor', 'Compressor', (['base_depth', '(8 * base_depth)'], {}), '(base_depth, 8 * base_depth)\n', (19793, 19821), False, 'from slac.agents.slac.model_distribution_network import Compressor\n'), ((19860, 19901), 'slac.agents.slac.model_distribution_network.Decoder', 'Decoder', (['base_depth'], {'scale': 'decoder_stddev'}), '(base_depth, scale=decoder_stddev)\n', (19867, 19901), False, 'from slac.agents.slac.model_distribution_network import Decoder\n'), ((21523, 21581), 'tensorflow.tile', 'tf.tile', (['reset_masks[:, :, None]', '[1, 1, self.latent_size]'], {}), '(reset_masks[:, :, None], [1, 1, self.latent_size])\n', (21530, 21581), True, 'import tensorflow as tf\n'), ((22411, 22455), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['latent_kl_divergences'], {'axis': '(1)'}), '(latent_kl_divergences, axis=1)\n', (22424, 22455), True, 'import tensorflow as tf\n'), ((22798, 22841), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['likelihood_log_probs'], {'axis': '(1)'}), '(likelihood_log_probs, axis=1)\n', (22811, 22841), True, 'import tensorflow as tf\n'), ((23063, 23106), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reconstruction_error'], {'axis': '(1)'}), '(reconstruction_error, axis=1)\n', (23076, 23106), True, 'import tensorflow as tf\n'), ((26166, 26198), 'tensorflow.transpose', 'tf.transpose', (['actions', '[1, 0, 2]'], {}), '(actions, [1, 0, 2])\n', (26178, 26198), True, 'import tensorflow as tf\n'), ((26216, 26248), 'tensorflow.transpose', 'tf.transpose', (['step_types', '[1, 0]'], {}), '(step_types, [1, 0])\n', (26228, 26248), True, 'import tensorflow as tf\n'), ((27595, 27627), 'tensorflow.stack', 'tf.stack', (['latent_samples'], {'axis': '(1)'}), '(latent_samples, axis=1)\n', (27603, 27627), True, 'import tensorflow as tf\n'), ((27952, 27985), 'tensorflow.transpose', 'tf.transpose', (['features', '[1, 0, 2]'], {}), '(features, [1, 0, 2])\n', (27964, 27985), True, 'import tensorflow as tf\n'), ((28000, 28032), 
'tensorflow.transpose', 'tf.transpose', (['actions', '[1, 0, 2]'], {}), '(actions, [1, 0, 2])\n', (28012, 28032), True, 'import tensorflow as tf\n'), ((28050, 28082), 'tensorflow.transpose', 'tf.transpose', (['step_types', '[1, 0]'], {}), '(step_types, [1, 0])\n', (28062, 28082), True, 'import tensorflow as tf\n'), ((28933, 28965), 'tensorflow.stack', 'tf.stack', (['latent_samples'], {'axis': '(1)'}), '(latent_samples, axis=1)\n', (28941, 28965), True, 'import tensorflow as tf\n'), ((4905, 4948), 'slac.agents.slac.model_distribution_network.Normal', 'Normal', (['(8 * base_depth)'], {'scale': 'reward_stddev'}), '(8 * base_depth, scale=reward_stddev)\n', (4911, 4948), False, 'from slac.agents.slac.model_distribution_network import Normal\n'), ((5092, 5117), 'slac.agents.slac.model_distribution_network.Bernoulli', 'Bernoulli', (['(8 * base_depth)'], {}), '(8 * base_depth)\n', (5101, 5117), False, 'from slac.agents.slac.model_distribution_network import Bernoulli\n'), ((6201, 6288), 'tensorflow.where', 'tf.where', (['reset_masks[:, 1:]', 'first_prior_tensors[:, 1:]', 'after_first_prior_tensors'], {}), '(reset_masks[:, 1:], first_prior_tensors[:, 1:],\n after_first_prior_tensors)\n', (6209, 6288), True, 'import tensorflow as tf\n'), ((6307, 6382), 'tensorflow.concat', 'tf.concat', (['[first_prior_tensors[:, 0:1], after_first_prior_tensors]'], {'axis': '(1)'}), '([first_prior_tensors[:, 0:1], after_first_prior_tensors], axis=1)\n', (6316, 6382), True, 'import tensorflow as tf\n'), ((7023, 7079), 'functools.partial', 'functools.partial', (['where_and_concat', 'latent1_reset_masks'], {}), '(where_and_concat, latent1_reset_masks)\n', (7040, 7079), False, 'import functools\n'), ((7686, 7742), 'functools.partial', 'functools.partial', (['where_and_concat', 'latent2_reset_masks'], {}), '(where_and_concat, latent2_reset_masks)\n', (7703, 7742), False, 'import functools\n'), ((8960, 9005), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['latent2_kl_divergences'], {'axis': '(1)'}), 
'(latent2_kl_divergences, axis=1)\n', (8973, 9005), True, 'import tensorflow as tf\n'), ((9495, 9548), 'tensorflow.square', 'tf.square', (['(images - likelihood_dists.distribution.loc)'], {}), '(images - likelihood_dists.distribution.loc)\n', (9504, 9548), True, 'import tensorflow as tf\n'), ((10563, 10622), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(reward_log_probs * reward_valid_mask)'], {'axis': '(1)'}), '(reward_log_probs * reward_valid_mask, axis=1)\n', (10576, 10622), True, 'import tensorflow as tf\n'), ((10659, 10717), 'tensorflow.square', 'tf.square', (['(rewards[:, :sequence_length] - reward_dists.loc)'], {}), '(rewards[:, :sequence_length] - reward_dists.loc)\n', (10668, 10717), True, 'import tensorflow as tf\n'), ((10754, 10824), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(reward_reconstruction_error * reward_valid_mask)'], {'axis': '(1)'}), '(reward_reconstruction_error * reward_valid_mask, axis=1)\n', (10767, 10824), True, 'import tensorflow as tf\n'), ((11352, 11393), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['discount_log_probs'], {'axis': '(1)'}), '(discount_log_probs, axis=1)\n', (11365, 11393), True, 'import tensorflow as tf\n'), ((11563, 11603), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['discount_accuracy'], {'axis': '(1)'}), '(discount_accuracy, axis=1)\n', (11576, 11603), True, 'import tensorflow as tf\n'), ((11856, 11876), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['elbo'], {}), '(elbo)\n', (11870, 11876), True, 'import tensorflow as tf\n'), ((12735, 12794), 'tensorflow.fill', 'tf.fill', (['[batch_size, sequence_length + 1]', 'ts.StepType.MID'], {}), '([batch_size, sequence_length + 1], ts.StepType.MID)\n', (12742, 12794), True, 'import tensorflow as tf\n'), ((13156, 13189), 'tensorflow.transpose', 'tf.transpose', (['features', '[1, 0, 2]'], {}), '(features, [1, 0, 2])\n', (13168, 13189), True, 'import tensorflow as tf\n'), ((19072, 19123), 'slac.agents.slac.model_distribution_network.MultivariateNormalDiag', 
'MultivariateNormalDiag', (['(8 * base_depth)', 'latent_size'], {}), '(8 * base_depth, latent_size)\n', (19094, 19123), False, 'from slac.agents.slac.model_distribution_network import MultivariateNormalDiag\n'), ((19525, 19576), 'slac.agents.slac.model_distribution_network.MultivariateNormalDiag', 'MultivariateNormalDiag', (['(8 * base_depth)', 'latent_size'], {}), '(8 * base_depth, latent_size)\n', (19547, 19576), False, 'from slac.agents.slac.model_distribution_network import MultivariateNormalDiag\n'), ((19994, 20037), 'slac.agents.slac.model_distribution_network.Normal', 'Normal', (['(8 * base_depth)'], {'scale': 'reward_stddev'}), '(8 * base_depth, scale=reward_stddev)\n', (20000, 20037), False, 'from slac.agents.slac.model_distribution_network import Normal\n'), ((20168, 20193), 'slac.agents.slac.model_distribution_network.Bernoulli', 'Bernoulli', (['(8 * base_depth)'], {}), '(8 * base_depth)\n', (20177, 20193), False, 'from slac.agents.slac.model_distribution_network import Bernoulli\n'), ((21123, 21210), 'tensorflow.where', 'tf.where', (['reset_masks[:, 1:]', 'first_prior_tensors[:, 1:]', 'after_first_prior_tensors'], {}), '(reset_masks[:, 1:], first_prior_tensors[:, 1:],\n after_first_prior_tensors)\n', (21131, 21210), True, 'import tensorflow as tf\n'), ((21229, 21304), 'tensorflow.concat', 'tf.concat', (['[first_prior_tensors[:, 0:1], after_first_prior_tensors]'], {'axis': '(1)'}), '([first_prior_tensors[:, 0:1], after_first_prior_tensors], axis=1)\n', (21238, 21304), True, 'import tensorflow as tf\n'), ((21929, 21984), 'functools.partial', 'functools.partial', (['where_and_concat', 'latent_reset_masks'], {}), '(where_and_concat, latent_reset_masks)\n', (21946, 21984), False, 'import functools\n'), ((22883, 22936), 'tensorflow.square', 'tf.square', (['(images - likelihood_dists.distribution.loc)'], {}), '(images - likelihood_dists.distribution.loc)\n', (22892, 22936), True, 'import tensorflow as tf\n'), ((23806, 23865), 'tensorflow.reduce_sum', 
'tf.reduce_sum', (['(reward_log_probs * reward_valid_mask)'], {'axis': '(1)'}), '(reward_log_probs * reward_valid_mask, axis=1)\n', (23819, 23865), True, 'import tensorflow as tf\n'), ((23902, 23960), 'tensorflow.square', 'tf.square', (['(rewards[:, :sequence_length] - reward_dists.loc)'], {}), '(rewards[:, :sequence_length] - reward_dists.loc)\n', (23911, 23960), True, 'import tensorflow as tf\n'), ((23997, 24067), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(reward_reconstruction_error * reward_valid_mask)'], {'axis': '(1)'}), '(reward_reconstruction_error * reward_valid_mask, axis=1)\n', (24010, 24067), True, 'import tensorflow as tf\n'), ((24535, 24576), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['discount_log_probs'], {'axis': '(1)'}), '(discount_log_probs, axis=1)\n', (24548, 24576), True, 'import tensorflow as tf\n'), ((24746, 24786), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['discount_accuracy'], {'axis': '(1)'}), '(discount_accuracy, axis=1)\n', (24759, 24786), True, 'import tensorflow as tf\n'), ((25043, 25063), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['elbo'], {}), '(elbo)\n', (25057, 25063), True, 'import tensorflow as tf\n'), ((25872, 25931), 'tensorflow.fill', 'tf.fill', (['[batch_size, sequence_length + 1]', 'ts.StepType.MID'], {}), '([batch_size, sequence_length + 1], ts.StepType.MID)\n', (25879, 25931), True, 'import tensorflow as tf\n'), ((26293, 26326), 'tensorflow.transpose', 'tf.transpose', (['features', '[1, 0, 2]'], {}), '(features, [1, 0, 2])\n', (26305, 26326), True, 'import tensorflow as tf\n'), ((6440, 6487), 'tensorflow.ones_like', 'tf.ones_like', (['step_types[:, 0:1]'], {'dtype': 'tf.bool'}), '(step_types[:, 0:1], dtype=tf.bool)\n', (6452, 6487), True, 'import tensorflow as tf\n'), ((6518, 6564), 'tensorflow.equal', 'tf.equal', (['step_types[:, 1:]', 'ts.StepType.FIRST'], {}), '(step_types[:, 1:], ts.StepType.FIRST)\n', (6526, 6564), True, 'import tensorflow as tf\n'), ((8369, 8407), 'tensorflow.reduce_mean', 'tf.reduce_mean', 
(['latent1_kl_divergences'], {}), '(latent1_kl_divergences)\n', (8383, 8407), True, 'import tensorflow as tf\n'), ((9159, 9222), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(latent1_kl_divergences + latent2_kl_divergences)'], {}), '(latent1_kl_divergences + latent2_kl_divergences)\n', (9173, 9222), True, 'import tensorflow as tf\n'), ((9764, 9800), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['likelihood_log_probs'], {}), '(likelihood_log_probs)\n', (9778, 9800), True, 'import tensorflow as tf\n'), ((9832, 9868), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reconstruction_error'], {}), '(reconstruction_error)\n', (9846, 9868), True, 'import tensorflow as tf\n'), ((10384, 10447), 'tensorflow.not_equal', 'tf.not_equal', (['step_types[:, :sequence_length]', 'ts.StepType.LAST'], {}), '(step_types[:, :sequence_length], ts.StepType.LAST)\n', (10396, 10447), True, 'import tensorflow as tf\n'), ((12167, 12187), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['elbo'], {}), '(elbo)\n', (12181, 12187), True, 'import tensorflow as tf\n'), ((12616, 12633), 'tensorflow.shape', 'tf.shape', (['actions'], {}), '(actions)\n', (12624, 12633), True, 'import tensorflow as tf\n'), ((13943, 13985), 'tensorflow.equal', 'tf.equal', (['step_types[t]', 'ts.StepType.FIRST'], {}), '(step_types[t], ts.StepType.FIRST)\n', (13951, 13985), True, 'import tensorflow as tf\n'), ((16683, 16725), 'tensorflow.equal', 'tf.equal', (['step_types[t]', 'ts.StepType.FIRST'], {}), '(step_types[t], ts.StepType.FIRST)\n', (16691, 16725), True, 'import tensorflow as tf\n'), ((17697, 17716), 'tensorflow.stack', 'tf.stack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (17705, 17716), True, 'import tensorflow as tf\n'), ((17859, 17878), 'tensorflow.stack', 'tf.stack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (17867, 17878), True, 'import tensorflow as tf\n'), ((21362, 21409), 'tensorflow.ones_like', 'tf.ones_like', (['step_types[:, 0:1]'], {'dtype': 'tf.bool'}), '(step_types[:, 0:1], dtype=tf.bool)\n', (21374, 
21409), True, 'import tensorflow as tf\n'), ((21440, 21486), 'tensorflow.equal', 'tf.equal', (['step_types[:, 1:]', 'ts.StepType.FIRST'], {}), '(step_types[:, 1:], ts.StepType.FIRST)\n', (21448, 21486), True, 'import tensorflow as tf\n'), ((22509, 22546), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['latent_kl_divergences'], {}), '(latent_kl_divergences)\n', (22523, 22546), True, 'import tensorflow as tf\n'), ((22601, 22638), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['latent_kl_divergences'], {}), '(latent_kl_divergences)\n', (22615, 22638), True, 'import tensorflow as tf\n'), ((23154, 23190), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['likelihood_log_probs'], {}), '(likelihood_log_probs)\n', (23168, 23190), True, 'import tensorflow as tf\n'), ((23224, 23260), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reconstruction_error'], {}), '(reconstruction_error)\n', (23238, 23260), True, 'import tensorflow as tf\n'), ((23627, 23690), 'tensorflow.not_equal', 'tf.not_equal', (['step_types[:, :sequence_length]', 'ts.StepType.LAST'], {}), '(step_types[:, :sequence_length], ts.StepType.LAST)\n', (23639, 23690), True, 'import tensorflow as tf\n'), ((25296, 25316), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['elbo'], {}), '(elbo)\n', (25310, 25316), True, 'import tensorflow as tf\n'), ((25753, 25770), 'tensorflow.shape', 'tf.shape', (['actions'], {}), '(actions)\n', (25761, 25770), True, 'import tensorflow as tf\n'), ((26800, 26842), 'tensorflow.equal', 'tf.equal', (['step_types[t]', 'ts.StepType.FIRST'], {}), '(step_types[t], ts.StepType.FIRST)\n', (26808, 26842), True, 'import tensorflow as tf\n'), ((27538, 27557), 'tensorflow.stack', 'tf.stack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (27546, 27557), True, 'import tensorflow as tf\n'), ((28329, 28371), 'tensorflow.equal', 'tf.equal', (['step_types[t]', 'ts.StepType.FIRST'], {}), '(step_types[t], ts.StepType.FIRST)\n', (28337, 28371), True, 'import tensorflow as tf\n'), ((28876, 28895), 'tensorflow.stack', 
'tf.stack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (28884, 28895), True, 'import tensorflow as tf\n'), ((9064, 9102), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['latent2_kl_divergences'], {}), '(latent2_kl_divergences)\n', (9078, 9102), True, 'import tensorflow as tf\n'), ((10881, 10913), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reward_log_probs'], {}), '(reward_log_probs)\n', (10895, 10913), True, 'import tensorflow as tf\n'), ((10954, 10997), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reward_reconstruction_error'], {}), '(reward_reconstruction_error)\n', (10968, 10997), True, 'import tensorflow as tf\n'), ((11662, 11696), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discount_log_probs'], {}), '(discount_log_probs)\n', (11676, 11696), True, 'import tensorflow as tf\n'), ((11727, 11760), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discount_accuracy'], {}), '(discount_accuracy)\n', (11741, 11760), True, 'import tensorflow as tf\n'), ((14422, 14461), 'functools.partial', 'functools.partial', (['tf.where', 'reset_mask'], {}), '(tf.where, reset_mask)\n', (14439, 14461), False, 'import functools\n'), ((15004, 15043), 'functools.partial', 'functools.partial', (['tf.where', 'reset_mask'], {}), '(tf.where, reset_mask)\n', (15021, 15043), False, 'import functools\n'), ((15380, 15399), 'tensorflow.stack', 'tf.stack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (15388, 15399), True, 'import tensorflow as tf\n'), ((15592, 15611), 'tensorflow.stack', 'tf.stack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (15600, 15611), True, 'import tensorflow as tf\n'), ((16965, 17004), 'functools.partial', 'functools.partial', (['tf.where', 'reset_mask'], {}), '(tf.where, reset_mask)\n', (16982, 17004), False, 'import functools\n'), ((17332, 17371), 'functools.partial', 'functools.partial', (['tf.where', 'reset_mask'], {}), '(tf.where, reset_mask)\n', (17349, 17371), False, 'import functools\n'), ((24126, 24158), 'tensorflow.reduce_mean', 'tf.reduce_mean', 
(['reward_log_probs'], {}), '(reward_log_probs)\n', (24140, 24158), True, 'import tensorflow as tf\n'), ((24201, 24244), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reward_reconstruction_error'], {}), '(reward_reconstruction_error)\n', (24215, 24244), True, 'import tensorflow as tf\n'), ((24847, 24881), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discount_log_probs'], {}), '(discount_log_probs)\n', (24861, 24881), True, 'import tensorflow as tf\n'), ((24914, 24947), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discount_accuracy'], {}), '(discount_accuracy)\n', (24928, 24947), True, 'import tensorflow as tf\n'), ((27268, 27307), 'functools.partial', 'functools.partial', (['tf.where', 'reset_mask'], {}), '(tf.where, reset_mask)\n', (27285, 27307), False, 'import functools\n'), ((28606, 28645), 'functools.partial', 'functools.partial', (['tf.where', 'reset_mask'], {}), '(tf.where, reset_mask)\n', (28623, 28645), False, 'import functools\n')] |
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import pickle
# Cluster the training playlists with K-Means and persist the model,
# the cluster centers, and each playlist's cluster label.
# Load the per-playlist feature matrix built for the training split.
playlist_features = pd.read_csv('../data/playlist_features_with_artists_train.csv', index_col=0, header=0)
playlist_list = playlist_features.index.values
# Rule-of-thumb cluster count: sqrt(number of playlists).
n_clusters = int(np.sqrt(len(playlist_features)))
print('Making clusters')
# Make clusters
kmeans = KMeans(n_clusters=n_clusters, verbose=0, algorithm='auto')
kmeans.fit(playlist_features)
print('Saving clusters')
# Saving the clusters.
# Fix: close the pickle file deterministically instead of leaking the
# handle returned by a bare open() call.
with open('./kmeans_cluster_train.pkl', 'wb') as f:
    pickle.dump(kmeans, f)
cluster_centers = kmeans.cluster_centers_
np.savetxt('./kmeans_cluster_centers_train.csv', cluster_centers, delimiter=',')
# Saving the cluster label for each playlist in train (e.g., for track frequency table by cluster)
cluster_labels = kmeans.labels_
playlist_cluster_labels = np.column_stack((playlist_list, cluster_labels))
np.savetxt('./playlist_cluster_labels_train.csv', playlist_cluster_labels, delimiter=',', fmt='%i')
| [
"pandas.read_csv",
"sklearn.cluster.KMeans",
"numpy.savetxt",
"numpy.column_stack"
] | [((144, 234), 'pandas.read_csv', 'pd.read_csv', (['"""../data/playlist_features_with_artists_train.csv"""'], {'index_col': '(0)', 'header': '(0)'}), "('../data/playlist_features_with_artists_train.csv', index_col=0,\n header=0)\n", (155, 234), True, 'import pandas as pd\n'), ((413, 471), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'verbose': '(0)', 'algorithm': '"""auto"""'}), "(n_clusters=n_clusters, verbose=0, algorithm='auto')\n", (419, 471), False, 'from sklearn.cluster import KMeans\n'), ((655, 740), 'numpy.savetxt', 'np.savetxt', (['"""./kmeans_cluster_centers_train.csv"""', 'cluster_centers'], {'delimiter': '""","""'}), "('./kmeans_cluster_centers_train.csv', cluster_centers, delimiter=','\n )\n", (665, 740), True, 'import numpy as np\n'), ((894, 942), 'numpy.column_stack', 'np.column_stack', (['(playlist_list, cluster_labels)'], {}), '((playlist_list, cluster_labels))\n', (909, 942), True, 'import numpy as np\n'), ((943, 1046), 'numpy.savetxt', 'np.savetxt', (['"""./playlist_cluster_labels_train.csv"""', 'playlist_cluster_labels'], {'delimiter': '""","""', 'fmt': '"""%i"""'}), "('./playlist_cluster_labels_train.csv', playlist_cluster_labels,\n delimiter=',', fmt='%i')\n", (953, 1046), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from keras_nmf import NMFModel
class TestFitRandom(unittest.TestCase):
    """Smoke and overfit tests for NMFModel on randomly generated data."""

    def test_decrease_loss(self):
        # Even on purely random interactions, training should reduce the loss.
        model = NMFModel(99, 7, 4)
        model.compile_model(learning_rate=0.5)
        # Draw the random tensors in the same order as the data they feed,
        # so any external seeding reproduces an identical stream.
        rows = np.random.randint(0, 99, 128)
        cols = np.random.randint(0, 99, (128, 7))
        targets = np.random.uniform(1, 6, (128, 7, 1))
        mask = np.random.uniform(1e-5, 1, (128, 7)).astype('float32')
        hist = model.fit(rows, cols, targets, mask, epochs=100).history
        self.assertLess(hist['loss'][-1], hist['loss'][0])

    def test_overfit(self):
        # With enough epochs the model should (nearly) memorise a small
        # low-rank matrix, and sparsity should appear in the learned factors.
        np.random.seed(1692)
        n, k, n_neighbors, n_pairs = 16, 8, 4, 9
        model = NMFModel(n, n_pairs, k)
        model.compile_model(learning_rate=0.25)
        half = int(k / 2)
        latent = np.random.uniform(0, 4, size=(n, half)) * np.random.randint(0, 2, (n, half))
        target_matrix = latent @ latent.T
        rows = np.arange(0, n).astype('int32')
        cols = np.repeat(np.random.choice(n, n_pairs, replace=False)[None, :], n, 0)
        targets = target_matrix[rows[:, None], cols, None]
        hist = model.fit(rows, cols, targets, epochs=1000, masking_weights=None).history
        neighbors = model.get_nearest_neighbors(n_neighbors=n_neighbors)
        learned_w = model.W.get_weights()[0]
        self.assertGreater(len(learned_w[learned_w == 0]), 0)
        self.assertLess(hist['loss'][-1], 0.05)
        self.assertEqual(len(neighbors), n)
        for idx in range(n):
            self.assertIn(idx, neighbors)
            self.assertEqual(len(neighbors[idx]), n_neighbors)
| [
"numpy.random.uniform",
"numpy.random.seed",
"keras_nmf.NMFModel",
"numpy.random.randint",
"numpy.arange",
"numpy.random.choice"
] | [((164, 182), 'keras_nmf.NMFModel', 'NMFModel', (['(99)', '(7)', '(4)'], {}), '(99, 7, 4)\n', (172, 182), False, 'from keras_nmf import NMFModel\n'), ((248, 277), 'numpy.random.randint', 'np.random.randint', (['(0)', '(99)', '(128)'], {}), '(0, 99, 128)\n', (265, 277), True, 'import numpy as np\n'), ((291, 325), 'numpy.random.randint', 'np.random.randint', (['(0)', '(99)', '(128, 7)'], {}), '(0, 99, (128, 7))\n', (308, 325), True, 'import numpy as np\n'), ((338, 374), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(6)', '(128, 7, 1)'], {}), '(1, 6, (128, 7, 1))\n', (355, 374), True, 'import numpy as np\n'), ((623, 643), 'numpy.random.seed', 'np.random.seed', (['(1692)'], {}), '(1692)\n', (637, 643), True, 'import numpy as np\n'), ((737, 760), 'keras_nmf.NMFModel', 'NMFModel', (['n', 'n_pairs', 'k'], {}), '(n, n_pairs, k)\n', (745, 760), False, 'from keras_nmf import NMFModel\n'), ((393, 430), 'numpy.random.uniform', 'np.random.uniform', (['(1e-05)', '(1)', '(128, 7)'], {}), '(1e-05, 1, (128, 7))\n', (410, 430), True, 'import numpy as np\n'), ((964, 979), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (973, 979), True, 'import numpy as np\n'), ((1019, 1062), 'numpy.random.choice', 'np.random.choice', (['n', 'n_pairs'], {'replace': '(False)'}), '(n, n_pairs, replace=False)\n', (1035, 1062), True, 'import numpy as np\n')] |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: train.py
@time: 2019-04-19 11:52
"""
import pandas as pd
import numpy as np
from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew
from mayiutils.algorithm.algorithmset.calcPearson import calcPearson
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report, f1_score
import xgboost
import itertools
# from feature_selector import FeatureSelector
import lightgbm as lgb
if __name__ == '__main__':
    # Experiment switch; exactly one branch below runs per invocation.
    #   1: decision-tree CV over max_depth      2: decision tree on all data
    #   3: xgboost CV hyper-parameter grid      4: xgboost on all data
    #   5: Pearson correlation per feature      6: random-forest importances
    #   7: feature_selector pruning             8: lightgbm training -> tt.csv
    #   9: threshold sweep over tt.csv scores
    mode = 9
    # Feature matrix and labels were pickled by the preprocessing step.
    df = picklew.loadFromFile('train_data2.pkl')
    print(df.info())
    X = df.values
    print(X.shape)
    y = picklew.loadFromFile('label.pkl')
    y = np.array(list(y))
    if mode == 9:
        # Result evaluation: binarize the lightgbm probabilities saved by
        # mode 8 at several cut-offs and report metrics for each.
        pred = pd.read_csv('tt.csv', header=None)
        df = pd.DataFrame()
        df['score'] = pred.iloc[:, 1]
        # Same binarize/report sequence for every threshold (this was
        # copy-pasted four times in the original).
        for threshold in (0.4, 0.5, 0.6, 0.7):
            col = 's{}'.format(threshold)
            df[col] = 1
            df.loc[df['score'] < threshold, col] = 0
            print(df[col].value_counts())
            print(classification_report(y, list(df[col])))
    if mode == 8:
        # Train lightgbm on the full data and dump per-sample probabilities.
        lgb_train = lgb.Dataset(X, y, free_raw_data=False)
        lgb_eval = lgb.Dataset(X, y, reference=lgb_train, free_raw_data=False)
        print('设置参数')
        params = {
            'boosting_type': 'gbdt',
            'boosting': 'dart',
            'objective': 'binary',
            'metric': 'binary_logloss',
            'learning_rate': 0.01,
            'num_leaves': 25,
            'max_depth': 3,
            'max_bin': 10,
            'min_data_in_leaf': 8,
            'feature_fraction': 0.6,
            'bagging_fraction': 1,
            'bagging_freq': 0,
            'lambda_l1': 0,
            'lambda_l2': 0,
            'min_split_gain': 0
        }
        print("开始训练")
        gbm = lgb.train(params,                    # parameter dict
                        lgb_train,                 # training set
                        num_boost_round=2000,      # max iterations
                        valid_sets=lgb_eval,       # validation set
                        early_stopping_rounds=30)  # early-stopping patience
        preds_offline = gbm.predict(X, num_iteration=gbm.best_iteration)  # probabilities
        print(preds_offline)
        pd.Series(preds_offline).to_csv('tt.csv')
    if mode == 7:
        # Feature-importance driven pruning.  NOTE: requires the (currently
        # commented-out) `from feature_selector import FeatureSelector`.
        fs = FeatureSelector(data=df, labels=y)
        fs.identify_collinear(correlation_threshold=0.975)
        correlated_features = fs.ops['collinear']
        print(correlated_features)
        print(fs.record_collinear)
        # Zero-importance features according to a gradient-boosting model.
        fs.identify_zero_importance(task='classification', eval_metric='auc',
                                    n_iterations=10, early_stopping=True)
        one_hot_features = fs.one_hot_features
        base_features = fs.base_features
        print('There are %d original features' % len(base_features))
        print('There are %d one-hot features' % len(one_hot_features))
        zero_importance_features = fs.ops['zero_importance']
        print(zero_importance_features[:15])
        df_removed = fs.remove(methods=['collinear', 'zero_importance'])
        print(df_removed.shape)
        picklew.dump2File(df_removed, 'train_fs_removed.pkl')
    if mode == 6:
        # Random-forest feature importances, saved in descending order.
        rfmodel = RandomForestClassifier(n_estimators=80)
        rfmodel.fit(X, y)
        rs = pd.Series(rfmodel.feature_importances_, index=df.columns).sort_values(ascending=False)
        rs.to_csv('randomforest_rs.csv', encoding='gbk')
    if mode == 5:
        # Pearson correlation of every feature column with the label.
        r = np.apply_along_axis(lambda x: calcPearson(x, y), axis=0, arr=X)
        print(r)
        rs = pd.Series(r, index=df.columns).sort_values(ascending=False)
        print(rs)
        rs.to_csv('pearson_rs.csv', encoding='gbk')
    if mode == 4:
        # xgboost on all data; report in-sample metrics and importances.
        model = xgboost.XGBClassifier(learning_rate=0.05, n_estimators=80, max_depth=7)
        model.fit(X, y)
        prediction = model.predict(X)
        print(classification_report(y, prediction))
        print(model.feature_importances_)
        rs = pd.Series(model.feature_importances_, index=df.columns).sort_values(ascending=False)
        print(rs)
        rs.to_csv('xgboost_rs.csv', encoding='gbk')
    if mode == 3:
        # Stratified 4-fold CV over an xgboost hyper-parameter grid.
        skf = StratifiedKFold(n_splits=4)
        lr = [0.05, 0.1, 0.2]
        max_depth = [3, 5, 7]
        n_estimators = [80, 100, 120]
        for l, n, m in itertools.product(lr, n_estimators, max_depth):
            print(l, n, m)
            f1 = []
            for train_index, test_index in skf.split(X, y):
                X_train, X_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]
                model = xgboost.XGBClassifier(learning_rate=l, n_estimators=n, max_depth=m)
                model.fit(X_train, y_train)
                prediction = model.predict(X_test)
                # Bug fix: accumulate every fold's score.  The original
                # overwrote `f1` each fold, so np.mean(f1) reported only
                # the last fold instead of the cross-validated mean.
                f1.append(f1_score(y_test, prediction))
            print(np.mean(f1))
    if mode == 2:
        # Shallow decision tree on all data; in-sample metrics + importances.
        clf = DecisionTreeClassifier(max_depth=4)
        clf.fit(X, y)
        y_p = clf.predict(X)
        print(classification_report(y, y_p))
        print(clf.feature_importances_)
        rs = pd.Series(clf.feature_importances_, index=df.columns).sort_values(ascending=False)
        print(rs)
        rs.to_csv('dt_rs.csv', encoding='gbk')
    if mode == 1:
        # Stratified 4-fold CV over decision-tree depth.
        # (The original also assigned max_depths = [3, 6, 9] first, which
        # was immediately overwritten — dead code removed.)
        skf = StratifiedKFold(n_splits=4)
        max_depths = [2, 3, 4, 5]
        for max_depth in max_depths:
            f1 = []
            for train_index, test_index in skf.split(X, y):
                X_train, X_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]
                clf = DecisionTreeClassifier(max_depth=max_depth)
                clf.fit(X_train, y_train)
                y_p = clf.predict(X_test)
                # Bug fix: accumulate (the original overwrote f1 each fold).
                f1.append(f1_score(y_test, y_p))
            print(np.mean(f1))
| [
"pandas.DataFrame",
"sklearn.ensemble.RandomForestClassifier",
"lightgbm.train",
"mayiutils.algorithm.algorithmset.calcPearson.calcPearson",
"pandas.read_csv",
"mayiutils.file_io.pickle_wrapper.PickleWrapper.loadFromFile",
"lightgbm.Dataset",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.c... | [((640, 679), 'mayiutils.file_io.pickle_wrapper.PickleWrapper.loadFromFile', 'picklew.loadFromFile', (['"""train_data2.pkl"""'], {}), "('train_data2.pkl')\n", (660, 679), True, 'from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew\n'), ((769, 802), 'mayiutils.file_io.pickle_wrapper.PickleWrapper.loadFromFile', 'picklew.loadFromFile', (['"""label.pkl"""'], {}), "('label.pkl')\n", (789, 802), True, 'from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew\n'), ((978, 1012), 'pandas.read_csv', 'pd.read_csv', (['"""tt.csv"""'], {'header': 'None'}), "('tt.csv', header=None)\n", (989, 1012), True, 'import pandas as pd\n'), ((1052, 1066), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1064, 1066), True, 'import pandas as pd\n'), ((2098, 2136), 'lightgbm.Dataset', 'lgb.Dataset', (['X', 'y'], {'free_raw_data': '(False)'}), '(X, y, free_raw_data=False)\n', (2109, 2136), True, 'import lightgbm as lgb\n'), ((2156, 2215), 'lightgbm.Dataset', 'lgb.Dataset', (['X', 'y'], {'reference': 'lgb_train', 'free_raw_data': '(False)'}), '(X, y, reference=lgb_train, free_raw_data=False)\n', (2167, 2215), True, 'import lightgbm as lgb\n'), ((2793, 2894), 'lightgbm.train', 'lgb.train', (['params', 'lgb_train'], {'num_boost_round': '(2000)', 'valid_sets': 'lgb_eval', 'early_stopping_rounds': '(30)'}), '(params, lgb_train, num_boost_round=2000, valid_sets=lgb_eval,\n early_stopping_rounds=30)\n', (2802, 2894), True, 'import lightgbm as lgb\n'), ((4330, 4383), 'mayiutils.file_io.pickle_wrapper.PickleWrapper.dump2File', 'picklew.dump2File', (['df_removed', '"""train_fs_removed.pkl"""'], {}), "(df_removed, 'train_fs_removed.pkl')\n", (4347, 4383), True, 'from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew\n'), ((4461, 4500), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(80)'}), '(n_estimators=80)\n', (4483, 4500), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((5066, 5137), 'xgboost.XGBClassifier', 'xgboost.XGBClassifier', ([], {'learning_rate': '(0.05)', 'n_estimators': '(80)', 'max_depth': '(7)'}), '(learning_rate=0.05, n_estimators=80, max_depth=7)\n', (5087, 5137), False, 'import xgboost\n'), ((5601, 5628), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(4)'}), '(n_splits=4)\n', (5616, 5628), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((5850, 5896), 'itertools.product', 'itertools.product', (['lr', 'n_estimators', 'max_depth'], {}), '(lr, n_estimators, max_depth)\n', (5867, 5896), False, 'import itertools\n'), ((6585, 6620), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(4)'}), '(max_depth=4)\n', (6607, 6620), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((7409, 7436), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(4)'}), '(n_splits=4)\n', (7424, 7436), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((5242, 5278), 'sklearn.metrics.classification_report', 'classification_report', (['y', 'prediction'], {}), '(y, prediction)\n', (5263, 5278), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((6701, 6730), 'sklearn.metrics.classification_report', 'classification_report', (['y', 'y_p'], {}), '(y, y_p)\n', (6722, 6730), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((3101, 3125), 'pandas.Series', 'pd.Series', (['preds_offline'], {}), '(preds_offline)\n', (3110, 3125), True, 'import pandas as pd\n'), ((4540, 4597), 'pandas.Series', 'pd.Series', (['rfmodel.feature_importances_'], {'index': 'df.columns'}), '(rfmodel.feature_importances_, index=df.columns)\n', (4549, 4597), True, 'import pandas as pd\n'), ((4786, 4803), 'mayiutils.algorithm.algorithmset.calcPearson.calcPearson', 'calcPearson', (['x', 'y'], {}), '(x, y)\n', (4797, 4803), False, 'from 
mayiutils.algorithm.algorithmset.calcPearson import calcPearson\n'), ((4850, 4880), 'pandas.Series', 'pd.Series', (['r'], {'index': 'df.columns'}), '(r, index=df.columns)\n', (4859, 4880), True, 'import pandas as pd\n'), ((5374, 5429), 'pandas.Series', 'pd.Series', (['model.feature_importances_'], {'index': 'df.columns'}), '(model.feature_importances_, index=df.columns)\n', (5383, 5429), True, 'import pandas as pd\n'), ((6158, 6225), 'xgboost.XGBClassifier', 'xgboost.XGBClassifier', ([], {'learning_rate': 'l', 'n_estimators': 'n', 'max_depth': 'm'}), '(learning_rate=l, n_estimators=n, max_depth=m)\n', (6179, 6225), False, 'import xgboost\n'), ((6445, 6473), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (6453, 6473), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((6492, 6503), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (6499, 6503), True, 'import numpy as np\n'), ((6786, 6839), 'pandas.Series', 'pd.Series', (['clf.feature_importances_'], {'index': 'df.columns'}), '(clf.feature_importances_, index=df.columns)\n', (6795, 6839), True, 'import pandas as pd\n'), ((7990, 8033), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': 'max_depth'}), '(max_depth=max_depth)\n', (8012, 8033), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((8222, 8243), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_p'], {}), '(y_test, y_p)\n', (8230, 8243), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((8262, 8273), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (8269, 8273), True, 'import numpy as np\n')] |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from arch.api import session
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.optim.gradient import hetero_linear_model_gradient
from federatedml.optim.gradient import hetero_lr_gradient_and_loss
from federatedml.secureprotol import PaillierEncrypt
class TestHeteroLogisticGradient(unittest.TestCase):
    """Unit tests for the guest-side hetero-LR gradient computation.

    Verifies that gradients computed from dense and sparse feature
    representations agree, with and without a fitted intercept, using
    Paillier-encrypted intermediate values.
    """
    def setUp(self):
        # Paillier keypair used to homomorphically encrypt intermediates.
        self.paillier_encrypt = PaillierEncrypt()
        self.paillier_encrypt.generate_key()
        # self.hetero_lr_gradient = HeteroLogisticGradient(self.paillier_encrypt)
        self.hetero_lr_gradient = hetero_lr_gradient_and_loss.Guest()
        size = 10
        # Encrypted w.x value per synthetic instance, distributed over 48 partitions.
        self.en_wx = session.parallelize([self.paillier_encrypt.encrypt(i) for i in range(size)], partition=48)
        # self.en_wx = session.parallelize([self.paillier_encrypt.encrypt(i) for i in range(size)])
        self.en_sum_wx_square = session.parallelize([self.paillier_encrypt.encrypt(np.square(i)) for i in range(size)],
                                                  partition=48)
        self.wx = np.array([i for i in range(size)])
        self.w = self.wx / np.array([1 for _ in range(size)])
        # Instances with all-ones features and alternating +1/-1 labels.
        self.data_inst = session.parallelize(
            [Instance(features=np.array([1 for _ in range(size)]), label=pow(-1, i % 2)) for i in range(size)],
            partition=48)
        # test fore_gradient
        self.fore_gradient_local = [-0.5, 0.75, 0, 1.25, 0.5, 1.75, 1, 2.25, 1.5, 2.75]
        # test gradient
        self.gradient = [1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125]
        self.gradient_fit_intercept = [1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125]
        self.loss = 4.505647
    def test_compute_partition_gradient(self):
        """Dense and sparse inputs must yield identical decrypted gradients."""
        # fore_gradient = 0.25 * (w.x) - 0.5 * label, computed under encryption.
        fore_gradient = self.en_wx.join(self.data_inst, lambda wx, d: 0.25 * wx - 0.5 * d.label)
        sparse_data = self._make_sparse_data()
        for fit_intercept in [True, False]:
            dense_result = hetero_linear_model_gradient.compute_gradient(self.data_inst, fore_gradient, fit_intercept)
            dense_result = [self.paillier_encrypt.decrypt(iterator) for iterator in dense_result]
            if fit_intercept:
                self.assertListEqual(dense_result, self.gradient_fit_intercept)
            else:
                self.assertListEqual(dense_result, self.gradient)
            sparse_result = hetero_linear_model_gradient.compute_gradient(sparse_data, fore_gradient, fit_intercept)
            sparse_result = [self.paillier_encrypt.decrypt(iterator) for iterator in sparse_result]
            self.assertListEqual(dense_result, sparse_result)
    def _make_sparse_data(self):
        """Convert the dense test instances into SparseVector-backed instances."""
        def trans_sparse(instance):
            dense_features = instance.features
            indices = [i for i in range(len(dense_features))]
            sparse_features = SparseVector(indices=indices, data=dense_features, shape=len(dense_features))
            return Instance(inst_id=None,
                            features=sparse_features,
                            label=instance.label)
        return self.data_inst.mapValues(trans_sparse)
if __name__ == "__main__":
    session.init("1111")
    try:
        # unittest.main() raises SystemExit when the test run finishes, so
        # the original trailing session.stop() call was unreachable; running
        # the cleanup in a finally block guarantees the session is stopped.
        unittest.main()
    finally:
        session.stop()
| [
"unittest.main",
"federatedml.feature.instance.Instance",
"federatedml.optim.gradient.hetero_lr_gradient_and_loss.Guest",
"arch.api.session.init",
"arch.api.session.stop",
"numpy.square",
"federatedml.optim.gradient.hetero_linear_model_gradient.compute_gradient",
"federatedml.secureprotol.PaillierEncr... | [((3850, 3870), 'arch.api.session.init', 'session.init', (['"""1111"""'], {}), "('1111')\n", (3862, 3870), False, 'from arch.api import session\n'), ((3875, 3890), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3888, 3890), False, 'import unittest\n'), ((3895, 3909), 'arch.api.session.stop', 'session.stop', ([], {}), '()\n', (3907, 3909), False, 'from arch.api import session\n'), ((1088, 1105), 'federatedml.secureprotol.PaillierEncrypt', 'PaillierEncrypt', ([], {}), '()\n', (1103, 1105), False, 'from federatedml.secureprotol import PaillierEncrypt\n'), ((1267, 1302), 'federatedml.optim.gradient.hetero_lr_gradient_and_loss.Guest', 'hetero_lr_gradient_and_loss.Guest', ([], {}), '()\n', (1300, 1302), False, 'from federatedml.optim.gradient import hetero_lr_gradient_and_loss\n'), ((2666, 2761), 'federatedml.optim.gradient.hetero_linear_model_gradient.compute_gradient', 'hetero_linear_model_gradient.compute_gradient', (['self.data_inst', 'fore_gradient', 'fit_intercept'], {}), '(self.data_inst, fore_gradient,\n fit_intercept)\n', (2711, 2761), False, 'from federatedml.optim.gradient import hetero_linear_model_gradient\n'), ((3078, 3170), 'federatedml.optim.gradient.hetero_linear_model_gradient.compute_gradient', 'hetero_linear_model_gradient.compute_gradient', (['sparse_data', 'fore_gradient', 'fit_intercept'], {}), '(sparse_data, fore_gradient,\n fit_intercept)\n', (3123, 3170), False, 'from federatedml.optim.gradient import hetero_linear_model_gradient\n'), ((3635, 3705), 'federatedml.feature.instance.Instance', 'Instance', ([], {'inst_id': 'None', 'features': 'sparse_features', 'label': 'instance.label'}), '(inst_id=None, features=sparse_features, label=instance.label)\n', (3643, 3705), False, 'from federatedml.feature.instance import Instance\n'), ((1618, 1630), 'numpy.square', 'np.square', (['i'], {}), '(i)\n', (1627, 1630), True, 'import numpy as np\n')] |
"""
<NAME>
"""
import numpy as np
import pandas as pd
from gym import spaces
import matplotlib.pyplot as plt
from scipy import stats
from recsim import document
from recsim import user
from recsim.choice_model import MultinomialLogitChoiceModel,AbstractChoiceModel
from recsim.simulator import environment
from recsim.simulator.environment import SingleUserEnvironment
from recsim.simulator import recsim_gym
import os
import collections
import random
import data_preprocess
class CustomSingleUserEnviroment(SingleUserEnvironment):
    """Recsim environment with a single simulated user.

    Extends SingleUserEnvironment with an optional offline mode in which the
    candidate documents are restricted to the current user's logged history
    (obtained via `select_subset_func`) instead of the full corpus.

    Attributes:
        user_model: An instantiation of AbstractUserModel that represents a user.
        document_sampler: An instantiation of AbstractDocumentSampler.
        num_candidates: An integer representing the size of the candidate_set.
        slate_size: An integer representing the slate size.
        candidate_set: An instantiation of CandidateSet.
    """
    def __init__(self,user_model,
                 document_sampler,
                 num_candidates,
                 slate_size,
                 resample_documents=True,
                 offline_mode = False,select_subset_func = None):
        """
        :param user_model: AbstractUserModel driving simulated responses.
        :param document_sampler: AbstractDocumentSampler for the corpus.
        :param num_candidates: size of the candidate set presented per step.
        :param slate_size: number of documents in each recommended slate.
        :param resample_documents: whether to resample candidates each step.
        :param offline_mode: restrict candidates to the user's logged items.
        :param select_subset_func: callable(user_id) -> records for that
            user; used only when offline_mode is True.
        """
        super(CustomSingleUserEnviroment, self).__init__(user_model,
                                                         document_sampler,
                                                         num_candidates,
                                                         slate_size,
                                                         resample_documents)
        self.offline_mode = offline_mode
        self.select_subset_func = select_subset_func
        # Remember the configured candidate count so reset() can restore it
        # after offline mode may have shrunk it for a previous user.
        self.constant_num_candidates = num_candidates
    def check_num_candidate(self):
        # Clamp the candidate count to the number of documents still
        # available in the sampler's dataset.
        if (self._num_candidates > self._document_sampler.size()):
            self._num_candidates = self._document_sampler.size()
    def reset(self):
        """Resets the environment and return the first observation.
        Returns:
          user_obs: An array of floats representing observations of the user's
            current state
          doc_obs: An OrderedDict of document observations keyed by document ids
        """
        self._user_model.reset()
        user_obs = self._user_model.create_observation()
        self._num_candidates = self.constant_num_candidates
        # print("before num candidate ",self._num_candidates)
        # only use a user's history record as recommendable items
        if self.offline_mode and self.select_subset_func is not None:
            user_history_records = self.select_subset_func(user_obs['user_id'])
            if (len(user_history_records)<self._slate_size):
                # Not enough logged items for this user to fill even one slate.
                print("there is problem with current user : ",user_obs['user_id'])
            self._document_sampler.update_dataset(user_history_records,self._num_candidates)
            self.check_num_candidate()
            self._do_resample_documents()
            # print("after num candidate ", self._num_candidates)
            # print("recommendable doc size: ",self._document_sampler.dataset.shape[0])
        elif (not self.offline_mode and self._resample_documents):
            self.check_num_candidate()
            self._do_resample_documents()
        self._current_documents = collections.OrderedDict(
            self._candidate_set.create_observation())
        return (user_obs, self._current_documents)
    def step(self, slate):
        """Executes the action, returns next state observation and reward.
        Args:
          slate: An integer array of size slate_size, where each element is an index
            into the set of current_documents presented
        Returns:
          user_obs: A gym observation representing the user's next state
          doc_obs: A list of observations of the documents
          responses: A list of AbstractResponse objects for each item in the slate
          done: A boolean indicating whether the episode has terminated
        """
        assert (len(slate) <= self._slate_size
               ), 'Received unexpectedly large slate size: expecting %s, got %s' % (
                   self._slate_size, len(slate))
        # Get the documents associated with the slate
        doc_ids = list(self._current_documents)  # pytype: disable=attribute-error
        mapped_slate = [doc_ids[x] for x in slate]
        documents = self._candidate_set.get_documents(mapped_slate)
        # Simulate the user's response
        responses = self._user_model.simulate_response(documents)
        # Update the user's state.
        self._user_model.update_state(documents, responses)
        # Update the documents' state.
        # NOTE(review): the sampler's update_state signature is
        # (documents, responses, num_candidate=None, resampled=True), so the
        # boolean self._resample_documents lands in `num_candidate` here,
        # not in `resampled` -- confirm this is intended.
        self._document_sampler.update_state(documents, responses,self._resample_documents)
        # Obtain next user state observation.
        user_obs = self._user_model.create_observation()
        # Check if reaches a terminal state and return.
        done = self._user_model.is_terminal(remaind_recommendable_size=min(self._candidate_set.size(),self._document_sampler.size()),slate_size=self.slate_size)
        # #update candidate set based on the response
        self.update_candidate_set(documents, responses)
        # Optionally, recreate the candidate set to simulate candidate
        # generators for the next query.
        if self._resample_documents:
            self.check_num_candidate()
            self._do_resample_documents()
        # Create observation of candidate set.
        self._current_documents = collections.OrderedDict(
            self._candidate_set.create_observation())
        return (user_obs, self._current_documents, responses, done)
    def update_candidate_set(self,documens,responses):
        # Drop clicked documents from the candidate set so they cannot be
        # recommended again within this session.
        for doc, response in zip(documens, responses):
            if response.click:
                self._candidate_set.remove_document(doc)
class LTSDocument(document.AbstractDocument):
    """A corpus document identified by an integer id plus an embedding vector."""
    def __init__(self, doc_id, embedding_features):
        # Keep the embedding locally; the base class stores the integer id.
        self.embedding_features = embedding_features
        super().__init__(doc_id)
    def create_observation(self):
        """Expose the document id and its embedding as the observation dict."""
        return {
            'doc_id': self._doc_id,
            'embedding_features': self.embedding_features,
        }
    def observation_space(self):
        """Gym space matching create_observation()."""
        embedding_dim = len(self.embedding_features)
        feature_space = spaces.Box(shape=(embedding_dim,), dtype=np.float32, low=0.0, high=1.0)
        return spaces.Dict({
            'doc_id': spaces.Discrete(100000),
            'embedding_features': feature_space,
        })
    def __str__(self):
        return f"Document {self._doc_id} "
class LTSDocumentSampler(document.AbstractDocumentSampler):
    """Samples documents (with embeddings) from a pandas corpus.

    The first column of `dataset` holds the document id; the remaining
    columns are the embedding features. A rotating shortlist of
    recommendable ids is drawn without replacement from the dataset.
    """
    def __init__(self, dataset ,num_candidate,doc_ctor=LTSDocument, **kwargs):
        super(LTSDocumentSampler, self).__init__(doc_ctor, **kwargs)
        self.dataset = dataset
        # Cursor into recommendable_doc_ids for round-robin sampling.
        self.count = 0
        # self.num_candidate = num_candidate
        self.max_num_recommendable_ids =num_candidate
        self.recommendable_doc_ids = self.dataset[self.dataset.columns[0]].values
        self.generate_list_recommendable_items()
    def generate_list_recommendable_items(self):
        # Draw a fresh shortlist of ids (without replacement) and reset the
        # sampling cursor. The shortlist size is capped by the dataset size.
        doc_ids = self.dataset[self.dataset.columns[0]].values
        self.max_num_recommendable_ids = min(self.max_num_recommendable_ids,len(doc_ids))
        recommendable_doc_ids = np.random.choice(doc_ids, self.max_num_recommendable_ids, replace=False)
        self.recommendable_doc_ids = recommendable_doc_ids
        self.count = 0
    def sample_document(self):
        """Return the next document from the current recommendable shortlist."""
        columns_id = self.dataset.columns[0]
        current_item_index = self.recommendable_doc_ids[self.count]
        current_item = self.dataset[self.dataset[columns_id] == current_item_index].values.flatten()
        if len(current_item) == 0:
            # Diagnostic only; the subsequent indexing will fail if this fires.
            print("can not find this item in the dataset ")
            print(" the current ids in the dataset : ",self.dataset[columns_id].values)
            print("the current ids in recommendable list : ",self.recommendable_doc_ids)
        doc_features = {}
        doc_features['doc_id'] = int(current_item[0])
        doc_features['embedding_features'] = current_item[1:]
        self.count = (self.count+1)
        # generate a new list of recommendable items after every resample step
        if self.count == self.max_num_recommendable_ids:
            self.generate_list_recommendable_items()
        return self._doc_ctor(**doc_features)
    def update_state(self, documents, responses,num_candidate = None,resampled = True):
        """Update document state (if needed) given user's (or users') responses.

        Removes the clicked documents from the recommendable dataset.

        NOTE(review): the environment's step() passes its resample flag as
        the third POSITIONAL argument, which binds to `num_candidate` here
        (a bool), leaving `resampled` at its default True -- verify this is
        the intended calling convention.
        """
        #remove the documents that user selected in recommendable dataset
        list_id = list()
        id_col = self.dataset.columns[0]
        for index in range(len(documents)):
            doc = documents[index]
            response = responses[index]
            if response.click:
                doc_obs = doc.create_observation()
                list_id.append(doc_obs['doc_id'])
        self.dataset = self.dataset[~self.dataset[id_col].isin(list_id)]
        # print("new dataset size in doc sampler update : ",len(self.dataset))
        # print(self.dataset[id_col].values)
        # #remove the documets in the recommendable list
        # self.recommendable_doc_ids
        #start again at the beginning of the current recommendable list
        # self.count = 0
        if num_candidate is not None:
            self.num_candidate = num_candidate
        if resampled:
            self.generate_list_recommendable_items()
        #TODO: deal with case when we have to remove items in recommendable list IDs, not resample
    def update_dataset(self,dataset,num_candidate= None):
        """Replace the corpus (e.g. with one user's history) and re-shortlist."""
        self.dataset = dataset
        # print("total number of items belong to this user : ",len(self.dataset))
        self.count = 0
        if num_candidate is not None:
            self.max_num_recommendable_ids = num_candidate
        self.generate_list_recommendable_items()
    def size(self):
        # Number of documents currently available for sampling.
        return len(self.dataset)
class LTSUserState(user.AbstractUserState):
    """State of one simulated user.

    Tracks the remaining interaction budget and a rolling window of the
    user's most recently consumed items (ids plus embedding features).
    In offline mode, documents are scored from a logged ratings table.
    """
    def __init__(self, memory_discount, time_budget, user_info, offline_mode=True, offline_data=None, corpus_features_dim=30, history_record_size=10):
        # Remaining number of interaction steps in this session.
        self.time_budget = time_budget
        self.satisfaction = 0
        self.corpus_features_dim = corpus_features_dim
        self.history_record_size = history_record_size
        # Identity and rolling history of the sampled user.
        self.user_id = user_info['userId']
        self.user_recent_past_record_ids = user_info['record_ids']
        self.user_recent_past_record = user_info['past_record']
        # Cursor used when folding new recommendations into the history.
        self.state_update_index = 0
        # Offline mode scores documents against held-out rating records.
        self.offline_mode = offline_mode
        self.user_offline_record = offline_data
    def create_observation(self):
        """Expose the user id and recent history as the observation dict."""
        return {
            'user_id': self.user_id,
            'record_ids': np.array(self.user_recent_past_record_ids),
            'past_record': np.array(self.user_recent_past_record),
        }
    def observation_space(self):
        """Gym space matching create_observation()."""
        ids_shape = (self.history_record_size,)
        record_shape = (self.history_record_size, self.corpus_features_dim)
        return spaces.Dict({
            'user_id': spaces.Discrete(100000),
            'record_ids': spaces.Box(shape=ids_shape, dtype=np.int8, low=0, high=1000000),
            'past_record': spaces.Box(shape=record_shape, dtype=np.float32, low=-10.0, high=10.0),
        })
    # scoring function for use in the choice model -- the user is more likely to
    def score_document(self, doc_obs):
        """Score a document from the user's logged ratings (0 if unrated)."""
        doc_id = doc_obs['doc_id']
        embedding_features = doc_obs['embedding_features']
        if not self.offline_mode:
            return 0
        records = self.user_offline_record
        mask = (records['userId'] == self.user_id) & (records['movieId'] == doc_id)
        ratings = records[mask]['rating']
        # A unique logged rating means we can score this document exactly.
        return ratings.values if len(ratings) == 1 else 0
    def update_time_buget(self):
        """Consume one step of the session budget."""
        self.time_budget -= 1
    def is_offline(self):
        return self.offline_mode
    def get_user_offline_record(self):
        return self.user_offline_record
    def update_user_history_record(self, new_doc_id, new_doc_feature):
        """Shift the history window left by one and append the new document."""
        last = len(self.user_recent_past_record_ids) - 1
        for idx in range(last):
            self.user_recent_past_record_ids[idx] = self.user_recent_past_record_ids[idx + 1]
            self.user_recent_past_record[idx] = self.user_recent_past_record[idx + 1]
        self.user_recent_past_record_ids[last] = new_doc_id
        self.user_recent_past_record[last] = new_doc_feature
class LTSStaticUserSampler(user.AbstractUserSampler):
    """Samples a user (id plus recent-history feature matrix) from logged data.

    Builds an LTSUserState for either a uniformly random user (`random=True`)
    or for each user in turn (reshuffled each full pass). The user's last
    `history_size` items are looked up in `corpus_data` to form the
    history feature matrix.
    """
    def __init__(self, user_recent_history_data,corpus_data,offline_data = None,offline_mode = True,memory_discount=0.9,
                 time_budget=4,history_size = 10,doc_feature_size = 30,
                 user_ctor=LTSUserState,seed = 0,random = True,time_budget_range = None,
                 **kwargs):
        super(LTSStaticUserSampler, self).__init__(user_ctor, **kwargs)
        self.corpus_data = corpus_data
        self.user_recent_history_data = user_recent_history_data
        self.posible_user_ids = np.unique(self.user_recent_history_data['userId'].values)
        self.seed = seed
        self.doc_feature_size = doc_feature_size
        self.history_size = history_size
        self.offline_mode = offline_mode
        self.offline_data = offline_data
        # Optional (low, high) inclusive range for a randomized time budget.
        self.time_budget_range = time_budget_range
        self._state_parameters = {'memory_discount': memory_discount,
                                  'time_budget': time_budget,
                                  'offline_mode': self.offline_mode,
                                  'offline_data': self.offline_data
                                  }
        # NOTE: the parameter name `random` shadows the random module inside
        # __init__ only; kept for backward compatibility with callers.
        self.random = random
        self.current_user_index = 0
    def sample_user(self):
        """Pick a user, assemble their history matrix, and build a user state."""
        if (self.random):
            pick_user_id = np.random.choice(self.posible_user_ids, 1)[0]
        else:
            # Iterate over every user; reshuffle at the start of each pass.
            if (self.current_user_index == 0):
                np.random.shuffle(self.posible_user_ids)
            pick_user_id = self.posible_user_ids[self.current_user_index]
            self.current_user_index = (self.current_user_index + 1) % len(self.posible_user_ids)
        # Pick one user out of list of possible users to perform the study.
        history_data = self.user_recent_history_data[self.user_recent_history_data['userId'] == pick_user_id].sort_values(by=['timestamp'])
        # Build the matrix of embedding features for the user's history.
        past_record = np.zeros((self.history_size, self.doc_feature_size))
        past_record_ids = history_data['movieId'].values
        for index in range(self.history_size):
            current_move_id = past_record_ids[index]
            current_movie_embedding_features = self.corpus_data[self.corpus_data['id'] == current_move_id]
            past_record[index] = current_movie_embedding_features.values[0, 1:]
        user_info = {
            'userId': pick_user_id,
            'record_ids': past_record_ids,
            'past_record': past_record
        }
        if self.time_budget_range is not None:
            # BUGFIX: random.randint is inclusive on both ends, so the
            # original `high + 1` could produce a budget one above the
            # configured upper bound.
            random_time_budget = random.randint(self.time_budget_range[0], self.time_budget_range[1])
            self._state_parameters['time_budget'] = random_time_budget
        self._state_parameters['user_info'] = user_info
        return self._user_ctor(**self._state_parameters)
class LTSResponse(user.AbstractResponse):
    """User feedback (click / rating / engagement) for one recommended document."""
    # The maximum degree of engagement.
    MAX_ENGAGEMENT_MAGNITUDE = 100.0
    def __init__(self, click=False, engagement=0.0, rating=-1):
        self.click = click
        self.engagement = engagement
        self.rating = rating
    def create_observation(self):
        """Serialize this response into a gym-style observation dict."""
        return {
            'click': int(self.click),
            'engagement': np.array(self.engagement),
            'rating': int(self.rating),
        }
    @classmethod
    def response_space(cls):
        """Gym space matching create_observation()."""
        # `engagement` feature range is [0, MAX_ENGAGEMENT_MAGNITUDE]
        engagement_space = spaces.Box(
            low=0.0,
            high=cls.MAX_ENGAGEMENT_MAGNITUDE,
            shape=tuple(),
            dtype=np.float32)
        return spaces.Dict({
            'click': spaces.Discrete(2),
            'engagement': engagement_space,
            'rating': spaces.Discrete(6),
        })
    def update_response(self, click, rating, engagement):
        """Overwrite this response in place with fresh feedback."""
        self.click = click
        self.engagement = engagement
        self.rating = rating
class AbstractRatingModel(object):
    """Interface for per-user rating models.

    A concrete model scores a slate of documents for a given user and can
    then select the items whose score clears a rating threshold.
    """
    # Populated by score_documents(); None until scoring has been run.
    _scores = None
    def score_documents(self, user_state, doc_obs):
        """Compute unnormalized scores of slate documents for a user.

        Args:
            user_state: An instance of AbstractUserState.
            doc_obs: A numpy array of observations for every document in
                the slate.

        Side effect: concrete implementations store the result in _scores.
        """
    @property
    def scores(self):
        """Scores from the most recent score_documents() call (or None)."""
        return self._scores
    def choose_item(self, rating_pivot):
        """Return indices of slate documents whose score >= rating_pivot.

        Returns:
            A list of integer indices of the chosen items, or None if
            nothing qualifies.
        """
class HistoryChoiceModel(AbstractRatingModel):
    """A rating model backed by the user's logged (offline) ratings.

    Args:
        choice_features: a dict that stores the features used in choice model:
            `no_click_mass`: a float indicating the mass given to a no click
            option (defaults to -inf).
    """
    def __init__(self, choice_features):
        self._no_click_mass = choice_features.get('no_click_mass', -float('Inf'))
    def score_documents(self, user_state, doc_obs):
        """Score each document with the user's historical rating (0 if unrated).

        Side effect: stores the per-document scores in self._scores.
        """
        # The original also fetched user_state's observation in offline mode
        # but never used it; that dead code has been removed.
        scores = np.array([])
        for doc in doc_obs:
            scores = np.append(scores, user_state.score_document(doc))
        self._scores = scores
    def choose_item(self, rating_pivot):
        """Return indices of documents whose score reaches rating_pivot."""
        return np.argwhere(self._scores >= rating_pivot)
class UserModel(user.AbstractUserModel):
    """Simulated user: scores slates, emits responses, and updates state.

    In offline mode, responses come from the user's logged ratings
    (HistoryChoiceModel); otherwise a multinomial-logit choice model is used.
    A document is "clicked" when its rating score reaches `rating_pivot`.
    """
    def __init__(self, sampler, offline_mode = True ,rating_pivot = 4,slate_size = 10,response_ctor = LTSResponse):
        super(UserModel, self).__init__(response_ctor,sampler,slate_size)
        self.offline_mode = offline_mode
        # Minimum rating for a document to count as clicked.
        self.rating_pivot = rating_pivot
        if (self.offline_mode):
            # print("use history model")
            self.response_model = HistoryChoiceModel({})
        else:
            self.response_model = MultinomialLogitChoiceModel({})
    def simulate_response(self, slate_documents):
        """Score every slate document and fill in one response per document."""
        # List of empty responses
        responses = [self._response_model_ctor() for _ in slate_documents]
        # Get click from of choice model.
        self.response_model.score_documents(
            self._user_state, [doc.create_observation() for doc in slate_documents])
        scores = self.response_model.scores
        # print("possible scores : ",scores)
        for index in range(len(scores)):
            self.generate_response(slate_documents[index],responses[index],scores[index])
        return responses
    def update_state(self, slate_documents, responses):
        """Fold clicked documents into the user's history and spend budget.

        :param slate_documents: doc objects presented in the slate
        :param responses: response objects aligned with slate_documents
        :return: None
        """
        # print("current slate documents to update : ",slate_documents[0])
        # print("current responses to update : ",responses[0].create_observation())
        for doc, response in zip(slate_documents, responses):
            if response.click:
                self._user_state.satisfaction = 0
                doc_obs = doc.create_observation()
                doc_id = doc_obs['doc_id']
                doc_features = doc_obs['embedding_features']
                self._user_state.update_user_history_record(doc_id,doc_features)
        self._user_state.update_time_buget()
    def is_terminal(self,remaind_recommendable_size = -1,slate_size = -1):
        """Returns a boolean indicating if the session is over."""
        # Session ends when too few items remain for a slate or budget is spent.
        if (remaind_recommendable_size < slate_size or self._user_state.time_budget <= 0):
            return True
        return False
    def generate_response(self, doc, response,rating_score):
        """Mark a response clicked when its rating clears the pivot.

        NOTE(review): in offline mode rating_score may be a length-1 numpy
        array (LTSUserState.score_document returns .values) -- the >=
        comparison still works element-wise; confirm this is intended.
        """
        clicked = False
        if (rating_score>= self.rating_pivot):
            clicked = True
        # print("response click : ",clicked)
        engagement = 0
        response.update_response(clicked,rating_score,engagement)
def clicked_engagement_reward(responses):
    """Compute the mean normalized rating reward over a slate of responses.

    Each response's rating is mapped via (rating - 3) / 2, so a 5-star
    rating yields +1 and a 1-star rating yields -1, and the values are
    averaged over the slate.

    Args:
        responses: iterable of response objects exposing a numeric `rating`.

    Returns:
        float: mean normalized rating; 0.0 for an empty slate (the original
        raised ZeroDivisionError in that case).
    """
    if not responses:
        return 0.0
    total = len(responses)
    return sum((response.rating - 3) / 2.0 for response in responses) / total
def select_dataset(dataset, user_data):
    """Return a closure mapping a user id to that user's corpus rows.

    Args:
        dataset: DataFrame of documents whose first column is 'id'.
        user_data: DataFrame of interactions with 'userId' and 'movieId'.

    Returns:
        callable(user_id) -> DataFrame of `dataset` rows whose ids appear
        in that user's interaction history.
    """
    def user_history_documents(user_id):
        watched_ids = user_data.loc[user_data['userId'] == user_id, 'movieId'].values
        return dataset[dataset['id'].isin(watched_ids)]
    return user_history_documents
def test_custom_env():
    """Smoke-test the offline environment end-to-end on the movies dataset.

    Loads precomputed movie embeddings and user history from disk, builds
    the custom single-user environment in offline mode, then repeatedly
    steps a fixed slate until the episode terminates, printing the
    observations and rewards along the way.
    """
    path = '../master_capston/the-movies-dataset/'
    features_embedding_movies = pd.read_csv(os.path.join(path, 'movie_embedding_features.csv'))
    # this mean the number of items in the recommendation return from the agent
    slate_size = 3
    # i am assuming this number mean the # of possible items to send to the agent for recommend for each slate
    num_candidates = 11
    format_data = data_preprocess.load_data(path)
    features_embedding_movies = pd.read_csv(os.path.join(path, 'movie_embedding_features.csv'))
    positive_user_ids, positive_history_data = data_preprocess.get_user_positive(format_data)
    # generate train and test set
    train_set, test_set = data_preprocess.generate_train_test_data(positive_history_data)
    users_history_data, train_set = data_preprocess.create_recent_history(train_set,
                                                                          embedding_features_data=features_embedding_movies)
    offline_mode = True
    rating_pivot = 4
    sampler = LTSDocumentSampler(dataset=features_embedding_movies,num_candidate=num_candidates)
    user_sampler = LTSStaticUserSampler(users_history_data ,features_embedding_movies,offline_data=test_set,offline_mode=offline_mode)
    # need to handle where we update dataset with num candidate< available
    func = select_dataset(features_embedding_movies, test_set)
    LTSUserModel = UserModel(user_sampler, offline_mode=offline_mode,rating_pivot=rating_pivot,slate_size=slate_size, response_ctor=LTSResponse)
    ltsenv = CustomSingleUserEnviroment(
        LTSUserModel,
        sampler,
        num_candidates,
        slate_size,
        resample_documents=False, offline_mode=True, select_subset_func=func)
    # Wrap in a gym interface; rewards come from clicked_engagement_reward.
    lts_gym_env = recsim_gym.RecSimGymEnv(ltsenv, clicked_engagement_reward)
    observation_0 = lts_gym_env.reset()
    print("current user : ",observation_0['user']['user_id'])
    print("current history of user items :", observation_0['user']['record_ids'])
    print("candidate recommend docs ids : ", observation_0['doc'].keys())
    done = False
    while(not done):
        # for i in range(4):
        # Always recommend the first three candidates of the current set.
        recommendation_slate_0 = [0, 1, 2]
        observation_1, reward, done, _ = lts_gym_env.step(recommendation_slate_0)
        print("response : ", observation_1['response'])
        print("reward : ",reward)
        print("next history of recommend items :", observation_1['user']['record_ids'])
        print("total remaind candidate items to recommend : ",len(observation_1['doc'].keys()))
        print("docs ids : ", observation_1['doc'].keys())
# test_custom_env() | [
"data_preprocess.get_user_positive",
"random.randint",
"numpy.random.shuffle",
"data_preprocess.load_data",
"numpy.zeros",
"gym.spaces.Discrete",
"numpy.argwhere",
"numpy.array",
"recsim.choice_model.MultinomialLogitChoiceModel",
"gym.spaces.Box",
"numpy.random.choice",
"data_preprocess.create... | [((22816, 22847), 'data_preprocess.load_data', 'data_preprocess.load_data', (['path'], {}), '(path)\n', (22841, 22847), False, 'import data_preprocess\n'), ((22991, 23037), 'data_preprocess.get_user_positive', 'data_preprocess.get_user_positive', (['format_data'], {}), '(format_data)\n', (23024, 23037), False, 'import data_preprocess\n'), ((23098, 23161), 'data_preprocess.generate_train_test_data', 'data_preprocess.generate_train_test_data', (['positive_history_data'], {}), '(positive_history_data)\n', (23138, 23161), False, 'import data_preprocess\n'), ((23198, 23302), 'data_preprocess.create_recent_history', 'data_preprocess.create_recent_history', (['train_set'], {'embedding_features_data': 'features_embedding_movies'}), '(train_set, embedding_features_data=\n features_embedding_movies)\n', (23235, 23302), False, 'import data_preprocess\n'), ((24154, 24212), 'recsim.simulator.recsim_gym.RecSimGymEnv', 'recsim_gym.RecSimGymEnv', (['ltsenv', 'clicked_engagement_reward'], {}), '(ltsenv, clicked_engagement_reward)\n', (24177, 24212), False, 'from recsim.simulator import recsim_gym\n'), ((7594, 7666), 'numpy.random.choice', 'np.random.choice', (['doc_ids', 'self.max_num_recommendable_ids'], {'replace': '(False)'}), '(doc_ids, self.max_num_recommendable_ids, replace=False)\n', (7610, 7666), True, 'import numpy as np\n'), ((13992, 14049), 'numpy.unique', 'np.unique', (["self.user_recent_history_data['userId'].values"], {}), "(self.user_recent_history_data['userId'].values)\n", (14001, 14049), True, 'import numpy as np\n'), ((15471, 15523), 'numpy.zeros', 'np.zeros', (['(self.history_size, self.doc_feature_size)'], {}), '((self.history_size, self.doc_feature_size))\n', (15479, 15523), True, 'import numpy as np\n'), ((18782, 18794), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18790, 18794), True, 'import numpy as np\n'), ((19105, 19146), 'numpy.argwhere', 'np.argwhere', (['(self._scores >= rating_pivot)'], {}), '(self._scores >= 
rating_pivot)\n', (19116, 19146), True, 'import numpy as np\n'), ((22509, 22559), 'os.path.join', 'os.path.join', (['path', '"""movie_embedding_features.csv"""'], {}), "(path, 'movie_embedding_features.csv')\n", (22521, 22559), False, 'import os\n'), ((22892, 22942), 'os.path.join', 'os.path.join', (['path', '"""movie_embedding_features.csv"""'], {}), "(path, 'movie_embedding_features.csv')\n", (22904, 22942), False, 'import os\n'), ((11506, 11548), 'numpy.array', 'np.array', (['self.user_recent_past_record_ids'], {}), '(self.user_recent_past_record_ids)\n', (11514, 11548), True, 'import numpy as np\n'), ((11565, 11603), 'numpy.array', 'np.array', (['self.user_recent_past_record'], {}), '(self.user_recent_past_record)\n', (11573, 11603), True, 'import numpy as np\n'), ((16098, 16170), 'random.randint', 'random.randint', (['self.time_budget_range[0]', '(self.time_budget_range[1] + 1)'], {}), '(self.time_budget_range[0], self.time_budget_range[1] + 1)\n', (16112, 16170), False, 'import random\n'), ((16724, 16749), 'numpy.array', 'np.array', (['self.engagement'], {}), '(self.engagement)\n', (16732, 16749), True, 'import numpy as np\n'), ((19676, 19707), 'recsim.choice_model.MultinomialLogitChoiceModel', 'MultinomialLogitChoiceModel', (['{}'], {}), '({})\n', (19703, 19707), False, 'from recsim.choice_model import MultinomialLogitChoiceModel, AbstractChoiceModel\n'), ((6625, 6648), 'gym.spaces.Discrete', 'spaces.Discrete', (['(100000)'], {}), '(100000)\n', (6640, 6648), False, 'from gym import spaces\n'), ((11690, 11713), 'gym.spaces.Discrete', 'spaces.Discrete', (['(100000)'], {}), '(100000)\n', (11705, 11713), False, 'from gym import spaces\n'), ((11757, 11843), 'gym.spaces.Box', 'spaces.Box', ([], {'shape': '(self.history_record_size,)', 'dtype': 'np.int8', 'low': '(0)', 'high': '(1000000)'}), '(shape=(self.history_record_size,), dtype=np.int8, low=0, high=\n 1000000)\n', (11767, 11843), False, 'from gym import spaces\n'), ((11883, 11997), 'gym.spaces.Box', 
'spaces.Box', ([], {'shape': '(self.history_record_size, self.corpus_features_dim)', 'dtype': 'np.float32', 'low': '(-10.0)', 'high': '(10.0)'}), '(shape=(self.history_record_size, self.corpus_features_dim),\n dtype=np.float32, low=-10.0, high=10.0)\n', (11893, 11997), False, 'from gym import spaces\n'), ((14748, 14790), 'numpy.random.choice', 'np.random.choice', (['self.posible_user_ids', '(1)'], {}), '(self.posible_user_ids, 1)\n', (14764, 14790), True, 'import numpy as np\n'), ((14968, 15008), 'numpy.random.shuffle', 'np.random.shuffle', (['self.posible_user_ids'], {}), '(self.posible_user_ids)\n', (14985, 15008), True, 'import numpy as np\n'), ((16956, 16974), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (16971, 16974), False, 'from gym import spaces\n'), ((17222, 17240), 'gym.spaces.Discrete', 'spaces.Discrete', (['(6)'], {}), '(6)\n', (17237, 17240), False, 'from gym import spaces\n')] |
import argparse
import os
from os.path import join
from pathflowai.utils import run_preprocessing_pipeline, generate_patch_pipeline, img2npy_, create_zero_mask
import click
import dask
import time
# Shared click settings: accept -h as well as --help, and widen help text output.
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
# Root click group for all preprocessing subcommands. Intentionally has no
# docstring: click would surface one as CLI help text.
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='0.1')
def preprocessing():
	pass
def output_if_exists(filename):
	"""Return *filename* when it refers to an existing path, else None.

	Parameters
	----------
	filename : str
		File in question.

	Returns
	-------
	str or None
		The unchanged filename if it exists on disk, otherwise None.
	"""
	return filename if os.path.exists(filename) else None
@preprocessing.command()
@click.option('-npy', '--img2npy', is_flag=True, help='Image to numpy for faster read.', show_default=True)
@click.option('-b', '--basename', default='A01', help='Basename of patches.', type=click.Path(exists=False), show_default=True)
@click.option('-i', '--input_dir', default='./inputs/', help='Input directory for patches.', type=click.Path(exists=False), show_default=True)
@click.option('-a', '--annotations', default=[], multiple=True, help='Annotations in image in order.', type=click.Path(exists=False), show_default=True)
@click.option('-pr', '--preprocess', is_flag=True, help='Run preprocessing pipeline.', show_default=True)
@click.option('-pa', '--patches', is_flag=True, help='Add patches to SQL.', show_default=True)
@click.option('-t', '--threshold', default=0.05, help='Threshold to remove non-purple slides.', show_default=True)
@click.option('-ps', '--patch_size', default=224, help='Patch size.', show_default=True)
@click.option('-it', '--intensity_threshold', default=100., help='Intensity threshold to rate a pixel as non-white.', show_default=True)
@click.option('-g', '--generate_finetune_segmentation', is_flag=True, help='Generate patches for one segmentation mask class for targeted finetuning.', show_default=True)
@click.option('-tc', '--target_segmentation_class', default=0, help='Segmentation Class to finetune on, output patches to another db.', show_default=True)
@click.option('-tt', '--target_threshold', default=0., help='Threshold to include target for segmentation if saving one class.', show_default=True)
@click.option('-odb', '--out_db', default='./patch_info.db', help='Output patch database.', type=click.Path(exists=False), show_default=True)
@click.option('-am', '--adjust_mask', is_flag=True, help='Remove additional background regions from annotation mask.', show_default=True)
@click.option('-nn', '--n_neighbors', default=5, help='If adjusting mask, number of neighbors connectivity to remove.', show_default=True)
@click.option('-bp', '--basic_preprocess', is_flag=True, help='Basic preprocessing pipeline, annotation areas are not saved. Used for benchmarking tool against comparable pipelines', show_default=True)
@click.option('-ei', '--entire_image', is_flag=True, help='Store entire image in central db rather than patches.', show_default=True)
@click.option('-nz', '--no_zarr', is_flag=True, help='Don\'t save zarr format file.', show_default=True)
@click.option('-pka', '--pkl_annot', is_flag=True, help='Look for .annot.pkl pickle files instead of xml annotations.', show_default=True)
@click.option('-ta', '--transpose_annotations', is_flag=True, help='Transpose annotations.', show_default=True)
@click.option('-gtm', '--get_tissue_mask', is_flag=True, help='Build tissue mask instead of intensity thresholding.', show_default=True)
@click.option('-ot', '--otsu', is_flag=True, help='Utilize otsu method to decide intensity threshold.', show_default=True)
@click.option('-cm', '--compression', default=8., help='If find tissue mask, how much to downsample image.', show_default=True)
@click.option('-ch', '--return_convex_hull', is_flag=True, help='Return convex hull of tissue mask.', show_default=True)
@click.option('-kh', '--keep_holes', is_flag=True, help='Keep holes tissue mask.', show_default=True)
@click.option('-mhs', '--max_hole_size', default=0, help='If removing holes, what is maximum allowed size to remain.', show_default=True)
@click.option('-gbc', '--gray_before_close', is_flag=True, help='Filter grays before binary closing operation.', show_default=True)
@click.option('-kl', '--kernel', default=61, help='Binary closing kernel.', show_default=True)
@click.option('-mos', '--min_object_size', default=100000, help='Remove all connected components smaller than this size.', show_default=True)
@click.option('-bs', '--blur_size', default=0, help='How much to blur tissue mask.', show_default=True)
def preprocess_pipeline(img2npy,basename,input_dir,annotations,preprocess,patches,threshold,patch_size, intensity_threshold, generate_finetune_segmentation, target_segmentation_class, target_threshold, out_db, adjust_mask, n_neighbors, basic_preprocess, entire_image, no_zarr, pkl_annot, transpose_annotations,get_tissue_mask,otsu,compression,return_convex_hull, keep_holes, max_hole_size, gray_before_close, kernel, min_object_size, blur_size):
	"""Preprocessing pipeline that accomplishes 3 things. 1: storage into ZARR format, 2: optional mask adjustment, 3: storage of patch-level information into SQL DB"""
	# Locate the input slide: probe every supported extension in priority order
	# and keep the first file that exists on disk.
	for ext in ['.npy','.svs','.tiff','.tif', '.vms', '.vmu', '.ndpi', '.scn', '.mrxs', '.svslide', '.bif', '.jpeg', '.png', '.h5']:
		svs_file = output_if_exists(join(input_dir,'{}{}'.format(basename,ext)))
		if svs_file != None:
			break
	# NOTE(review): if no candidate file exists, svs_file stays None and the
	# .endswith() call below raises AttributeError — confirm inputs always exist.
	if img2npy and not svs_file.endswith('.npy'):
		# Convert the slide to .npy for faster subsequent reads.
		svs_file = img2npy_(input_dir,basename, svs_file)
	# Optional annotation inputs: XML (or pickled annotations) and a raw numpy mask.
	xml_file = output_if_exists(join(input_dir,'{}{}'.format(basename,".xml" if not pkl_annot else ".annot.pkl")))
	npy_mask = output_if_exists(join(input_dir,'{}_mask.npy'.format(basename)))
	# Stage-1 outputs: image as zarr, annotation shapes as pickle.
	out_zarr = join(input_dir,'{}.zarr'.format(basename))
	out_pkl = join(input_dir,'{}_mask.pkl'.format(basename))
	adj_npy=''
	start=time.time()
	if preprocess:
		# Stage 1: dump the image (ZARR) and annotation regions (pickle).
		run_preprocessing_pipeline(svs_file=svs_file,
					xml_file=xml_file,
					npy_mask=npy_mask,
					annotations=annotations,
					out_zarr=out_zarr,
					out_pkl=out_pkl,
					no_zarr=no_zarr,
					transpose_annotations=transpose_annotations)
		if npy_mask==None and xml_file==None:
			# No annotations supplied at all: fabricate an all-zero mask so the
			# segmentation path downstream still has a target.
			print('Generating Zero Mask')
			npy_mask=join(input_dir,'{}_mask.npz'.format(basename))
			target_segmentation_class=1
			generate_finetune_segmentation=True
			create_zero_mask(npy_mask,out_zarr if not no_zarr else svs_file,out_pkl)
	preprocess_point = time.time()
	print('Data dump took {}'.format(preprocess_point-start))
	if adjust_mask:
		# Stage 2 (optional): strip extra background regions from the annotation
		# mask; result is cached under input_dir/adjusted_masks.
		from pathflowai.utils import adjust_mask
		adj_dir=join(input_dir,'adjusted_masks')
		adj_npy=join(adj_dir,os.path.basename(npy_mask))
		os.makedirs(adj_dir,exist_ok=True)
		if not os.path.exists(adj_npy):
			adjust_mask(npy_mask, out_zarr if not no_zarr else svs_file, adj_npy, n_neighbors)
	adjust_point = time.time()
	print('Adjust took {}'.format(adjust_point-preprocess_point))
	if patches: # ADD EXPORT TO SQL, TABLE NAME IS PATCH SIZE
		# Stage 3: tile the slide into patches and write per-patch rows to SQL.
		generate_patch_pipeline(basename,
					input_dir=input_dir,
					annotations=annotations,
					threshold=threshold,
					patch_size=patch_size,
					out_db=out_db,
					generate_finetune_segmentation=generate_finetune_segmentation,
					target_class=target_segmentation_class,
					intensity_threshold=intensity_threshold,
					target_threshold=target_threshold,
					adj_mask=adj_npy,
					basic_preprocess=basic_preprocess,
					entire_image=entire_image,
					svs_file=svs_file,
					transpose_annotations=transpose_annotations,
					get_tissue_mask=get_tissue_mask,
					otsu=otsu,
					compression=compression,
					return_convex_hull=return_convex_hull,
					keep_holes=keep_holes,
					max_hole_size=max_hole_size,
					gray_before_close=gray_before_close,
					kernel=kernel,
					min_object_size=min_object_size,
					blur_size=blur_size)
	patch_point = time.time()
	print('Patches took {}'.format(patch_point-adjust_point))
@preprocessing.command()
@click.option('-i', '--mask_dir', default='./inputs/', help='Input directory for masks.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--output_dir', default='./outputs/', help='Output directory for new masks.', type=click.Path(exists=False), show_default=True)
@click.option('-fr', '--from_annotations', default=[], multiple=True, help='Annotations to switch from.', show_default=True)
@click.option('-to', '--to_annotations', default=[], multiple=True, help='Annotations to switch to.', show_default=True)
def alter_masks(mask_dir, output_dir, from_annotations, to_annotations):
	"""Map list of values to other values in mask."""
	import glob
	from pathflowai.utils import npy2da
	import numpy as np
	from dask.distributed import Client
	# One destination value is required for every source value.
	assert len(from_annotations)==len(to_annotations)
	# Dask client gives the .compute() calls below a scheduler.
	# NOTE(review): the client is never closed before this command returns.
	c=Client()
	from_annotations=list(map(int,from_annotations))
	to_annotations=list(map(int,to_annotations))
	os.makedirs(output_dir,exist_ok=True)
	masks=glob.glob(join(mask_dir,'*_mask.npy'))
	from_to=list(zip(from_annotations,to_annotations))
	for mask in masks:
		output_mask=join(output_dir,os.path.basename(mask))
		# Lazy dask array over the mask; relabel values, then materialize once.
		arr=npy2da(mask)
		for fr,to in from_to:
			arr[arr==fr]=to
		np.save(output_mask,arr.compute())
@preprocessing.command()
@click.option('-i', '--input_patch_db', default='patch_info_input.db', help='Input db.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--output_patch_db', default='patch_info_output.db', help='Output db.', type=click.Path(exists=False), show_default=True)
@click.option('-b', '--basename', default='A01', help='Basename.', type=click.Path(exists=False), show_default=True)
@click.option('-ps', '--patch_size', default=224, help='Patch size.', show_default=True)
def remove_basename_from_db(input_patch_db, output_patch_db, basename, patch_size):
	"""Removes basename/ID from SQL DB."""
	import sqlite3
	import numpy as np, pandas as pd
	# Bug fix: the original `output_patch_db[:output_patch_db.rfind('/')]`
	# mangled bare filenames (rfind == -1 slices off the last character and
	# makedirs a bogus directory). Use dirname and only create it if non-empty.
	out_dir = os.path.dirname(output_patch_db)
	if out_dir:
		os.makedirs(out_dir, exist_ok=True)
	# Load the patch table for this patch size; table name is the patch size.
	conn = sqlite3.connect(input_patch_db)
	try:
		df = pd.read_sql('select * from "{}";'.format(patch_size), con=conn)
	finally:
		conn.close()
	# Drop every patch row belonging to the slide/ID being removed.
	df = df.loc[df['ID'] != basename]
	conn = sqlite3.connect(output_patch_db)
	try:
		df.set_index('index').to_sql(str(patch_size), con=conn, if_exists='replace')
	finally:
		conn.close()
@preprocessing.command()
@click.option('-i', '--input_patch_db', default='patch_info_input.db', help='Input db.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--output_patch_db', default='patch_info_output.db', help='Output db.', type=click.Path(exists=False), show_default=True)
@click.option('-fr', '--from_annotations', default=[], multiple=True, help='Annotations to switch from.', show_default=True)
@click.option('-to', '--to_annotations', default=[], multiple=True, help='Annotations to switch to.', show_default=True)
@click.option('-ps', '--patch_size', default=224, help='Patch size.', show_default=True)
@click.option('-rb', '--remove_background_annotation', default='', help='If selected, removes 100\% background patches based on this annotation.', type=click.Path(exists=False), show_default=True)
@click.option('-ma', '--max_background_area', default=0.05, help='Max background area before exclusion.', show_default=True)
def collapse_annotations(input_patch_db, output_patch_db, from_annotations, to_annotations, patch_size, remove_background_annotation, max_background_area):
	"""Adds annotation classes areas to other annotation classes in SQL DB when getting rid of some annotation classes."""
	import sqlite3
	import numpy as np, pandas as pd
	# One destination class is required per source class.
	assert len(from_annotations)==len(to_annotations)
	from_annotations=list(map(str,from_annotations))
	to_annotations=list(map(str,to_annotations))
	# NOTE(review): rfind('/') is -1 for a bare filename, which truncates the
	# last character here — confirm output_patch_db always contains a directory.
	os.makedirs(output_patch_db[:output_patch_db.rfind('/')],exist_ok=True)
	conn = sqlite3.connect(input_patch_db)
	df=pd.read_sql('select * from "{}";'.format(patch_size),con=conn)
	conn.close()
	from_to=zip(from_annotations,to_annotations)
	if remove_background_annotation:
		# Exclude patches that are nearly all background for this annotation.
		df=df.loc[df[remove_background_annotation]<=(1.-max_background_area)]
	for fr,to in from_to:
		# Fold the source class's area fraction into the destination class.
		df.loc[:,to]+=df[fr]
	df=df[[col for col in list(df) if col not in from_annotations]]
	# Columns 6+ hold per-class area fractions; renumber them 0..k-1.
	annotations = list(df.iloc[:,6:])
	df=df.rename(columns={annot:str(i) for i, annot in enumerate(annotations)})
	annotations = list(df.iloc[:,6:])
	# The dominant (largest-area) class per patch becomes its label.
	df.loc[:,'annotation']=np.vectorize(lambda i: annotations[df.iloc[i,6:].values.argmax()])(np.arange(df.shape[0]))
	df.loc[:,'index']=np.arange(df.shape[0])
	conn = sqlite3.connect(output_patch_db)
	#print(df)
	df.set_index('index').to_sql(str(patch_size), con=conn, if_exists='replace')
	conn.close()
if __name__ == '__main__':
	from dask.distributed import Client
	# Point dask scratch space at tmp/ and tolerate many worker failures
	# before the scheduler gives up.
	dask.config.set({'temporary_dir':'tmp/',
					'distributed.worker.local_dir':'tmp/',
					'distributed.scheduler.allowed-failures':20})#'distributed.worker.num-workers':20}):
	c=Client(processes=False)
	preprocessing()
	c.close()
| [
"pathflowai.utils.npy2da",
"click.version_option",
"click.option",
"numpy.arange",
"click.Path",
"os.path.join",
"pathflowai.utils.adjust_mask",
"dask.distributed.Client",
"os.path.exists",
"dask.config.set",
"click.group",
"os.path.basename",
"pathflowai.utils.create_zero_mask",
"sqlite3.... | [((281, 327), 'click.group', 'click.group', ([], {'context_settings': 'CONTEXT_SETTINGS'}), '(context_settings=CONTEXT_SETTINGS)\n', (292, 327), False, 'import click\n'), ((330, 365), 'click.version_option', 'click.version_option', ([], {'version': '"""0.1"""'}), "(version='0.1')\n", (350, 365), False, 'import click\n'), ((659, 770), 'click.option', 'click.option', (['"""-npy"""', '"""--img2npy"""'], {'is_flag': '(True)', 'help': '"""Image to numpy for faster read."""', 'show_default': '(True)'}), "('-npy', '--img2npy', is_flag=True, help=\n 'Image to numpy for faster read.', show_default=True)\n", (671, 770), False, 'import click\n'), ((1191, 1300), 'click.option', 'click.option', (['"""-pr"""', '"""--preprocess"""'], {'is_flag': '(True)', 'help': '"""Run preprocessing pipeline."""', 'show_default': '(True)'}), "('-pr', '--preprocess', is_flag=True, help=\n 'Run preprocessing pipeline.', show_default=True)\n", (1203, 1300), False, 'import click\n'), ((1297, 1394), 'click.option', 'click.option', (['"""-pa"""', '"""--patches"""'], {'is_flag': '(True)', 'help': '"""Add patches to SQL."""', 'show_default': '(True)'}), "('-pa', '--patches', is_flag=True, help='Add patches to SQL.',\n show_default=True)\n", (1309, 1394), False, 'import click\n'), ((1392, 1510), 'click.option', 'click.option', (['"""-t"""', '"""--threshold"""'], {'default': '(0.05)', 'help': '"""Threshold to remove non-purple slides."""', 'show_default': '(True)'}), "('-t', '--threshold', default=0.05, help=\n 'Threshold to remove non-purple slides.', show_default=True)\n", (1404, 1510), False, 'import click\n'), ((1508, 1599), 'click.option', 'click.option', (['"""-ps"""', '"""--patch_size"""'], {'default': '(224)', 'help': '"""Patch size."""', 'show_default': '(True)'}), "('-ps', '--patch_size', default=224, help='Patch size.',\n show_default=True)\n", (1520, 1599), False, 'import click\n'), ((1598, 1739), 'click.option', 'click.option', (['"""-it"""', '"""--intensity_threshold"""'], 
{'default': '(100.0)', 'help': '"""Intensity threshold to rate a pixel as non-white."""', 'show_default': '(True)'}), "('-it', '--intensity_threshold', default=100.0, help=\n 'Intensity threshold to rate a pixel as non-white.', show_default=True)\n", (1610, 1739), False, 'import click\n'), ((1736, 1915), 'click.option', 'click.option', (['"""-g"""', '"""--generate_finetune_segmentation"""'], {'is_flag': '(True)', 'help': '"""Generate patches for one segmentation mask class for targeted finetuning."""', 'show_default': '(True)'}), "('-g', '--generate_finetune_segmentation', is_flag=True, help=\n 'Generate patches for one segmentation mask class for targeted finetuning.'\n , show_default=True)\n", (1748, 1915), False, 'import click\n'), ((1907, 2069), 'click.option', 'click.option', (['"""-tc"""', '"""--target_segmentation_class"""'], {'default': '(0)', 'help': '"""Segmentation Class to finetune on, output patches to another db."""', 'show_default': '(True)'}), "('-tc', '--target_segmentation_class', default=0, help=\n 'Segmentation Class to finetune on, output patches to another db.',\n show_default=True)\n", (1919, 2069), False, 'import click\n'), ((2063, 2219), 'click.option', 'click.option', (['"""-tt"""', '"""--target_threshold"""'], {'default': '(0.0)', 'help': '"""Threshold to include target for segmentation if saving one class."""', 'show_default': '(True)'}), "('-tt', '--target_threshold', default=0.0, help=\n 'Threshold to include target for segmentation if saving one class.',\n show_default=True)\n", (2075, 2219), False, 'import click\n'), ((2354, 2499), 'click.option', 'click.option', (['"""-am"""', '"""--adjust_mask"""'], {'is_flag': '(True)', 'help': '"""Remove additional background regions from annotation mask."""', 'show_default': '(True)'}), "('-am', '--adjust_mask', is_flag=True, help=\n 'Remove additional background regions from annotation mask.',\n show_default=True)\n", (2366, 2499), False, 'import click\n'), ((2492, 2638), 'click.option', 
'click.option', (['"""-nn"""', '"""--n_neighbors"""'], {'default': '(5)', 'help': '"""If adjusting mask, number of neighbors connectivity to remove."""', 'show_default': '(True)'}), "('-nn', '--n_neighbors', default=5, help=\n 'If adjusting mask, number of neighbors connectivity to remove.',\n show_default=True)\n", (2504, 2638), False, 'import click\n'), ((2632, 2842), 'click.option', 'click.option', (['"""-bp"""', '"""--basic_preprocess"""'], {'is_flag': '(True)', 'help': '"""Basic preprocessing pipeline, annotation areas are not saved. Used for benchmarking tool against comparable pipelines"""', 'show_default': '(True)'}), "('-bp', '--basic_preprocess', is_flag=True, help=\n 'Basic preprocessing pipeline, annotation areas are not saved. Used for benchmarking tool against comparable pipelines'\n , show_default=True)\n", (2644, 2842), False, 'import click\n'), ((2834, 2971), 'click.option', 'click.option', (['"""-ei"""', '"""--entire_image"""'], {'is_flag': '(True)', 'help': '"""Store entire image in central db rather than patches."""', 'show_default': '(True)'}), "('-ei', '--entire_image', is_flag=True, help=\n 'Store entire image in central db rather than patches.', show_default=True)\n", (2846, 2971), False, 'import click\n'), ((2968, 3075), 'click.option', 'click.option', (['"""-nz"""', '"""--no_zarr"""'], {'is_flag': '(True)', 'help': '"""Don\'t save zarr format file."""', 'show_default': '(True)'}), '(\'-nz\', \'--no_zarr\', is_flag=True, help=\n "Don\'t save zarr format file.", show_default=True)\n', (2980, 3075), False, 'import click\n'), ((3073, 3219), 'click.option', 'click.option', (['"""-pka"""', '"""--pkl_annot"""'], {'is_flag': '(True)', 'help': '"""Look for .annot.pkl pickle files instead of xml annotations."""', 'show_default': '(True)'}), "('-pka', '--pkl_annot', is_flag=True, help=\n 'Look for .annot.pkl pickle files instead of xml annotations.',\n show_default=True)\n", (3085, 3219), False, 'import click\n'), ((3212, 3327), 'click.option', 
'click.option', (['"""-ta"""', '"""--transpose_annotations"""'], {'is_flag': '(True)', 'help': '"""Transpose annotations."""', 'show_default': '(True)'}), "('-ta', '--transpose_annotations', is_flag=True, help=\n 'Transpose annotations.', show_default=True)\n", (3224, 3327), False, 'import click\n'), ((3324, 3464), 'click.option', 'click.option', (['"""-gtm"""', '"""--get_tissue_mask"""'], {'is_flag': '(True)', 'help': '"""Build tissue mask instead of intensity thresholding."""', 'show_default': '(True)'}), "('-gtm', '--get_tissue_mask', is_flag=True, help=\n 'Build tissue mask instead of intensity thresholding.', show_default=True)\n", (3336, 3464), False, 'import click\n'), ((3461, 3587), 'click.option', 'click.option', (['"""-ot"""', '"""--otsu"""'], {'is_flag': '(True)', 'help': '"""Utilize otsu method to decide intensity threshold."""', 'show_default': '(True)'}), "('-ot', '--otsu', is_flag=True, help=\n 'Utilize otsu method to decide intensity threshold.', show_default=True)\n", (3473, 3587), False, 'import click\n'), ((3584, 3716), 'click.option', 'click.option', (['"""-cm"""', '"""--compression"""'], {'default': '(8.0)', 'help': '"""If find tissue mask, how much to downsample image."""', 'show_default': '(True)'}), "('-cm', '--compression', default=8.0, help=\n 'If find tissue mask, how much to downsample image.', show_default=True)\n", (3596, 3716), False, 'import click\n'), ((3713, 3837), 'click.option', 'click.option', (['"""-ch"""', '"""--return_convex_hull"""'], {'is_flag': '(True)', 'help': '"""Return convex hull of tissue mask."""', 'show_default': '(True)'}), "('-ch', '--return_convex_hull', is_flag=True, help=\n 'Return convex hull of tissue mask.', show_default=True)\n", (3725, 3837), False, 'import click\n'), ((3834, 3939), 'click.option', 'click.option', (['"""-kh"""', '"""--keep_holes"""'], {'is_flag': '(True)', 'help': '"""Keep holes tissue mask."""', 'show_default': '(True)'}), "('-kh', '--keep_holes', is_flag=True, help=\n 'Keep holes tissue 
mask.', show_default=True)\n", (3846, 3939), False, 'import click\n'), ((3936, 4081), 'click.option', 'click.option', (['"""-mhs"""', '"""--max_hole_size"""'], {'default': '(0)', 'help': '"""If removing holes, what is maximum allowed size to remain."""', 'show_default': '(True)'}), "('-mhs', '--max_hole_size', default=0, help=\n 'If removing holes, what is maximum allowed size to remain.',\n show_default=True)\n", (3948, 4081), False, 'import click\n'), ((4075, 4210), 'click.option', 'click.option', (['"""-gbc"""', '"""--gray_before_close"""'], {'is_flag': '(True)', 'help': '"""Filter grays before binary closing operation."""', 'show_default': '(True)'}), "('-gbc', '--gray_before_close', is_flag=True, help=\n 'Filter grays before binary closing operation.', show_default=True)\n", (4087, 4210), False, 'import click\n'), ((4207, 4304), 'click.option', 'click.option', (['"""-kl"""', '"""--kernel"""'], {'default': '(61)', 'help': '"""Binary closing kernel."""', 'show_default': '(True)'}), "('-kl', '--kernel', default=61, help='Binary closing kernel.',\n show_default=True)\n", (4219, 4304), False, 'import click\n'), ((4303, 4453), 'click.option', 'click.option', (['"""-mos"""', '"""--min_object_size"""'], {'default': '(100000)', 'help': '"""Remove all connected components smaller than this size."""', 'show_default': '(True)'}), "('-mos', '--min_object_size', default=100000, help=\n 'Remove all connected components smaller than this size.', show_default\n =True)\n", (4315, 4453), False, 'import click\n'), ((4446, 4553), 'click.option', 'click.option', (['"""-bs"""', '"""--blur_size"""'], {'default': '(0)', 'help': '"""How much to blur tissue mask."""', 'show_default': '(True)'}), "('-bs', '--blur_size', default=0, help=\n 'How much to blur tissue mask.', show_default=True)\n", (4458, 4553), False, 'import click\n'), ((8260, 8388), 'click.option', 'click.option', (['"""-fr"""', '"""--from_annotations"""'], {'default': '[]', 'multiple': '(True)', 'help': '"""Annotations to 
switch from."""', 'show_default': '(True)'}), "('-fr', '--from_annotations', default=[], multiple=True, help=\n 'Annotations to switch from.', show_default=True)\n", (8272, 8388), False, 'import click\n'), ((8385, 8509), 'click.option', 'click.option', (['"""-to"""', '"""--to_annotations"""'], {'default': '[]', 'multiple': '(True)', 'help': '"""Annotations to switch to."""', 'show_default': '(True)'}), "('-to', '--to_annotations', default=[], multiple=True, help=\n 'Annotations to switch to.', show_default=True)\n", (8397, 8509), False, 'import click\n'), ((9630, 9721), 'click.option', 'click.option', (['"""-ps"""', '"""--patch_size"""'], {'default': '(224)', 'help': '"""Patch size."""', 'show_default': '(True)'}), "('-ps', '--patch_size', default=224, help='Patch size.',\n show_default=True)\n", (9642, 9721), False, 'import click\n'), ((10560, 10688), 'click.option', 'click.option', (['"""-fr"""', '"""--from_annotations"""'], {'default': '[]', 'multiple': '(True)', 'help': '"""Annotations to switch from."""', 'show_default': '(True)'}), "('-fr', '--from_annotations', default=[], multiple=True, help=\n 'Annotations to switch from.', show_default=True)\n", (10572, 10688), False, 'import click\n'), ((10685, 10809), 'click.option', 'click.option', (['"""-to"""', '"""--to_annotations"""'], {'default': '[]', 'multiple': '(True)', 'help': '"""Annotations to switch to."""', 'show_default': '(True)'}), "('-to', '--to_annotations', default=[], multiple=True, help=\n 'Annotations to switch to.', show_default=True)\n", (10697, 10809), False, 'import click\n'), ((10806, 10897), 'click.option', 'click.option', (['"""-ps"""', '"""--patch_size"""'], {'default': '(224)', 'help': '"""Patch size."""', 'show_default': '(True)'}), "('-ps', '--patch_size', default=224, help='Patch size.',\n show_default=True)\n", (10818, 10897), False, 'import click\n'), ((11093, 11221), 'click.option', 'click.option', (['"""-ma"""', '"""--max_background_area"""'], {'default': '(0.05)', 'help': '"""Max 
background area before exclusion."""', 'show_default': '(True)'}), "('-ma', '--max_background_area', default=0.05, help=\n 'Max background area before exclusion.', show_default=True)\n", (11105, 11221), False, 'import click\n'), ((575, 599), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (589, 599), False, 'import os\n'), ((5824, 5835), 'time.time', 'time.time', ([], {}), '()\n', (5833, 5835), False, 'import time\n'), ((6426, 6437), 'time.time', 'time.time', ([], {}), '()\n', (6435, 6437), False, 'import time\n'), ((6825, 6836), 'time.time', 'time.time', ([], {}), '()\n', (6834, 6836), False, 'import time\n'), ((7874, 7885), 'time.time', 'time.time', ([], {}), '()\n', (7883, 7885), False, 'import time\n'), ((8790, 8798), 'dask.distributed.Client', 'Client', ([], {}), '()\n', (8796, 8798), False, 'from dask.distributed import Client\n'), ((8896, 8934), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (8907, 8934), False, 'import os\n'), ((9974, 10005), 'sqlite3.connect', 'sqlite3.connect', (['input_patch_db'], {}), '(input_patch_db)\n', (9989, 10005), False, 'import sqlite3\n'), ((10126, 10158), 'sqlite3.connect', 'sqlite3.connect', (['output_patch_db'], {}), '(output_patch_db)\n', (10141, 10158), False, 'import sqlite3\n'), ((11772, 11803), 'sqlite3.connect', 'sqlite3.connect', (['input_patch_db'], {}), '(input_patch_db)\n', (11787, 11803), False, 'import sqlite3\n'), ((12429, 12451), 'numpy.arange', 'np.arange', (['df.shape[0]'], {}), '(df.shape[0])\n', (12438, 12451), True, 'import numpy as np, pandas as pd\n'), ((12460, 12492), 'sqlite3.connect', 'sqlite3.connect', (['output_patch_db'], {}), '(output_patch_db)\n', (12475, 12492), False, 'import sqlite3\n'), ((12664, 12796), 'dask.config.set', 'dask.config.set', (["{'temporary_dir': 'tmp/', 'distributed.worker.local_dir': 'tmp/',\n 'distributed.scheduler.allowed-failures': 20}"], {}), "({'temporary_dir': 'tmp/', 
'distributed.worker.local_dir':\n 'tmp/', 'distributed.scheduler.allowed-failures': 20})\n", (12679, 12796), False, 'import dask\n'), ((12842, 12865), 'dask.distributed.Client', 'Client', ([], {'processes': '(False)'}), '(processes=False)\n', (12848, 12865), False, 'from dask.distributed import Client\n'), ((5461, 5500), 'pathflowai.utils.img2npy_', 'img2npy_', (['input_dir', 'basename', 'svs_file'], {}), '(input_dir, basename, svs_file)\n', (5469, 5500), False, 'from pathflowai.utils import run_preprocessing_pipeline, generate_patch_pipeline, img2npy_, create_zero_mask\n'), ((5854, 6069), 'pathflowai.utils.run_preprocessing_pipeline', 'run_preprocessing_pipeline', ([], {'svs_file': 'svs_file', 'xml_file': 'xml_file', 'npy_mask': 'npy_mask', 'annotations': 'annotations', 'out_zarr': 'out_zarr', 'out_pkl': 'out_pkl', 'no_zarr': 'no_zarr', 'transpose_annotations': 'transpose_annotations'}), '(svs_file=svs_file, xml_file=xml_file, npy_mask=\n npy_mask, annotations=annotations, out_zarr=out_zarr, out_pkl=out_pkl,\n no_zarr=no_zarr, transpose_annotations=transpose_annotations)\n', (5880, 6069), False, 'from pathflowai.utils import run_preprocessing_pipeline, generate_patch_pipeline, img2npy_, create_zero_mask\n'), ((6331, 6405), 'pathflowai.utils.create_zero_mask', 'create_zero_mask', (['npy_mask', '(out_zarr if not no_zarr else svs_file)', 'out_pkl'], {}), '(npy_mask, out_zarr if not no_zarr else svs_file, out_pkl)\n', (6347, 6405), False, 'from pathflowai.utils import run_preprocessing_pipeline, generate_patch_pipeline, img2npy_, create_zero_mask\n'), ((6568, 6601), 'os.path.join', 'join', (['input_dir', '"""adjusted_masks"""'], {}), "(input_dir, 'adjusted_masks')\n", (6572, 6601), False, 'from os.path import join\n'), ((6654, 6689), 'os.makedirs', 'os.makedirs', (['adj_dir'], {'exist_ok': '(True)'}), '(adj_dir, exist_ok=True)\n', (6665, 6689), False, 'import os\n'), ((6963, 7733), 'pathflowai.utils.generate_patch_pipeline', 'generate_patch_pipeline', (['basename'], 
{'input_dir': 'input_dir', 'annotations': 'annotations', 'threshold': 'threshold', 'patch_size': 'patch_size', 'out_db': 'out_db', 'generate_finetune_segmentation': 'generate_finetune_segmentation', 'target_class': 'target_segmentation_class', 'intensity_threshold': 'intensity_threshold', 'target_threshold': 'target_threshold', 'adj_mask': 'adj_npy', 'basic_preprocess': 'basic_preprocess', 'entire_image': 'entire_image', 'svs_file': 'svs_file', 'transpose_annotations': 'transpose_annotations', 'get_tissue_mask': 'get_tissue_mask', 'otsu': 'otsu', 'compression': 'compression', 'return_convex_hull': 'return_convex_hull', 'keep_holes': 'keep_holes', 'max_hole_size': 'max_hole_size', 'gray_before_close': 'gray_before_close', 'kernel': 'kernel', 'min_object_size': 'min_object_size', 'blur_size': 'blur_size'}), '(basename, input_dir=input_dir, annotations=\n annotations, threshold=threshold, patch_size=patch_size, out_db=out_db,\n generate_finetune_segmentation=generate_finetune_segmentation,\n target_class=target_segmentation_class, intensity_threshold=\n intensity_threshold, target_threshold=target_threshold, adj_mask=\n adj_npy, basic_preprocess=basic_preprocess, entire_image=entire_image,\n svs_file=svs_file, transpose_annotations=transpose_annotations,\n get_tissue_mask=get_tissue_mask, otsu=otsu, compression=compression,\n return_convex_hull=return_convex_hull, keep_holes=keep_holes,\n max_hole_size=max_hole_size, gray_before_close=gray_before_close,\n kernel=kernel, min_object_size=min_object_size, blur_size=blur_size)\n', (6986, 7733), False, 'from pathflowai.utils import run_preprocessing_pipeline, generate_patch_pipeline, img2npy_, create_zero_mask\n'), ((849, 873), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (859, 873), False, 'import click\n'), ((992, 1016), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (1002, 1016), False, 'import click\n'), ((1145, 1169), 'click.Path', 'click.Path', ([], 
{'exists': '(False)'}), '(exists=False)\n', (1155, 1169), False, 'import click\n'), ((2308, 2332), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (2318, 2332), False, 'import click\n'), ((8951, 8979), 'os.path.join', 'join', (['mask_dir', '"""*_mask.npy"""'], {}), "(mask_dir, '*_mask.npy')\n", (8955, 8979), False, 'from os.path import join\n'), ((9112, 9124), 'pathflowai.utils.npy2da', 'npy2da', (['mask'], {}), '(mask)\n', (9118, 9124), False, 'from pathflowai.utils import npy2da\n'), ((8066, 8090), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (8076, 8090), False, 'import click\n'), ((8214, 8238), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (8224, 8238), False, 'import click\n'), ((9325, 9349), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (9335, 9349), False, 'import click\n'), ((9467, 9491), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (9477, 9491), False, 'import click\n'), ((9584, 9608), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (9594, 9608), False, 'import click\n'), ((12386, 12408), 'numpy.arange', 'np.arange', (['df.shape[0]'], {}), '(df.shape[0])\n', (12395, 12408), True, 'import numpy as np, pandas as pd\n'), ((10372, 10396), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (10382, 10396), False, 'import click\n'), ((10514, 10538), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (10524, 10538), False, 'import click\n'), ((11047, 11071), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (11057, 11071), False, 'import click\n'), ((6624, 6650), 'os.path.basename', 'os.path.basename', (['npy_mask'], {}), '(npy_mask)\n', (6640, 6650), False, 'import os\n'), ((6698, 6721), 'os.path.exists', 'os.path.exists', (['adj_npy'], {}), '(adj_npy)\n', (6712, 6721), False, 'import os\n'), ((6726, 
6812), 'pathflowai.utils.adjust_mask', 'adjust_mask', (['npy_mask', '(out_zarr if not no_zarr else svs_file)', 'adj_npy', 'n_neighbors'], {}), '(npy_mask, out_zarr if not no_zarr else svs_file, adj_npy,\n n_neighbors)\n', (6737, 6812), False, 'from pathflowai.utils import adjust_mask\n'), ((9082, 9104), 'os.path.basename', 'os.path.basename', (['mask'], {}), '(mask)\n', (9098, 9104), False, 'import os\n')] |
import sys
sys.path.insert(0, '/home/liyongjing/Egolee_2021/programs/RepVGG-main')
import torch
import cv2
import os
import numpy as np
from repvgg import get_RepVGG_func_by_name
import torchvision.transforms as transforms
from PIL import Image, ImageOps
import random
class RepVGGTorchInfer(object):
    """Single-image inference wrapper around a deploy-mode RepVGG classifier.

    Loads a re-parameterized ("deploy") checkpoint, preprocesses images to a
    fixed 224x224 input, runs a forward pass and returns a
    {'pred_label', 'pred_score'} dict.  Also supports ONNX export.
    """
    def __init__(self, arch, weights):
        """Build the deploy-time RepVGG graph for *arch* and load *weights*."""
        repvgg_build_func = get_RepVGG_func_by_name(arch)
        model = repvgg_build_func(deploy=True)
        if torch.cuda.is_available():
            model = model.cuda()
        self.model_w = weights
        checkpoint = torch.load(weights)
        # Checkpoints saved by the trainer wrap parameters in 'state_dict'.
        if 'state_dict' in checkpoint:
            checkpoint = checkpoint['state_dict']
        # Drop the DataParallel 'module.' prefix so keys match the bare model.
        ckpt = {k.replace('module.', ''):v for k,v in checkpoint.items()} # strip the names
        model.load_state_dict(ckpt)
        self.model = model.eval()
        self.short_size = 256
        self.dst_w = 224
        self.dst_h = 224
        self.input_size = [self.dst_h, self.dst_w]
        # ImageNet mean/std in the 0-255 pixel scale (applied after BGR->RGB).
        self.mean = np.array([123.675, 116.28, 103.53])
        self.std = np.array([58.395, 57.12, 57.375])
        self.std_inv = 1 / self.std
        self.img_t = None
        self.result = dict()
    def crop_img_short_size(self, cv_img):
        """Resize so the short side equals self.short_size, then center-crop."""
        # resize the short size
        h, w, _ = cv_img.shape
        if h >= w:
            h = int(h * self.short_size / w)
            w = int(self.short_size)
        else:
            w = int(w * self.short_size / h)
            h = int(self.short_size)
        cv_img = cv2.resize(cv_img, (w, h), cv2.INTER_LINEAR)
        # center crop
        y1 = max(0, int(round((h - self.input_size[1]) / 2.)))
        x1 = max(0, int(round((w - self.input_size[0]) / 2.)))
        y2 = min(h-1, y1 + self.input_size[1])
        x2 = min(w-1, x1 + self.input_size[0])
        cv_img = cv_img[y1:y2, x1:x2, :]
        return cv_img
    def crop_img_long_size(self, cv_img):
        """Zero-pad the image to a square of its long side, then resize to input_size."""
        long_size = max(cv_img.shape[:2])
        pad_h = (long_size - cv_img.shape[0]) // 2
        pad_w = (long_size - cv_img.shape[1]) // 2
        # Black square canvas; the source image is centered on it.
        img_input = np.ones((long_size, long_size, 3), dtype=np.uint8) * 0
        img_input[pad_h:cv_img.shape[0] + pad_h, pad_w:cv_img.shape[1] + pad_w, :] = cv_img
        img_input = cv2.resize(img_input, (self.input_size[1], self.input_size[0]), cv2.INTER_LINEAR)
        return img_input
    def crop_img_long_size2(self, cv_img):
        """Resize so the long side equals max(input_size), then pad to a square.

        NOTE(review): `w, h, _ = cv_img.shape` binds w to the row count and h
        to the column count (numpy shape is (rows, cols, ch)) — the later
        resize/pad arithmetic appears written around that swap; confirm before
        changing anything here.
        """
        ignore_resize = False
        long_side = max(self.input_size)
        w, h, _ = cv_img.shape
        if (w >= h and w == long_side) or (h >= w and h == long_side):
            ignore_resize = True
        else:
            if w > h:
                width = long_side
                height = int(long_side * h / w)
            else:
                height = long_side
                width = int(long_side * w / h)
        if not ignore_resize:
            cv_img = cv2.resize(cv_img, (height, width), cv2.INTER_LINEAR)
        long_size = max(cv_img.shape[:2])
        pad_h = (long_size - cv_img.shape[1]) // 2
        pad_w = (long_size - cv_img.shape[0]) // 2
        pad_t = pad_h
        pad_d = pad_h
        pad_l = pad_w
        pad_r = pad_w
        # Odd remainders get one extra pixel on one side so the total is exact.
        if (long_size - cv_img.shape[0]) % 2 != 0:
            pad_l = pad_l + 1
        if (long_size - cv_img.shape[1]) % 2 != 0:
            pad_t = pad_t + 1
        img_crop_padding = cv2.copyMakeBorder(cv_img, pad_l, pad_r, pad_t, pad_d, cv2.BORDER_CONSTANT, value=[0, 0, 0])
        return img_crop_padding
    def infer_cv_img(self, cv_img):
        """Classify a BGR OpenCV image; returns {'pred_label', 'pred_score'}."""
        # cv_img = self.crop_img_short_size(cv_img)
        cv_img = self.crop_img_long_size2(cv_img)
        assert list(cv_img.shape[:2]) == self.input_size
        # cv2.namedWindow("cv_img", 0)
        # cv2.imshow("cv_img", cv_img)
        # normalize
        cv_img = cv_img.copy().astype(np.float32)
        # Reshape mean/std_inv to (1, 3) so cv2 broadcasts them per channel.
        self.mean = np.float64(self.mean.reshape(1, -1))
        self.std_inv = 1 / np.float64(self.std.reshape(1, -1))
        if True:
            cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB, cv_img) # inplace
            cv2.subtract(cv_img, self.mean, cv_img) # inplace
            cv2.multiply(cv_img, self.std_inv, cv_img) # inplace
        self.img_t = cv_img.transpose(2, 0, 1) # to C, H, W
        self.img_t = np.ascontiguousarray(self.img_t)
        self.img_t = np.expand_dims(self.img_t, axis=0)
        self.img_t = torch.from_numpy(self.img_t)
        if torch.cuda.is_available():
            self.img_t = self.img_t.cuda()
        output = self.model(self.img_t)
        output = output.cpu().detach().numpy()
        # softmax (numerically stabilized by subtracting the row max)
        tmp = np.max(output, axis=1)
        output -= tmp.reshape((output.shape[0],1))
        output = np.exp(output)
        tmp = np.sum(output, axis=1)
        output /= tmp.reshape((output.shape[0], 1))
        pred_score = np.max(output, axis=1)[0]
        pred_label = np.argmax(output, axis=1)[0]
        self.result.update({'pred_label': pred_label, 'pred_score': float(pred_score)})
        return self.result
    def infer_pil_img(self, pil_img):
        """Classify a PIL image using the torchvision transform pipeline.

        NOTE(review): the `valdir = os.path.join(args.data, 'val')` line below
        references `args`, which is not defined anywhere in this file — calling
        this method raises NameError there.  It (and the duplicated
        normalize/crop construction after it) looks like leftover training-script
        code; confirm and remove.
        """
        from local_files.local_transformer import ResizeCenterCropPaddingShort
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        center_crop_pad_short_size = ResizeCenterCropPaddingShort((224, -1), interpolation=cv2.INTER_LINEAR,
                                 backend='cv2')
        # center_crop_pad_short_size = ResizeCenterCropPaddingShort((224, -1), interpolation=Image.BILINEAR,
        #                                           backend='torch')
        transforms_compose = transforms.Compose([center_crop_pad_short_size, transforms.ToTensor(), normalize,])
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        center_crop_pad_short_size = ResizeCenterCropPaddingShort((224, -1), interpolation=Image.BILINEAR,
                                 backend='torch')
        pil_img = transforms_compose(pil_img)
        pil_img = pil_img.unsqueeze(dim=0)
        if torch.cuda.is_available():
            pil_img = pil_img.cuda()
        output = self.model(pil_img)
        output = torch.nn.functional.softmax(output, dim=1)
        output = output.cpu().detach().numpy()
        pred_score = np.max(output, axis=1)[0]
        pred_label = np.argmax(output, axis=1)[0]
        self.result.update({'pred_label': pred_label, 'pred_score': float(pred_score)})
        return self.result
    def onnx_exprot(self):
        """Export the model to ONNX next to the .pth file, then simplify it.

        Writes three files: <w>.onnx, <w>_sim.onnx and <w>_sim_shape.onnx.
        Failures are caught and printed, not raised.
        """
        # ONNX export must run on CPU tensors.
        self.model = self.model.cpu()
        img_dry = torch.zeros((1, 3, self.dst_h, self.dst_w))
        with torch.no_grad():
            y = self.model(img_dry) # forward
        try:
            import onnx
            print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
            f = self.model_w.replace('.pth', '.onnx') # filename
            # print(model.t)
            torch.onnx.export(self.model, img_dry, f, verbose=False, opset_version=11, \
                input_names=['images'], output_names=['output'])
            # Checks
            onnx_model = onnx.load(f) # load onnx model
            onnx.checker.check_model(onnx_model) # check onnx model
            print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
            # simpily onnx
            from onnxsim import simplify
            model_simp, check = simplify(onnx_model)
            assert check, "Simplified ONNX model could not be validated"
            f2 = f.replace('.onnx', '_sim.onnx') # filename
            onnx.save(model_simp, f2)
            print('====ONNX SIM export success, saved as %s' % f2)
            from onnx import shape_inference
            f3 = f2.replace('.onnx', '_shape.onnx') # filename
            onnx.save(onnx.shape_inference.infer_shapes(onnx.load(f2)), f3)
            print('====ONNX shape inference export success, saved as %s' % f3)
            print('ONNX export success, saved as %s' % f)
        except Exception as e:
            print('ONNX export failure: %s' % e)
    def print_model_names(self):
        """Print the qualified name of every submodule in the network."""
        for name, paras, in self.model.named_modules():
            print(name)
    def diff_size_input_test(self, input_img):
        """Forward an arbitrary-size tensor through the model (shape probe)."""
        output = self.model(input_img)
        return output
def infer_images(images_dir, arch, weights):
    """Classify every .jpg in *images_dir*, showing each image and its result.

    Images whose predicted label is non-zero pause until a key is pressed.
    """
    model = RepVGGTorchInfer(arch, weights)
    for name in os.listdir(images_dir):
        if os.path.splitext(name)[-1] not in ['.jpg']:
            continue
        img_path = os.path.join(images_dir, name)
        # img = cv2.imread(img_path)
        # pred_result = infer_model.infer_cv_img(img)
        pil_image = Image.open(img_path)
        pred_result = model.infer_pil_img(pil_image)
        # Convert the RGB PIL image back to BGR for OpenCV display.
        bgr = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
        cv2.namedWindow('img')
        cv2.imshow('img', bgr)
        print(pred_result)
        if pred_result['pred_label'] != 0:
            cv2.waitKey(0)
            # exit(1)
def export_onnx(arch, weights):
    """Load the checkpoint and write its ONNX / simplified ONNX files."""
    RepVGGTorchInfer(arch, weights).onnx_exprot()
def print_model_names(arch, weights):
    """Load the checkpoint and print all of its module names."""
    model = RepVGGTorchInfer(arch, weights)
    model.print_model_names()
def test_diff_size_input(arch, weights):
    """Probe the network with 20 random input sizes (multiples of 32)."""
    model = RepVGGTorchInfer(arch, weights)
    for _ in range(20):
        # Random H/W in [32, 640], always divisible by 32.
        rand_w = random.randint(1, 20) * 32
        rand_h = random.randint(1, 20) * 32
        batch = torch.ones((1, 3, rand_h, rand_w)).cuda()
        out = model.diff_size_input_test(batch)
        print('*' * 20)
        print('input_t shape:{}'.format(batch.shape))
        print('output shape:{}'.format(out.shape))
if __name__ == '__main__':
    # Hard-coded evaluation paths; adjust to the local dataset/checkpoint layout.
    input_dir = '/home/liyongjing/Egolee_2021/data/TrainData/train_fall_down/rep_vgg_format/val/fall_down'
    arch = 'RepVGG-B2'
    weights = '/home/liyongjing/Egolee_2021/programs/RepVGG-main/trained_model/RepVggB2-padding-short-size/RepVGG-B2-deploy.pth'
    infer_images(input_dir, arch, weights)
    # Alternative entry points (uncomment one as needed):
    # export_onnx(arch, weights)
    # print_model_names(arch, weights)
    # test_diff_size_input(arch, weights)
| [
"numpy.sum",
"numpy.argmax",
"numpy.ones",
"numpy.exp",
"local_files.local_transformer.ResizeCenterCropPaddingShort",
"repvgg.get_RepVGG_func_by_name",
"torchvision.transforms.Normalize",
"cv2.imshow",
"os.path.join",
"torch.no_grad",
"torch.ones",
"cv2.subtract",
"random.randint",
"onnx.s... | [((11, 82), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/liyongjing/Egolee_2021/programs/RepVGG-main"""'], {}), "(0, '/home/liyongjing/Egolee_2021/programs/RepVGG-main')\n", (26, 82), False, 'import sys\n'), ((371, 400), 'repvgg.get_RepVGG_func_by_name', 'get_RepVGG_func_by_name', (['arch'], {}), '(arch)\n', (394, 400), False, 'from repvgg import get_RepVGG_func_by_name\n'), ((460, 485), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (483, 485), False, 'import torch\n'), ((573, 592), 'torch.load', 'torch.load', (['weights'], {}), '(weights)\n', (583, 592), False, 'import torch\n'), ((1000, 1035), 'numpy.array', 'np.array', (['[123.675, 116.28, 103.53]'], {}), '([123.675, 116.28, 103.53])\n', (1008, 1035), True, 'import numpy as np\n'), ((1055, 1088), 'numpy.array', 'np.array', (['[58.395, 57.12, 57.375]'], {}), '([58.395, 57.12, 57.375])\n', (1063, 1088), True, 'import numpy as np\n'), ((1503, 1547), 'cv2.resize', 'cv2.resize', (['cv_img', '(w, h)', 'cv2.INTER_LINEAR'], {}), '(cv_img, (w, h), cv2.INTER_LINEAR)\n', (1513, 1547), False, 'import cv2\n'), ((2230, 2316), 'cv2.resize', 'cv2.resize', (['img_input', '(self.input_size[1], self.input_size[0])', 'cv2.INTER_LINEAR'], {}), '(img_input, (self.input_size[1], self.input_size[0]), cv2.\n INTER_LINEAR)\n', (2240, 2316), False, 'import cv2\n'), ((3338, 3434), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['cv_img', 'pad_l', 'pad_r', 'pad_t', 'pad_d', 'cv2.BORDER_CONSTANT'], {'value': '[0, 0, 0]'}), '(cv_img, pad_l, pad_r, pad_t, pad_d, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n', (3356, 3434), False, 'import cv2\n'), ((4026, 4065), 'cv2.subtract', 'cv2.subtract', (['cv_img', 'self.mean', 'cv_img'], {}), '(cv_img, self.mean, cv_img)\n', (4038, 4065), False, 'import cv2\n'), ((4085, 4127), 'cv2.multiply', 'cv2.multiply', (['cv_img', 'self.std_inv', 'cv_img'], {}), '(cv_img, self.std_inv, cv_img)\n', (4097, 4127), False, 'import cv2\n'), ((4222, 4254), 
'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.img_t'], {}), '(self.img_t)\n', (4242, 4254), True, 'import numpy as np\n'), ((4277, 4311), 'numpy.expand_dims', 'np.expand_dims', (['self.img_t'], {'axis': '(0)'}), '(self.img_t, axis=0)\n', (4291, 4311), True, 'import numpy as np\n'), ((4333, 4361), 'torch.from_numpy', 'torch.from_numpy', (['self.img_t'], {}), '(self.img_t)\n', (4349, 4361), False, 'import torch\n'), ((4373, 4398), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4396, 4398), False, 'import torch\n'), ((4564, 4586), 'numpy.max', 'np.max', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (4570, 4586), True, 'import numpy as np\n'), ((4655, 4669), 'numpy.exp', 'np.exp', (['output'], {}), '(output)\n', (4661, 4669), True, 'import numpy as np\n'), ((4684, 4706), 'numpy.sum', 'np.sum', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (4690, 4706), True, 'import numpy as np\n'), ((5110, 5185), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (5130, 5185), True, 'import torchvision.transforms as transforms\n'), ((5264, 5354), 'local_files.local_transformer.ResizeCenterCropPaddingShort', 'ResizeCenterCropPaddingShort', (['(224, -1)'], {'interpolation': 'cv2.INTER_LINEAR', 'backend': '"""cv2"""'}), "((224, -1), interpolation=cv2.INTER_LINEAR,\n backend='cv2')\n", (5292, 5354), False, 'from local_files.local_transformer import ResizeCenterCropPaddingShort\n'), ((5738, 5768), 'os.path.join', 'os.path.join', (['args.data', '"""val"""'], {}), "(args.data, 'val')\n", (5750, 5768), False, 'import os\n'), ((5789, 5864), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (5809, 5864), True, 'import torchvision.transforms as transforms\n'), 
((5943, 6033), 'local_files.local_transformer.ResizeCenterCropPaddingShort', 'ResizeCenterCropPaddingShort', (['(224, -1)'], {'interpolation': 'Image.BILINEAR', 'backend': '"""torch"""'}), "((224, -1), interpolation=Image.BILINEAR,\n backend='torch')\n", (5971, 6033), False, 'from local_files.local_transformer import ResizeCenterCropPaddingShort\n'), ((6201, 6226), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6224, 6226), False, 'import torch\n'), ((6320, 6362), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (6347, 6362), False, 'import torch\n'), ((6707, 6750), 'torch.zeros', 'torch.zeros', (['(1, 3, self.dst_h, self.dst_w)'], {}), '((1, 3, self.dst_h, self.dst_w))\n', (6718, 6750), False, 'import torch\n'), ((8679, 8713), 'os.path.join', 'os.path.join', (['images_dir', 'img_name'], {}), '(images_dir, img_name)\n', (8691, 8713), False, 'import os\n'), ((8826, 8846), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (8836, 8846), False, 'from PIL import Image, ImageOps\n'), ((9013, 9035), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""'], {}), "('img')\n", (9028, 9035), False, 'import cv2\n'), ((9044, 9066), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (9054, 9066), False, 'import cv2\n'), ((9649, 9687), 'torch.ones', 'torch.ones', (['(1, 3, h_random, w_random)'], {}), '((1, 3, h_random, w_random))\n', (9659, 9687), False, 'import torch\n'), ((2063, 2113), 'numpy.ones', 'np.ones', (['(long_size, long_size, 3)'], {'dtype': 'np.uint8'}), '((long_size, long_size, 3), dtype=np.uint8)\n', (2070, 2113), True, 'import numpy as np\n'), ((2858, 2911), 'cv2.resize', 'cv2.resize', (['cv_img', '(height, width)', 'cv2.INTER_LINEAR'], {}), '(cv_img, (height, width), cv2.INTER_LINEAR)\n', (2868, 2911), False, 'import cv2\n'), ((3959, 4006), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_img', 'cv2.COLOR_BGR2RGB', 'cv_img'], {}), '(cv_img, 
cv2.COLOR_BGR2RGB, cv_img)\n', (3971, 4006), False, 'import cv2\n'), ((4781, 4803), 'numpy.max', 'np.max', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (4787, 4803), True, 'import numpy as np\n'), ((4828, 4853), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (4837, 4853), True, 'import numpy as np\n'), ((6432, 6454), 'numpy.max', 'np.max', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (6438, 6454), True, 'import numpy as np\n'), ((6479, 6504), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (6488, 6504), True, 'import numpy as np\n'), ((6764, 6779), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6777, 6779), False, 'import torch\n'), ((7052, 7179), 'torch.onnx.export', 'torch.onnx.export', (['self.model', 'img_dry', 'f'], {'verbose': '(False)', 'opset_version': '(11)', 'input_names': "['images']", 'output_names': "['output']"}), "(self.model, img_dry, f, verbose=False, opset_version=11,\n input_names=['images'], output_names=['output'])\n", (7069, 7179), False, 'import torch\n'), ((7254, 7266), 'onnx.load', 'onnx.load', (['f'], {}), '(f)\n', (7263, 7266), False, 'import onnx\n'), ((7298, 7334), 'onnx.checker.check_model', 'onnx.checker.check_model', (['onnx_model'], {}), '(onnx_model)\n', (7322, 7334), False, 'import onnx\n'), ((7553, 7573), 'onnxsim.simplify', 'simplify', (['onnx_model'], {}), '(onnx_model)\n', (7561, 7573), False, 'from onnxsim import simplify\n'), ((7721, 7746), 'onnx.save', 'onnx.save', (['model_simp', 'f2'], {}), '(model_simp, f2)\n', (7730, 7746), False, 'import onnx\n'), ((8566, 8588), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (8576, 8588), False, 'import os\n'), ((8933, 8954), 'numpy.asarray', 'np.asarray', (['pil_image'], {}), '(pil_image)\n', (8943, 8954), True, 'import numpy as np\n'), ((9149, 9163), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9160, 9163), False, 'import cv2\n'), ((9558, 9579), 'random.randint', 
'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (9572, 9579), False, 'import random\n'), ((9604, 9625), 'random.randint', 'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (9618, 9625), False, 'import random\n'), ((5684, 5705), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5703, 5705), True, 'import torchvision.transforms as transforms\n'), ((7373, 7418), 'onnx.helper.printable_graph', 'onnx.helper.printable_graph', (['onnx_model.graph'], {}), '(onnx_model.graph)\n', (7400, 7418), False, 'import onnx\n'), ((7980, 7993), 'onnx.load', 'onnx.load', (['f2'], {}), '(f2)\n', (7989, 7993), False, 'import onnx\n'), ((8592, 8611), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (8608, 8611), False, 'import os\n')] |
# Author <NAME>
# July 2019
# Disclaimer: I am not responsible for the consequences of using this script or getting you banned from Riot Games.
# This is only for educational purposes. Be warned of the risks of using it.
import sys
from enum import Enum
import time
import cv2
import numpy
from PIL import ImageGrab
from PIL import Image
import pyautogui
import random
import pytweening
# Setting a bezier curve on mouse movement to simulate an human
# Code from: https://github.com/asweigart/pyautogui/issues/80
def getPointOnCurve(x1, y1, x2, y2, n, tween=None, offset=0):
    """Interpolate the point at fraction *n* along (x1,y1)->(x2,y2).

    If a tween function and offset are configured (via set_curve or the
    keyword arguments) the point is displaced perpendicular-ish to the
    dominant axis, producing a curved, humanized mouse path.
    """
    # Globally-configured curve settings take precedence over the arguments.
    if getPointOnCurve.tween and getPointOnCurve.offset:
        tween = getPointOnCurve.tween
        offset = getPointOnCurve.offset
    x = x1 + n * (x2 - x1)
    y = y1 + n * (y2 - y1)
    if tween and offset:
        shift = (n - tween(n)) * offset
        # Displace along the axis with the smaller travel distance.
        if abs(y2 - y1) >= abs(x2 - x1):
            x += shift
        else:
            y += shift
    return (x, y)
# Default: no curve until set_curve() installs one.
getPointOnCurve.tween = None
getPointOnCurve.offset = 0
def set_curve(func, tween=None, offset=0):
    """Attach *tween* and *offset* attributes that getPointOnCurve consults."""
    func.offset = offset
    func.tween = tween
# Route pyautogui's line interpolation through the curved variant so every
# mouse move follows the humanized path configured below.
pyautogui.getPointOnLine = getPointOnCurve
set_curve(getPointOnCurve, pytweening.easeInOutCubic, 300)
# Images
class _img():
    """Template screenshots matched against the screen to detect client state."""
    matchsearch = cv2.imread("images/matchsearch.jpg")
    inqueue = cv2.imread("images/inqueue.jpg")
    loadingscreen = cv2.imread("images/loadingscreen.jpg")
    matchfound = cv2.imread("images/matchfound.jpg")
    gamestarted = cv2.imread("images/gamestarted.jpg")
    defeat = cv2.imread("images/defeat.jpg")
    playagain = cv2.imread("images/playagain.jpg")
    #presurrender = cv2.imread("images/presurrender.jpg")
    surrender = cv2.imread("images/surrender.jpg")
class Stage(Enum):
    """States of the match life-cycle driven by the main loop below."""
    LauncherMenu = 1
    InQueue = 2
    MatchAccepted = 3
    LoadingScreen = 4
    GameStarted = 5
    GameFinished = 6
# Init
threshold = 0.8  # minimum template-match score (cv2.TM_CCOEFF_NORMED) to accept a detection
matchesCount = 0  # matches completed during this session
secondsToWait = 600  # fixed in-game wait (10 minutes) before trying to surrender
# Optional CLI argument lets the bot resume from a specific Stage value.
_stage = Stage(int(sys.argv[1])) if len(sys.argv) == 2 else Stage.LauncherMenu
startedLoading = None  # timestamp of when the loading screen was first seen
def log(message):
    """Print *message* prefixed with a date/time stamp."""
    stamp = time.strftime("%d/%m/%Y %H:%M:%S >_")
    print(stamp, message)
def GrabScreenshot():
    """Capture the full screen and return it as an RGB PIL image."""
    shot = ImageGrab.grab()
    return shot.convert("RGB")
def LookFor(item, screenshot):
    """Template-match *item* in *screenshot*.

    Returns the top-left corner of the best match when its score reaches the
    global threshold, otherwise None.
    """
    frame = cv2.cvtColor(numpy.array(screenshot), cv2.COLOR_RGB2BGR)
    scores = cv2.matchTemplate(frame, item, cv2.TM_CCOEFF_NORMED)
    _, best_score, _, best_loc = cv2.minMaxLoc(scores)
    if best_score < threshold:
        return None
    return best_loc
def Click(loc, item, randomReturn = True):
    """Click the centre of the matched template *item* located at *loc*.

    When *randomReturn* is true the mouse is afterwards parked at a random
    horizontal position to avoid a robotic idle spot.
    """
    x, y = loc
    height, width, _ = item.shape
    pyautogui.moveTo(x + width / 2, y + height / 2, .5)
    pyautogui.mouseDown()
    time.sleep(.3)
    pyautogui.mouseUp()
    if randomReturn:
        # Randomize idle mouse returned position to not look like a robot
        pyautogui.moveTo(500 + random.randint(0, 700), 400, .5)
def progress(count, totalSeconds):
    """Render an in-place progress bar with the remaining MM:SS."""
    width = 70
    done = int(count * width / totalSeconds)
    bar = '#' * done + '_' * (width - done)
    remaining = time.strftime("%M:%S", time.gmtime(totalSeconds - count))
    sys.stdout.write('[%s] %s minutes left\r' % (bar, remaining))
    sys.stdout.flush()
def surrender():
    """Click into the game, open chat and type the /ff surrender command."""
    log("Trying to surrender")
    pyautogui.mouseDown()
    time.sleep(.3)
    pyautogui.mouseUp()
    pyautogui.press('enter')
    # Type "/ff" one character at a time, pausing between keystrokes.
    for key in ('/', 'f', 'f'):
        time.sleep(.5)
        pyautogui.typewrite(key)
    time.sleep(.5)
    pyautogui.press('enter')
def WaitForMatchEnd():
    """Block for the fixed match duration, updating the progress bar each second."""
    log("Waiting for match to end")
    progress(0, secondsToWait)
    for elapsed in range(secondsToWait):
        time.sleep(1)
        progress(elapsed + 1, secondsToWait)
    sys.stdout.write('\n')
    log("Done, but waiting a random 0-5 extra seconds to not be like a robot []-D")
    time.sleep(random.randint(0, 5))
# Main state machine: one screenshot per second drives stage transitions.
while True:
    time.sleep(1)
    screenshot = GrabScreenshot()
    if _stage == Stage.LauncherMenu:
        log("Looking for play button")
        _loc = LookFor(_img.playagain, screenshot)
        if _loc != None:
            Click(_loc, _img.playagain)
        else:
            _loc = LookFor(_img.matchsearch, screenshot)
            if _loc != None:
                Click(_loc, _img.matchsearch)
                _stage = Stage.InQueue
                log("Looking for a match")
        # The original bare `next` was a no-op expression; `continue` is the
        # intent — restart the loop so later stages see a fresh screenshot.
        continue
    if _stage == Stage.InQueue:
        _loc = LookFor(_img.matchfound, screenshot)
        if _loc != None:
            Click(_loc, _img.matchfound)
            log("Match accepted")
        else:
            _loc = LookFor(_img.loadingscreen, screenshot)
            if _loc != None:
                _stage = Stage.LoadingScreen
                log("On Loading Screen")
                startedLoading = time.time()
                # Loading screens last a while; skip a minute of polling.
                time.sleep(60)
        continue
    if _stage == Stage.LoadingScreen:
        _loc = LookFor(_img.gamestarted, screenshot)
        if _loc != None:
            Click(_loc, _img.gamestarted, False)
            _stage = Stage.GameStarted
            log("Game Started")
        else:
            # Bail-out check: loading should never take 5 minutes.
            if startedLoading != None and (time.time() - startedLoading) >= 300:
                log("5 Minutes has passed and still the game has not stared, something went wrong.")
                # WIP - check and kill League process and then login again.
    if _stage == Stage.GameStarted:
        WaitForMatchEnd()
        _stage = Stage.GameFinished
        continue
    if _stage == Stage.GameFinished:
        surrender()
        stillInGame = True
        while stillInGame:
            time.sleep(1)
            screenshot = GrabScreenshot()
            _loc = LookFor(_img.surrender, screenshot)
            if _loc != None:
                Click(_loc, _img.surrender)
                log("Surrender button clicked, waiting 5 seconds to verify")
                time.sleep(5)
            else:
                _loc = LookFor(_img.playagain, screenshot)
                if _loc != None:
                    stillInGame = False
                else:
                    _loc = LookFor(_img.matchsearch, screenshot)
                    if _loc != None:
                        stillInGame = False
                    else:
                        # So, there's no button? maybe the surrender wasn't triggered
                        surrender()
        matchesCount += 1
        log("Match done. Tokens farmed: " + str(matchesCount * 4) + ", for a total " + str(matchesCount) + " matches." )
        _stage = Stage.LauncherMenu
"cv2.matchTemplate",
"sys.stdout.write",
"random.randint",
"pyautogui.typewrite",
"PIL.ImageGrab.grab",
"pyautogui.press",
"pyautogui.mouseUp",
"time.gmtime",
"time.strftime",
"time.sleep",
"time.time",
"cv2.imread",
"sys.stdout.flush",
"numpy.array",
"cv2.minMaxLoc",
"pyautogui.mouseD... | [((1316, 1352), 'cv2.imread', 'cv2.imread', (['"""images/matchsearch.jpg"""'], {}), "('images/matchsearch.jpg')\n", (1326, 1352), False, 'import cv2\n'), ((1368, 1400), 'cv2.imread', 'cv2.imread', (['"""images/inqueue.jpg"""'], {}), "('images/inqueue.jpg')\n", (1378, 1400), False, 'import cv2\n'), ((1422, 1460), 'cv2.imread', 'cv2.imread', (['"""images/loadingscreen.jpg"""'], {}), "('images/loadingscreen.jpg')\n", (1432, 1460), False, 'import cv2\n'), ((1479, 1514), 'cv2.imread', 'cv2.imread', (['"""images/matchfound.jpg"""'], {}), "('images/matchfound.jpg')\n", (1489, 1514), False, 'import cv2\n'), ((1534, 1570), 'cv2.imread', 'cv2.imread', (['"""images/gamestarted.jpg"""'], {}), "('images/gamestarted.jpg')\n", (1544, 1570), False, 'import cv2\n'), ((1585, 1616), 'cv2.imread', 'cv2.imread', (['"""images/defeat.jpg"""'], {}), "('images/defeat.jpg')\n", (1595, 1616), False, 'import cv2\n'), ((1634, 1668), 'cv2.imread', 'cv2.imread', (['"""images/playagain.jpg"""'], {}), "('images/playagain.jpg')\n", (1644, 1668), False, 'import cv2\n'), ((1745, 1779), 'cv2.imread', 'cv2.imread', (['"""images/surrender.jpg"""'], {}), "('images/surrender.jpg')\n", (1755, 1779), False, 'import cv2\n'), ((2375, 2428), 'cv2.matchTemplate', 'cv2.matchTemplate', (['screen', 'item', 'cv2.TM_CCOEFF_NORMED'], {}), '(screen, item, cv2.TM_CCOEFF_NORMED)\n', (2392, 2428), False, 'import cv2\n'), ((2471, 2492), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['result'], {}), '(result)\n', (2484, 2492), False, 'import cv2\n'), ((2675, 2737), 'pyautogui.moveTo', 'pyautogui.moveTo', (['(x + item_width / 2)', '(y + item_height / 2)', '(0.5)'], {}), '(x + item_width / 2, y + item_height / 2, 0.5)\n', (2691, 2737), False, 'import pyautogui\n'), ((2737, 2758), 'pyautogui.mouseDown', 'pyautogui.mouseDown', ([], {}), '()\n', (2756, 2758), False, 'import pyautogui\n'), ((2764, 2779), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2774, 2779), False, 'import time\n'), ((2784, 2803), 
'pyautogui.mouseUp', 'pyautogui.mouseUp', ([], {}), '()\n', (2801, 2803), False, 'import pyautogui\n'), ((3203, 3267), 'sys.stdout.write', 'sys.stdout.write', (["('[%s] %s minutes left\\r' % (display, timeLeft))"], {}), "('[%s] %s minutes left\\r' % (display, timeLeft))\n", (3219, 3267), False, 'import sys\n'), ((3273, 3291), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3289, 3291), False, 'import sys\n'), ((3349, 3370), 'pyautogui.mouseDown', 'pyautogui.mouseDown', ([], {}), '()\n', (3368, 3370), False, 'import pyautogui\n'), ((3376, 3391), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (3386, 3391), False, 'import time\n'), ((3396, 3415), 'pyautogui.mouseUp', 'pyautogui.mouseUp', ([], {}), '()\n', (3413, 3415), False, 'import pyautogui\n'), ((3421, 3445), 'pyautogui.press', 'pyautogui.press', (['"""enter"""'], {}), "('enter')\n", (3436, 3445), False, 'import pyautogui\n'), ((3451, 3466), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3461, 3466), False, 'import time\n'), ((3471, 3495), 'pyautogui.typewrite', 'pyautogui.typewrite', (['"""/"""'], {}), "('/')\n", (3490, 3495), False, 'import pyautogui\n'), ((3501, 3516), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3511, 3516), False, 'import time\n'), ((3521, 3545), 'pyautogui.typewrite', 'pyautogui.typewrite', (['"""f"""'], {}), "('f')\n", (3540, 3545), False, 'import pyautogui\n'), ((3551, 3566), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3561, 3566), False, 'import time\n'), ((3571, 3595), 'pyautogui.typewrite', 'pyautogui.typewrite', (['"""f"""'], {}), "('f')\n", (3590, 3595), False, 'import pyautogui\n'), ((3601, 3616), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3611, 3616), False, 'import time\n'), ((3621, 3645), 'pyautogui.press', 'pyautogui.press', (['"""enter"""'], {}), "('enter')\n", (3636, 3645), False, 'import pyautogui\n'), ((3842, 3864), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3858, 3864), False, 
'import sys\n'), ((4007, 4020), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4017, 4020), False, 'import time\n'), ((2131, 2168), 'time.strftime', 'time.strftime', (['"""%d/%m/%Y %H:%M:%S >_"""'], {}), "('%d/%m/%Y %H:%M:%S >_')\n", (2144, 2168), False, 'import time\n'), ((2317, 2340), 'numpy.array', 'numpy.array', (['screenshot'], {}), '(screenshot)\n', (2328, 2340), False, 'import numpy\n'), ((3165, 3198), 'time.gmtime', 'time.gmtime', (['(totalSeconds - count)'], {}), '(totalSeconds - count)\n', (3176, 3198), False, 'import time\n'), ((3785, 3798), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3795, 3798), False, 'import time\n'), ((3966, 3986), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (3980, 3986), False, 'import random\n'), ((2216, 2232), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (2230, 2232), False, 'from PIL import ImageGrab\n'), ((5750, 5763), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5760, 5763), False, 'import time\n'), ((2930, 2952), 'random.randint', 'random.randint', (['(0)', '(700)'], {}), '(0, 700)\n', (2944, 2952), False, 'import random\n'), ((4921, 4932), 'time.time', 'time.time', ([], {}), '()\n', (4930, 4932), False, 'import time\n'), ((4950, 4964), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (4960, 4964), False, 'import time\n'), ((6033, 6046), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6043, 6046), False, 'import time\n'), ((5286, 5297), 'time.time', 'time.time', ([], {}), '()\n', (5295, 5297), False, 'import time\n')] |
#!venv/bin/python3
#main.py
import numpy as np
import cv2
import time
#
from muralia.utils import (
imread,
imshow,
imwrite,
resize_image,
resize_format,
crop_image
)
from muralia.pdi import (
compare_dist,
correlation_matrix,
create_small_images,
generate_mosaic,
correlation_matrix_resize,
generate_mosaic_resize,
create_photos
)
from muralia.files import (
files_from_dir,
is_file_exist,
join_path,
mkdir,
load_list,
save_list,
mk_all_dirs
)
from muralia.position import(
distances_to_point
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def format_number(i):
    """Return *i* rendered as a zero-padded string of at least four digits."""
    return '%04d' % i
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def main():
    """End-to-end mosaic pipeline: build (or load) the tile/target correlation
    matrix, assemble the mosaic image and export per-tile photos.
    """
    # folders
    set_path = 'set_images' # folder of small tile images # INPUT
    output_path = 'output_images' # folder where the output files are stored # INPUT
    __ = mk_all_dirs('output_images', root=True)
    # files
    main_image = 'main_images/jennifer.jpg' # original (target) image # INPUT
    correlation_file = join_path(output_path,'correlation_file') # correlation-matrix archive
    path_output_filename_small = join_path(output_path,'path_output_filename_small.txt') # cached list of tile files
    output_path_mosaic = join_path(output_path,'mosaic_output.png') # mosaic
    path_output_main_image = join_path(output_path,'main_image/output_main.png') # resized main-image output
    output_filenames_list_pos = join_path(output_path, 'filenames_and_positions.txt')
    output_photo_path = join_path(output_path, 'output_photo_path')
    # --------------------------------------------------------------------------
    # choose the appropriate size for the small tile images
    #little_shape = (590, 590, 3)
    little_shape = (32,32,3)
    shape_images = (36,64) #16:9
    #little_shape = (295, 295, 3)
    #shape_images = (36, 64) #16:9
    #shape_images = (28,21) #4:3
    #shape_images = (24,24) #1:1
    # --------------------------------------------------------------------------
    # read the small tile images (cached file list is reused when present)
    if (is_file_exist(path_output_filename_small)):
        set_files = load_list(path_output_filename_small)
    else:
        set_files = files_from_dir(set_path, root=False)
        set_files.sort()
        save_list(path_output_filename_small, set_files)
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    #resize_files = files_from_dir(resize_path, root=True)
    #resize_files.sort()
    new_set_path = [join_path(set_path, item) for item in set_files]
    # --------------------------------------------------------------------------
    # compute the correlation matrix once and cache it as a compressed .npz
    # --------------------------------------------------------------------------
    if not is_file_exist(correlation_file + '.npz'):
        print('creating a correlation matrix and save... ')
        pos_list = list(range(8))
        corr_mat, pos_mat = correlation_matrix_resize(main_image, path_output_main_image, new_set_path, little_shape, shape_images, pos_list)
        np.savez_compressed('output_images/correlation_file', a=corr_mat, b=pos_mat)
    else:
        print('load correlation matrix... ')
        loaded = np.load(correlation_file + '.npz')
        corr_mat = loaded['a']
        pos_mat = loaded['b']
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    generate_mosaic_resize(shape_images, corr_mat, new_set_path, pos_mat, little_shape, output_path_mosaic, output_filenames_list_pos)
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    create_photos(output_photo_path, output_filenames_list_pos)
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    print('success!')
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
if __name__ == '__main__':
main()
| [
"muralia.pdi.generate_mosaic_resize",
"muralia.files.save_list",
"numpy.load",
"muralia.pdi.create_photos",
"numpy.savez_compressed",
"muralia.files.load_list",
"muralia.files.join_path",
"muralia.files.mk_all_dirs",
"muralia.pdi.correlation_matrix_resize",
"muralia.files.files_from_dir",
"mural... | [((1149, 1188), 'muralia.files.mk_all_dirs', 'mk_all_dirs', (['"""output_images"""'], {'root': '(True)'}), "('output_images', root=True)\n", (1160, 1188), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((1295, 1337), 'muralia.files.join_path', 'join_path', (['output_path', '"""correlation_file"""'], {}), "(output_path, 'correlation_file')\n", (1304, 1337), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((1408, 1464), 'muralia.files.join_path', 'join_path', (['output_path', '"""path_output_filename_small.txt"""'], {}), "(output_path, 'path_output_filename_small.txt')\n", (1417, 1464), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((1513, 1556), 'muralia.files.join_path', 'join_path', (['output_path', '"""mosaic_output.png"""'], {}), "(output_path, 'mosaic_output.png')\n", (1522, 1556), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((1595, 1647), 'muralia.files.join_path', 'join_path', (['output_path', '"""main_image/output_main.png"""'], {}), "(output_path, 'main_image/output_main.png')\n", (1604, 1647), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((1695, 1748), 'muralia.files.join_path', 'join_path', (['output_path', '"""filenames_and_positions.txt"""'], {}), "(output_path, 'filenames_and_positions.txt')\n", (1704, 1748), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((1773, 1816), 'muralia.files.join_path', 'join_path', (['output_path', '"""output_photo_path"""'], {}), "(output_path, 'output_photo_path')\n", (1782, 1816), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, 
load_list, save_list, mk_all_dirs\n'), ((2320, 2361), 'muralia.files.is_file_exist', 'is_file_exist', (['path_output_filename_small'], {}), '(path_output_filename_small)\n', (2333, 2361), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((3757, 3891), 'muralia.pdi.generate_mosaic_resize', 'generate_mosaic_resize', (['shape_images', 'corr_mat', 'new_set_path', 'pos_mat', 'little_shape', 'output_path_mosaic', 'output_filenames_list_pos'], {}), '(shape_images, corr_mat, new_set_path, pos_mat,\n little_shape, output_path_mosaic, output_filenames_list_pos)\n', (3779, 3891), False, 'from muralia.pdi import compare_dist, correlation_matrix, create_small_images, generate_mosaic, correlation_matrix_resize, generate_mosaic_resize, create_photos\n'), ((4054, 4113), 'muralia.pdi.create_photos', 'create_photos', (['output_photo_path', 'output_filenames_list_pos'], {}), '(output_photo_path, output_filenames_list_pos)\n', (4067, 4113), False, 'from muralia.pdi import compare_dist, correlation_matrix, create_small_images, generate_mosaic, correlation_matrix_resize, generate_mosaic_resize, create_photos\n'), ((2384, 2421), 'muralia.files.load_list', 'load_list', (['path_output_filename_small'], {}), '(path_output_filename_small)\n', (2393, 2421), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((2452, 2488), 'muralia.files.files_from_dir', 'files_from_dir', (['set_path'], {'root': '(False)'}), '(set_path, root=False)\n', (2466, 2488), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((2522, 2570), 'muralia.files.save_list', 'save_list', (['path_output_filename_small', 'set_files'], {}), '(path_output_filename_small, set_files)\n', (2531, 2570), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), 
((2837, 2862), 'muralia.files.join_path', 'join_path', (['set_path', 'item'], {}), '(set_path, item)\n', (2846, 2862), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((3059, 3099), 'muralia.files.is_file_exist', 'is_file_exist', (["(correlation_file + '.npz')"], {}), "(correlation_file + '.npz')\n", (3072, 3099), False, 'from muralia.files import files_from_dir, is_file_exist, join_path, mkdir, load_list, save_list, mk_all_dirs\n'), ((3224, 3341), 'muralia.pdi.correlation_matrix_resize', 'correlation_matrix_resize', (['main_image', 'path_output_main_image', 'new_set_path', 'little_shape', 'shape_images', 'pos_list'], {}), '(main_image, path_output_main_image, new_set_path,\n little_shape, shape_images, pos_list)\n', (3249, 3341), False, 'from muralia.pdi import compare_dist, correlation_matrix, create_small_images, generate_mosaic, correlation_matrix_resize, generate_mosaic_resize, create_photos\n'), ((3346, 3422), 'numpy.savez_compressed', 'np.savez_compressed', (['"""output_images/correlation_file"""'], {'a': 'corr_mat', 'b': 'pos_mat'}), "('output_images/correlation_file', a=corr_mat, b=pos_mat)\n", (3365, 3422), True, 'import numpy as np\n'), ((3495, 3529), 'numpy.load', 'np.load', (["(correlation_file + '.npz')"], {}), "(correlation_file + '.npz')\n", (3502, 3529), True, 'import numpy as np\n')] |
import copy
import os.path as osp
import numpy as np
import tensorflow as tf
from spektral.data import Graph
from spektral.data.utils import get_spec
from spektral.datasets.utils import DATASET_FOLDER
class Dataset:
    """
    A container for Graph objects. This class can be extended to represent a
    graph dataset.
    Datasets can be accessed with indices (`dataset[0]` returns a `Graph`),
    iterables (`dataset[[1, 2, 3]]` returns a `Dataset`) or slices
    (`dataset[start:stop]` also returns a `Dataset`).
    They can also be shuffled (`np.random.shuffle(dataset)` shuffles in-place),
    and iterated over (`for graph in dataset: ...`).
    They should generally behave like Numpy arrays for any operation that uses
    simple 1D indexing.
    Datasets have the following properties that automatically computed from the
    graphs:
    - `n_nodes`: the number of nodes in the dataset (returns `None` if the
    number changes between graphs);
    - `n_node_features`: the size of the node features (returns `None` if
    the size changes between graphs or is not defined);
    - `n_edge_features`: the size of the edge features (returns `None` if
    the size changes between graphs or is not defined);
    - `n_labels`: the size of the labels (returns `None` if the size changes
    between graphs or is not defined); this is computed as the innermost
    dimension of the labels (i.e., `y.shape[-1]`).
    Any additional `kwargs` passed to the constructor will be automatically
    assigned as instance attributes of the dataset.
    Datasets also offer three main manipulation functions to apply callables to
    their graphs:
    - `apply(transform)`: replaces each graph with the output of
    `transform(graph)`. This should always be a `Graph` object, although no
    checks are made to ensure it (to give you more flexibility). See
    `spektral.transforms` for some ready-to-use transforms.
    For example: `apply(spektral.transforms.NormalizeAdj())` normalizes the
    adjacency matrix of each graph in the dataset.
    - `map(transform, reduce=None)`: returns a list containing the output
    of `transform(graph)` for each graph. If `reduce` is a `callable`, then
    returns `reduce(output_list)` instead of just `output_list`.
    For instance: `map(lambda: g.n_nodes, reduce=np.mean)` will return the
    average number of nodes in the dataset.
    - `filter(function)`: removes from the dataset any graph for which
    `function(graph)` returns `False`.
    For example: `filter(lambda: g.n_nodes < 100)` removes from the dataset all
    graphs bigger than 100 nodes.
    You can extend this class to create your own dataset.
    To create a `Dataset`, you must implement the `Dataset.read()` method, which
    must return a list of `spektral.data.Graph` objects, e.g.,
    ```
    class MyDataset(Dataset):
        def read(self):
            return [Graph(x=x, adj=adj, y=y) for x, adj, y in some_magic_list]
    ```
    The class also offers a `download()` method that is automatically called
    if the path returned by the `Dataset.path` attribute does not exists.
    This defaults to `~/.spektral/datasets/ClassName/`.
    You can implement this however you like, knowing that `download()` will be
    called before `read()`. You can also override the `path` attribute to
    whatever fits your needs.
    Have a look at the `spektral.datasets` module for examples of popular
    datasets already implemented.
    **Arguments**
    - `transforms`: a callable or list of callables that are automatically
    applied to the graphs after loading the dataset.
    """
    def __init__(self, transforms=None, **kwargs):
        # Any extra kwargs become instance attributes (useful for options
        # that `read()`/`download()` need, since they run below).
        for k, v in kwargs.items():
            setattr(self, k, v)

        # Download the data if the dataset folder does not exist yet.
        if not osp.exists(self.path):
            self.download()

        # Read graphs
        self.graphs = self.read()
        if len(self.graphs) == 0:
            raise ValueError('Datasets cannot be empty')

        # Normalize `transforms` to a list of callables and apply in order.
        if transforms is not None:
            if not isinstance(transforms, (list, tuple)):
                if not callable(transforms):
                    # A single non-callable (e.g. an int) previously raised a
                    # confusing TypeError when iterated; fail explicitly.
                    raise ValueError('`transforms` must be a callable or list of '
                                     'callables')
                transforms = [transforms]
            elif not all(callable(t) for t in transforms):
                raise ValueError('`transforms` must be a callable or list of '
                                 'callables')
            for t in transforms:
                self.apply(t)

    def read(self):
        """Read the data and return a non-empty list of Graph objects."""
        raise NotImplementedError

    def download(self):
        """Download the raw data; called automatically when `path` is missing."""
        pass

    def apply(self, transform):
        """Replace each graph in-place with `transform(graph)`."""
        if not callable(transform):
            raise ValueError('`transform` must be callable')

        for i in range(len(self.graphs)):
            self.graphs[i] = transform(self.graphs[i])

    def map(self, transform, reduce=None):
        """Return `[transform(g) for g in graphs]`, optionally folded by `reduce`."""
        if not callable(transform):
            raise ValueError('`transform` must be callable')
        if reduce is not None and not callable(reduce):
            raise ValueError('`reduce` must be callable')

        out = [transform(g) for g in self.graphs]
        return reduce(out) if reduce is not None else out

    def filter(self, function):
        """Keep only the graphs for which `function(graph)` is truthy."""
        if not callable(function):
            raise ValueError('`function` must be callable')
        self.graphs = [g for g in self.graphs if function(g)]

    def __getitem__(self, key):
        if not (np.issubdtype(type(key), np.integer) or
                isinstance(key, (slice, list, tuple, np.ndarray))):
            raise ValueError('Unsupported key type: {}'.format(type(key)))
        if np.issubdtype(type(key), np.integer):
            return self.graphs[int(key)]
        else:
            # Shallow copy so the returned Dataset keeps all attributes but
            # gets its own (sub-)list of graphs.
            dataset = copy.copy(self)
            if isinstance(key, slice):
                dataset.graphs = self.graphs[key]
            else:
                dataset.graphs = [self.graphs[i] for i in key]
            return dataset

    def __setitem__(self, key, value):
        is_iterable = isinstance(value, (list, tuple))
        if not isinstance(value, (Graph, list, tuple)):
            raise ValueError('Datasets can only be assigned Graphs or '
                             'sequences of Graphs')
        if is_iterable and not all([isinstance(v, Graph) for v in value]):
            raise ValueError('Assigned sequence must contain only Graphs')
        if is_iterable and isinstance(key, int):
            raise ValueError('Cannot assign multiple Graphs to one location')
        if not is_iterable and isinstance(key, (slice, list, tuple)):
            raise ValueError('Cannot assign one Graph to multiple locations')
        if not (isinstance(key, (int, slice, list, tuple))):
            raise ValueError('Unsupported key type: {}'.format(type(key)))

        if isinstance(key, int):
            self.graphs[key] = value
        else:
            if isinstance(key, slice):
                self.graphs[key] = value
            else:
                for i, k in enumerate(key):
                    self.graphs[k] = value[i]

    def __len__(self):
        return len(self.graphs)

    def __repr__(self):
        return '{}(n_graphs={})'.format(self.__class__.__name__, self.n_graphs)

    @property
    def path(self):
        # Default storage location: ~/.spektral/datasets/<ClassName>/
        return osp.join(DATASET_FOLDER, self.__class__.__name__)

    @property
    def n_graphs(self):
        return self.__len__()

    @property
    def n_nodes(self):
        # Defined only when all graphs share the same number of nodes.
        if len(self.graphs) == 1 or len(set([g.n_nodes for g in self.graphs])) == 1:
            return self.graphs[0].n_nodes
        else:
            return None

    @property
    def n_node_features(self):
        if len(self.graphs) == 1 or len(set([g.n_node_features for g in self.graphs])) == 1:
            return self.graphs[0].n_node_features
        else:
            return None

    @property
    def n_edge_features(self):
        if len(self.graphs) == 1 or len(set([g.n_edge_features for g in self.graphs])) == 1:
            return self.graphs[0].n_edge_features
        else:
            return None

    @property
    def n_labels(self):
        if len(self.graphs) == 1 or len(set([g.n_labels for g in self.graphs])) == 1:
            return self.graphs[0].n_labels
        else:
            return None

    @property
    def signature(self):
        """
        This property computes the signature of the dataset, which can be
        passed to `spektral.data.utils.to_tf_signature(signature)` to compute
        the TensorFlow signature. You can safely ignore this property unless
        you are creating a custom `Loader`.
        A signature consist of the TensorFlow TypeSpec, shape, and dtype of
        all characteristic matrices of the graphs in the Dataset. This is
        returned as a dictionary of dictionaries, with keys `x`, `a`, `e`, and
        `y` for the four main data matrices.
        Each sub-dictionary will have keys `spec`, `shape` and `dtype`.
        """
        signature = {}
        graph = self.graphs[0]  # This is always non-empty
        if graph.x is not None:
            signature['x'] = dict()
            signature['x']['spec'] = get_spec(graph.x)
            signature['x']['shape'] = (None, self.n_node_features)
            signature['x']['dtype'] = tf.as_dtype(graph.x.dtype)
        if graph.a is not None:
            signature['a'] = dict()
            signature['a']['spec'] = get_spec(graph.a)
            signature['a']['shape'] = (None, None)
            signature['a']['dtype'] = tf.as_dtype(graph.a.dtype)
        if graph.e is not None:
            signature['e'] = dict()
            signature['e']['spec'] = get_spec(graph.e)
            signature['e']['shape'] = (None, self.n_edge_features)
            signature['e']['dtype'] = tf.as_dtype(graph.e.dtype)
        if graph.y is not None:
            signature['y'] = dict()
            signature['y']['spec'] = get_spec(graph.y)
            signature['y']['shape'] = (self.n_labels,)
            signature['y']['dtype'] = tf.as_dtype(np.array(graph.y).dtype)
        return signature
| [
"tensorflow.as_dtype",
"os.path.exists",
"copy.copy",
"numpy.array",
"os.path.join",
"spektral.data.utils.get_spec"
] | [((7285, 7334), 'os.path.join', 'osp.join', (['DATASET_FOLDER', 'self.__class__.__name__'], {}), '(DATASET_FOLDER, self.__class__.__name__)\n', (7293, 7334), True, 'import os.path as osp\n'), ((3844, 3865), 'os.path.exists', 'osp.exists', (['self.path'], {}), '(self.path)\n', (3854, 3865), True, 'import os.path as osp\n'), ((5752, 5767), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (5761, 5767), False, 'import copy\n'), ((9139, 9156), 'spektral.data.utils.get_spec', 'get_spec', (['graph.x'], {}), '(graph.x)\n', (9147, 9156), False, 'from spektral.data.utils import get_spec\n'), ((9262, 9288), 'tensorflow.as_dtype', 'tf.as_dtype', (['graph.x.dtype'], {}), '(graph.x.dtype)\n', (9273, 9288), True, 'import tensorflow as tf\n'), ((9394, 9411), 'spektral.data.utils.get_spec', 'get_spec', (['graph.a'], {}), '(graph.a)\n', (9402, 9411), False, 'from spektral.data.utils import get_spec\n'), ((9501, 9527), 'tensorflow.as_dtype', 'tf.as_dtype', (['graph.a.dtype'], {}), '(graph.a.dtype)\n', (9512, 9527), True, 'import tensorflow as tf\n'), ((9633, 9650), 'spektral.data.utils.get_spec', 'get_spec', (['graph.e'], {}), '(graph.e)\n', (9641, 9650), False, 'from spektral.data.utils import get_spec\n'), ((9756, 9782), 'tensorflow.as_dtype', 'tf.as_dtype', (['graph.e.dtype'], {}), '(graph.e.dtype)\n', (9767, 9782), True, 'import tensorflow as tf\n'), ((9888, 9905), 'spektral.data.utils.get_spec', 'get_spec', (['graph.y'], {}), '(graph.y)\n', (9896, 9905), False, 'from spektral.data.utils import get_spec\n'), ((10011, 10028), 'numpy.array', 'np.array', (['graph.y'], {}), '(graph.y)\n', (10019, 10028), True, 'import numpy as np\n')] |
import unittest
import logging
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from darts.dataprocessing.transformers import Scaler
from darts.utils import timeseries_generation as tg
from darts import TimeSeries
class DataTransformerTestCase(unittest.TestCase):
    """Tests for darts' `Scaler` wrapper around sklearn scalers.

    Covers single-series scaling, multi-series scaling, multivariate
    stochastic series, and component masking.
    """
    __test__ = True

    @classmethod
    def setUpClass(cls):
        logging.disable(logging.CRITICAL)
        # BUG FIX: these fixtures were previously assigned to plain locals
        # (`series1 = ...`), so they vanished when setUpClass returned and
        # every test failed with AttributeError on `self.series1`. They must
        # be class attributes, accessed by the tests through `self`.
        cls.series1 = tg.random_walk_timeseries(length=100, column_name='series1') * 20 - 10.
        cls.series2 = cls.series1.stack(tg.random_walk_timeseries(length=100) * 20 - 100.)
        cls.col_1 = cls.series1.columns

    def test_scaling(self):
        self.series3 = self.series1[:1]
        transformer1 = Scaler(MinMaxScaler(feature_range=(0, 2)))
        transformer2 = Scaler(StandardScaler())

        series1_tr1 = transformer1.fit_transform(self.series1)
        series1_tr2 = transformer2.fit_transform(self.series1)
        series3_tr2 = transformer2.transform(self.series3)

        # should have the defined name above
        self.assertEqual(self.series1.columns[0], 'series1')

        # should keep columns pd.Index
        self.assertEqual(self.col_1, series1_tr1.columns)

        # should comply with scaling constraints
        self.assertAlmostEqual(min(series1_tr1.values().flatten()), 0.)
        self.assertAlmostEqual(max(series1_tr1.values().flatten()), 2.)
        self.assertAlmostEqual(np.mean(series1_tr2.values().flatten()), 0.)
        self.assertAlmostEqual(np.std(series1_tr2.values().flatten()), 1.)

        # test inverse transform
        series1_recovered = transformer2.inverse_transform(series1_tr2)
        series3_recovered = transformer2.inverse_transform(series3_tr2)
        np.testing.assert_almost_equal(series1_recovered.values().flatten(), self.series1.values().flatten())
        self.assertEqual(series1_recovered.width, self.series1.width)
        self.assertEqual(series3_recovered, series1_recovered[:1])

    def test_multi_ts_scaling(self):
        transformer1 = Scaler(MinMaxScaler(feature_range=(0, 2)))
        transformer2 = Scaler(StandardScaler())

        series_array = [self.series1, self.series2]

        series_array_tr1 = transformer1.fit_transform(series_array)
        series_array_tr2 = transformer2.fit_transform(series_array)

        # each series must independently satisfy the scaler's constraints
        for index in range(len(series_array)):
            self.assertAlmostEqual(min(series_array_tr1[index].values().flatten()), 0.)
            self.assertAlmostEqual(max(series_array_tr1[index].values().flatten()), 2.)
            self.assertAlmostEqual(np.mean(series_array_tr2[index].values().flatten()), 0.)
            self.assertAlmostEqual(np.std(series_array_tr2[index].values().flatten()), 1.)

        # inverse transform must recover the original values
        series_array_rec1 = transformer1.inverse_transform(series_array_tr1)
        series_array_rec2 = transformer2.inverse_transform(series_array_tr2)
        for index in range(len(series_array)):
            np.testing.assert_almost_equal(series_array_rec1[index].values().flatten(),
                                           series_array[index].values().flatten())
            np.testing.assert_almost_equal(series_array_rec2[index].values().flatten(),
                                           series_array[index].values().flatten())

    def test_multivariate_stochastic_series(self):
        scaler = Scaler(MinMaxScaler())
        vals = np.random.rand(10, 5, 50)
        s = TimeSeries.from_values(vals)
        ss = scaler.fit_transform(s)
        ssi = scaler.inverse_transform(ss)

        # Test inverse transform
        np.testing.assert_allclose(s.all_values(), ssi.all_values())

        # Test that the transform is done per component (i.e max value over each component should be 1 and min 0)
        np.testing.assert_allclose(np.array([ss.all_values(copy=False)[:,i,:].max() for i in range(ss.width)]),
                                   np.array([1.] * ss.width))
        np.testing.assert_allclose(np.array([ss.all_values(copy=False)[:,i,:].min() for i in range(ss.width)]),
                                   np.array([0.] * ss.width))

    def test_component_mask_transformation(self):
        scaler = Scaler(MinMaxScaler())
        # shape = (10, 3, 2)
        vals = np.array([np.arange(6).reshape(3, 2)] * 10)
        # scalers should only consider True columns
        component_mask = np.array([True, False, True])
        s = TimeSeries.from_values(vals)
        ss = scaler.fit_transform(s, component_mask=component_mask)
        ss_vals = ss.all_values(copy=False)

        # test non-masked columns
        self.assertTrue((ss_vals[:, 1, :] == vals[:, 1, :]).all())
        # test masked columns
        self.assertAlmostEqual(ss_vals[:, [0, 2], :].max(), 1.)
        self.assertAlmostEqual(ss_vals[:, [0, 2], :].min(), 0.)

        ssi = scaler.inverse_transform(ss, component_mask=component_mask)
        # Test inverse transform
        np.testing.assert_allclose(s.all_values(), ssi.all_values())
| [
"sklearn.preprocessing.StandardScaler",
"darts.TimeSeries.from_values",
"sklearn.preprocessing.MinMaxScaler",
"logging.disable",
"darts.utils.timeseries_generation.random_walk_timeseries",
"numpy.array",
"numpy.arange",
"numpy.random.rand"
] | [((372, 405), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (387, 405), False, 'import logging\n'), ((3346, 3371), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)', '(50)'], {}), '(10, 5, 50)\n', (3360, 3371), True, 'import numpy as np\n'), ((3382, 3410), 'darts.TimeSeries.from_values', 'TimeSeries.from_values', (['vals'], {}), '(vals)\n', (3404, 3410), False, 'from darts import TimeSeries\n'), ((4317, 4346), 'numpy.array', 'np.array', (['[True, False, True]'], {}), '([True, False, True])\n', (4325, 4346), True, 'import numpy as np\n'), ((4360, 4388), 'darts.TimeSeries.from_values', 'TimeSeries.from_values', (['vals'], {}), '(vals)\n', (4382, 4388), False, 'from darts import TimeSeries\n'), ((421, 481), 'darts.utils.timeseries_generation.random_walk_timeseries', 'tg.random_walk_timeseries', ([], {'length': '(100)', 'column_name': '"""series1"""'}), "(length=100, column_name='series1')\n", (446, 481), True, 'from darts.utils import timeseries_generation as tg\n'), ((700, 734), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 2)'}), '(feature_range=(0, 2))\n', (712, 734), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((766, 782), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (780, 782), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((2013, 2047), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 2)'}), '(feature_range=(0, 2))\n', (2025, 2047), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((2079, 2095), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2093, 2095), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((3315, 3329), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3327, 3329), False, 'from sklearn.preprocessing import MinMaxScaler, 
StandardScaler\n'), ((3857, 3883), 'numpy.array', 'np.array', (['([1.0] * ss.width)'], {}), '([1.0] * ss.width)\n', (3865, 3883), True, 'import numpy as np\n'), ((4033, 4059), 'numpy.array', 'np.array', (['([0.0] * ss.width)'], {}), '([0.0] * ss.width)\n', (4041, 4059), True, 'import numpy as np\n'), ((4135, 4149), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4147, 4149), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((521, 558), 'darts.utils.timeseries_generation.random_walk_timeseries', 'tg.random_walk_timeseries', ([], {'length': '(100)'}), '(length=100)\n', (546, 558), True, 'from darts.utils import timeseries_generation as tg\n'), ((4205, 4217), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (4214, 4217), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 20:39:07 2020
@author: JianyuanZhai
"""
import pyomo.environ as pe
import numpy as np
import time
DOUBLE = np.float64
class DDCU_Nonuniform():
    """Data-driven convex underestimator with a separable quadratic surrogate.

    Fits f(x) = sum_j a_j*x_j**2 + b_j*x_j + c below the sampled values
    (all_X, all_Y) by solving an LP, then minimizes the surrogate on [0, 1]^d
    coordinate-wise to produce a lower bound.
    """
    def __init__(self, intercept = True):
        # If True, the intercept c is an LP decision variable; otherwise it
        # is fixed from the sample located at the origin (see _underestimate).
        self.intercept = intercept
        self.ddcu = DDCU_model._make_pyomo_ddcu_nonuniform(intercept)
        self.solver = pe.SolverFactory('glpk')
        self.time_underestimate = 0.

    @staticmethod
    def _minimize_1d(a, b, c):
        """Return the argmin of a*x**2 + b*x over x in [0, 1].

        ``c`` is a constant offset that does not affect the argmin; the
        parameter is kept for call-site compatibility.
        """
        if a > 0.:
            # Convex parabola: clamp the unconstrained vertex into [0, 1].
            vertex = -b / (2 * a)
            return min(max(vertex, 0.), 1.)
        elif 0. >= a >= -10. ** -5:
            # Numerically linear (a in [-1e-5, 0]): minimum at an endpoint.
            if b > 0.:
                return 0.
            if b < 0.:
                return 1.
            else:
                return 0.5
        else:
            # Concave (a < -1e-5): the minimum is at an endpoint. The original
            # code fell through and returned None here; this should not occur
            # in practice because `a` is constrained non-negative, but return
            # the correct endpoint (f(0)=0 vs f(1)=a+b) for robustness.
            return 1. if a + b < 0. else 0.

    def update_solver(self, solver, option=None):
        """Replace the LP solver by name (e.g. 'glpk').

        ``option`` is accepted for backward compatibility but is currently
        ignored. (The previous signature used a mutable default `option={}`,
        a classic shared-state pitfall; `None` is the safe default.)
        """
        self.solver = pe.SolverFactory(solver)

    def _underestimate(self, all_X, all_Y):
        """Fit the underestimator to the samples and minimize it on [0, 1]^d.

        Returns a tuple ``(flb_s, xopt)``: the lower-bound value (float) and
        the minimizer wrapped in a (1, dim) numpy array.
        """
        time_start = time.time()
        dim = all_X.shape[1]
        sample_ind = list(range(len(all_Y)))
        x_ind = list(range(dim))
        # Flatten samples into the {(sample, coord): value} dict format that
        # pyomo's AbstractModel.create_instance expects.
        x_dict = {}
        for i in sample_ind:
            for j in x_ind:
                x_dict[(i, j)] = all_X[i, j]
        if self.intercept:
            data = {None: {'x_ind': {None: x_ind}, 'sample_ind': {None: sample_ind},
                           'xs': x_dict, 'ys': dict(zip(sample_ind, all_Y))}}
        else:
            # Fix the intercept from the sample(s) located at the origin.
            corner_point_ind = np.where((all_X == 0.).all(axis=1))[0]
            if len(corner_point_ind) > 1:
                # Several samples at the origin: use the smallest observed
                # function value. (BUG FIX: the original code took min over
                # the all-zero X rows, i.e. `min(all_X[corner_point_ind])`,
                # which is ambiguous for arrays and wrong; the intercept must
                # come from the Y values.)
                intercept = float(min(all_Y[corner_point_ind]))
            else:
                intercept = float(all_Y[corner_point_ind])
            data = {None: {'x_ind': {None: x_ind}, 'sample_ind': {None: sample_ind},
                           'xs': x_dict, 'ys': dict(zip(sample_ind, all_Y)),
                           'c': {None: intercept}}}
        model = self.ddcu.create_instance(data)  # create an instance for abstract pyomo model
        self.solver.solve(model)
        a = np.array([round(pe.value(model.a[i]), 6) for i in model.x_ind])
        if (a < 0.).any():
            # `a` is declared NonNegativeReals; negatives indicate solver trouble.
            model.pprint()
        b = np.array([pe.value(model.b[i]) for i in model.x_ind])
        c = pe.value(model.c)
        # The surrogate is separable, so minimize each coordinate independently.
        xopt = np.array([self._minimize_1d(a[i], b[i], c) for i in range(dim)])
        flb_s = sum(a * xopt ** 2 + b * xopt) + c
        # Snap to the best sample when the bound matches it within tolerance.
        if abs(flb_s - min(all_Y)) <= 0.00001:
            flb_s = min(all_Y)
        self.time_underestimate += time.time() - time_start
        return float(flb_s), np.array([xopt])
class DDCU_model:
    """
    This class contains recipes to make pyomo models for different pyomo_models for underestimators
    """
    @staticmethod
    def _linear_obj_rule(model):
        # Objective: minimize the total gap between the sampled values and the
        # surrogate f (the surrogate is constrained below every sample, so
        # each term is non-negative).
        return sum((model.ys[i] - model.f[i]) for i in model.sample_ind)
    @staticmethod
    def _underestimating_con_rule(model, i):
        # Underestimation constraint: the surrogate must not exceed the
        # sampled value at sample i.
        return model.ys[i] - model.f[i] >= 0.0
    @staticmethod
    def _quadratic_nonuniform(model, i):
        # Separable quadratic surrogate: f_i = sum_j a_j*x_ij^2 + b_j*x_ij + c.
        return model.f[i] == sum(model.a[j]*model.xs[i,j]**2 + model.b[j]*model.xs[i,j] for j in model.x_ind) + model.c
    @staticmethod
    def _exponential(model, i):
        # Exponential surrogate: f_i = exp(sum_j a_j*(x_ij - b_j)^2) + c.
        a = sum(model.a[j]*(model.xs[i,j]-model.b[j])**2 for j in model.x_ind)
        return model.f[i] == pe.exp(a) + model.c
    @staticmethod
    def _make_pyomo_ddcu_nonuniform(intercept):
        """Build the abstract pyomo model for the separable quadratic underestimator.

        If ``intercept`` is True the constant term c is a decision variable;
        otherwise it is a Param that must be supplied in the instance data.
        """
        ddcu = pe.AbstractModel()
        ddcu.sample_ind = pe.Set()
        ddcu.x_ind = pe.Set()
        ddcu.ys = pe.Param(ddcu.sample_ind)
        ddcu.xs = pe.Param(ddcu.sample_ind,ddcu.x_ind)
        # Quadratic coefficients are non-negative so the surrogate is convex.
        ddcu.a = pe.Var(ddcu.x_ind,within = pe.NonNegativeReals, initialize=0.)
        ddcu.b = pe.Var(ddcu.x_ind,within = pe.Reals)
        ddcu.f = pe.Var(ddcu.sample_ind)
        if intercept :
            ddcu.c = pe.Var(within = pe.Reals)
        else :
            ddcu.c = pe.Param()
        ddcu.obj = pe.Objective(rule = DDCU_model._linear_obj_rule)
        ddcu.con1 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._underestimating_con_rule)
        ddcu.con2 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._quadratic_nonuniform)
        return ddcu
    @staticmethod
    def _make_pyomo_ddcu_exponential():
        """Build the abstract pyomo model for the exponential underestimator.

        Same structure as the quadratic recipe but with the exponential
        surrogate constraint and a free (Var) intercept.
        """
        ddcu = pe.AbstractModel()
        ddcu.sample_ind = pe.Set()
        ddcu.x_ind = pe.Set()
        ddcu.ys = pe.Param(ddcu.sample_ind)
        ddcu.xs = pe.Param(ddcu.sample_ind,ddcu.x_ind)
        ddcu.a = pe.Var(ddcu.x_ind,within = pe.NonNegativeReals)
        #ddcu.b = pe.Param(ddcu.x_ind)
        ddcu.b = pe.Var(ddcu.x_ind, within = pe.Reals)
        ddcu.f = pe.Var(ddcu.sample_ind)
        ddcu.c = pe.Var(within = pe.Reals)
        ddcu.obj = pe.Objective(rule = DDCU_model._linear_obj_rule)
        ddcu.con1 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._underestimating_con_rule)
        ddcu.con2 = pe.Constraint(ddcu.sample_ind, rule = DDCU_model._exponential)
        return ddcu
| [
"pyomo.environ.SolverFactory",
"pyomo.environ.Constraint",
"pyomo.environ.Var",
"pyomo.environ.value",
"time.time",
"pyomo.environ.Objective",
"pyomo.environ.exp",
"numpy.array",
"pyomo.environ.Param",
"pyomo.environ.AbstractModel",
"pyomo.environ.Set"
] | [((387, 411), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""glpk"""'], {}), "('glpk')\n", (403, 411), True, 'import pyomo.environ as pe\n'), ((1001, 1025), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['solver'], {}), '(solver)\n', (1017, 1025), True, 'import pyomo.environ as pe\n'), ((1091, 1102), 'time.time', 'time.time', ([], {}), '()\n', (1100, 1102), False, 'import time\n'), ((2359, 2376), 'pyomo.environ.value', 'pe.value', (['model.c'], {}), '(model.c)\n', (2367, 2376), True, 'import pyomo.environ as pe\n'), ((3507, 3525), 'pyomo.environ.AbstractModel', 'pe.AbstractModel', ([], {}), '()\n', (3523, 3525), True, 'import pyomo.environ as pe\n'), ((3552, 3560), 'pyomo.environ.Set', 'pe.Set', ([], {}), '()\n', (3558, 3560), True, 'import pyomo.environ as pe\n'), ((3582, 3590), 'pyomo.environ.Set', 'pe.Set', ([], {}), '()\n', (3588, 3590), True, 'import pyomo.environ as pe\n'), ((3611, 3636), 'pyomo.environ.Param', 'pe.Param', (['ddcu.sample_ind'], {}), '(ddcu.sample_ind)\n', (3619, 3636), True, 'import pyomo.environ as pe\n'), ((3655, 3692), 'pyomo.environ.Param', 'pe.Param', (['ddcu.sample_ind', 'ddcu.x_ind'], {}), '(ddcu.sample_ind, ddcu.x_ind)\n', (3663, 3692), True, 'import pyomo.environ as pe\n'), ((3709, 3771), 'pyomo.environ.Var', 'pe.Var', (['ddcu.x_ind'], {'within': 'pe.NonNegativeReals', 'initialize': '(0.0)'}), '(ddcu.x_ind, within=pe.NonNegativeReals, initialize=0.0)\n', (3715, 3771), True, 'import pyomo.environ as pe\n'), ((3789, 3824), 'pyomo.environ.Var', 'pe.Var', (['ddcu.x_ind'], {'within': 'pe.Reals'}), '(ddcu.x_ind, within=pe.Reals)\n', (3795, 3824), True, 'import pyomo.environ as pe\n'), ((3858, 3881), 'pyomo.environ.Var', 'pe.Var', (['ddcu.sample_ind'], {}), '(ddcu.sample_ind)\n', (3864, 3881), True, 'import pyomo.environ as pe\n'), ((4034, 4080), 'pyomo.environ.Objective', 'pe.Objective', ([], {'rule': 'DDCU_model._linear_obj_rule'}), '(rule=DDCU_model._linear_obj_rule)\n', (4046, 4080), True, 'import pyomo.environ as 
pe\n'), ((4103, 4176), 'pyomo.environ.Constraint', 'pe.Constraint', (['ddcu.sample_ind'], {'rule': 'DDCU_model._underestimating_con_rule'}), '(ddcu.sample_ind, rule=DDCU_model._underestimating_con_rule)\n', (4116, 4176), True, 'import pyomo.environ as pe\n'), ((4199, 4268), 'pyomo.environ.Constraint', 'pe.Constraint', (['ddcu.sample_ind'], {'rule': 'DDCU_model._quadratic_nonuniform'}), '(ddcu.sample_ind, rule=DDCU_model._quadratic_nonuniform)\n', (4212, 4268), True, 'import pyomo.environ as pe\n'), ((4376, 4394), 'pyomo.environ.AbstractModel', 'pe.AbstractModel', ([], {}), '()\n', (4392, 4394), True, 'import pyomo.environ as pe\n'), ((4421, 4429), 'pyomo.environ.Set', 'pe.Set', ([], {}), '()\n', (4427, 4429), True, 'import pyomo.environ as pe\n'), ((4451, 4459), 'pyomo.environ.Set', 'pe.Set', ([], {}), '()\n', (4457, 4459), True, 'import pyomo.environ as pe\n'), ((4480, 4505), 'pyomo.environ.Param', 'pe.Param', (['ddcu.sample_ind'], {}), '(ddcu.sample_ind)\n', (4488, 4505), True, 'import pyomo.environ as pe\n'), ((4524, 4561), 'pyomo.environ.Param', 'pe.Param', (['ddcu.sample_ind', 'ddcu.x_ind'], {}), '(ddcu.sample_ind, ddcu.x_ind)\n', (4532, 4561), True, 'import pyomo.environ as pe\n'), ((4578, 4624), 'pyomo.environ.Var', 'pe.Var', (['ddcu.x_ind'], {'within': 'pe.NonNegativeReals'}), '(ddcu.x_ind, within=pe.NonNegativeReals)\n', (4584, 4624), True, 'import pyomo.environ as pe\n'), ((4683, 4718), 'pyomo.environ.Var', 'pe.Var', (['ddcu.x_ind'], {'within': 'pe.Reals'}), '(ddcu.x_ind, within=pe.Reals)\n', (4689, 4718), True, 'import pyomo.environ as pe\n'), ((4752, 4775), 'pyomo.environ.Var', 'pe.Var', (['ddcu.sample_ind'], {}), '(ddcu.sample_ind)\n', (4758, 4775), True, 'import pyomo.environ as pe\n'), ((4793, 4816), 'pyomo.environ.Var', 'pe.Var', ([], {'within': 'pe.Reals'}), '(within=pe.Reals)\n', (4799, 4816), True, 'import pyomo.environ as pe\n'), ((4838, 4884), 'pyomo.environ.Objective', 'pe.Objective', ([], {'rule': 'DDCU_model._linear_obj_rule'}), 
'(rule=DDCU_model._linear_obj_rule)\n', (4850, 4884), True, 'import pyomo.environ as pe\n'), ((4907, 4980), 'pyomo.environ.Constraint', 'pe.Constraint', (['ddcu.sample_ind'], {'rule': 'DDCU_model._underestimating_con_rule'}), '(ddcu.sample_ind, rule=DDCU_model._underestimating_con_rule)\n', (4920, 4980), True, 'import pyomo.environ as pe\n'), ((5003, 5063), 'pyomo.environ.Constraint', 'pe.Constraint', (['ddcu.sample_ind'], {'rule': 'DDCU_model._exponential'}), '(ddcu.sample_ind, rule=DDCU_model._exponential)\n', (5016, 5063), True, 'import pyomo.environ as pe\n'), ((2616, 2627), 'time.time', 'time.time', ([], {}), '()\n', (2625, 2627), False, 'import time\n'), ((2670, 2686), 'numpy.array', 'np.array', (['[xopt]'], {}), '([xopt])\n', (2678, 2686), True, 'import numpy as np\n'), ((3926, 3949), 'pyomo.environ.Var', 'pe.Var', ([], {'within': 'pe.Reals'}), '(within=pe.Reals)\n', (3932, 3949), True, 'import pyomo.environ as pe\n'), ((3988, 3998), 'pyomo.environ.Param', 'pe.Param', ([], {}), '()\n', (3996, 3998), True, 'import pyomo.environ as pe\n'), ((2303, 2323), 'pyomo.environ.value', 'pe.value', (['model.b[i]'], {}), '(model.b[i])\n', (2311, 2323), True, 'import pyomo.environ as pe\n'), ((3399, 3408), 'pyomo.environ.exp', 'pe.exp', (['a'], {}), '(a)\n', (3405, 3408), True, 'import pyomo.environ as pe\n'), ((2179, 2199), 'pyomo.environ.value', 'pe.value', (['model.a[i]'], {}), '(model.a[i])\n', (2187, 2199), True, 'import pyomo.environ as pe\n')] |
import os
import sys
sys.path.append("/src")
import glob
import cv2 as cv
import json
import numpy as np
import tensorflow as tf
import pyquaternion
from utils import tf_utils, helpers, kitti_utils
from utils import box3dImageTransform as box_utils
def create_example(img, scan, label):
    """Serialize one sample (PNG-encoded image bytes, point scan, label dict)
    into a tf.train.Example ready to be written to a TFRecord."""
    float_feature = tf_utils.float_list_feature
    # non-float entries first: raw image bytes, the scan's xyz columns,
    # and the integer class vector
    feature = {
        'image/img': tf_utils.bytes_feature(img),
        'scan/points': float_feature(scan[:, :3].reshape(-1, 1)),
        'label/clf': tf_utils.int64_list_feature(label['clf'].reshape(-1, 1)),
    }
    # every remaining label entry is a float tensor, flattened to a column vector
    for key, value in (('image/orig', label['orig']),
                       ('image/calib', label['calib']),
                       ('label/c_3d', label['c_3d']),
                       ('label/bbox_3d', label['bbox_3d']),
                       ('label/c_2d', label['c_2d']),
                       ('label/bbox_2d', label['bbox_2d']),
                       ('label/extent', label['extent']),
                       ('label/rotation_i', label['ri']),
                       ('label/rotation_j', label['rj'])):
        feature[key] = float_feature(value.reshape(-1, 1))
    return tf.train.Example(features=tf.train.Features(feature=feature))
# Cityscapes label -> training class id: cars are class 0; every other
# annotated vehicle / ignore category collapses into class 1.
CLASS_MAP = {'car': 0}
CLASS_MAP.update({name: 1 for name in (
    'truck', 'bus', 'on rails', 'train', 'motorcycle', 'bicycle',
    'caravan', 'trailer', 'dynamic', 'tunnel', 'ignore')})
def create_records():
    """Convert Cityscapes images + gtBbox3d labels into TFRecord files.

    Reads settings from the module-level ``cfg`` dict (input root, dataset
    splits, output paths, target image size, fixed object-slot count) and
    writes one serialized tf.train.Example per scene via ``create_example``.
    """
    max_objects = cfg['max_objects']  # fixed number of object slots per example
    for dataset, dataset_out in zip(cfg['datasets'], cfg['datasets_out']):
        img_dir = os.path.join(cfg['in_dir'], 'leftImg8bit', dataset)
        label_dir = os.path.join(cfg['in_dir'], 'gtBbox3d', dataset)
        img_files = glob.glob(os.path.join(img_dir, '*/*.png'))
        img_files = sorted(img_files)
        label_files = glob.glob(os.path.join(label_dir, '*/*.json'))
        label_files = sorted(label_files)
        # n_scenes <= 0 means "convert every scene in the split"
        n_scenes = cfg['n_scenes'] if cfg['n_scenes'] > 0 else len(img_files)
        bar = helpers.progbar(n_scenes)
        bar.start()
        with tf.io.TFRecordWriter(dataset_out) as writer:
            for scene_id, (img_file, label_file) in enumerate(zip(img_files, label_files)):
                if scene_id == n_scenes: break
                bar.update(scene_id)
                # the two sorted lists must line up scene-by-scene; the slices
                # strip the differing image / label filename suffixes
                assert os.path.basename(img_file)[:-16] == os.path.basename(label_file)[:-14]
                img_arr = cv.imread(img_file)
                orig_img_size = img_arr.shape[:2]  # (height, width) before resizing
                # cv.resize takes dsize as (width, height); cfg['img_size'] is (h, w)
                img_arr = cv.resize(img_arr, (cfg['img_size'][1], cfg['img_size'][0]), interpolation=cv.INTER_CUBIC)
                _, img = cv.imencode('.png', img_arr)
                img = img.tobytes()  # PNG-encoded bytes stored in the record
                with open(label_file) as json_file:
                    label_dict = json.load(json_file)
                if len(label_dict['objects']) == 0: continue  # skip scenes without objects
                camera = box_utils.Camera(fx=label_dict['sensor']['fx'],
                                          fy=label_dict['sensor']['fy'],
                                          u0=label_dict['sensor']['u0'],
                                          v0=label_dict['sensor']['v0'],
                                          sensor_T_ISO_8855=label_dict['sensor']['sensor_T_ISO_8855'])
                # 4x4 pinhole intrinsics matrix built from the sensor parameters
                K_matrix = np.zeros((4, 4))
                K_matrix[0][0] = label_dict['sensor']['fx']
                K_matrix[0][2] = label_dict['sensor']['u0']
                K_matrix[1][1] = label_dict['sensor']['fy']
                K_matrix[1][2] = label_dict['sensor']['v0']
                K_matrix[2][2] = 1
                # fixed-size label tensors; unused slots keep their init values
                label = {}
                label['calib'] = K_matrix
                label['orig'] = np.array(orig_img_size).astype(np.float32)
                # class 8 marks empty object slots -- presumably filtered by
                # kitti_utils.remove_dontcare below; verify against that helper
                label['clf'] = np.ones((max_objects, 1)) * 8
                label['c_3d'] = np.zeros((max_objects, 3))
                label['extent'] = np.zeros((max_objects, 3))
                label['bbox_3d'] = np.zeros((max_objects, 8, 3))
                label['bbox_2d'] = np.zeros((max_objects, 4))
                label['c_2d'] = np.zeros((max_objects, 2))
                label['ri'] = np.zeros((max_objects, 1))
                label['rj'] = np.zeros((max_objects, 1))
                for idx, obj in enumerate(label_dict['objects'][:max_objects]):
                    bbox = box_utils.Box3dImageTransform(camera)
                    bbox.initialize_box(center=obj['3d']['center'],
                                        quaternion=obj['3d']['rotation'],
                                        size=obj['3d']['dimensions'])
                    # box parameters expressed in the sensor coordinate system (CRS_S)
                    _, center_3d_cam, quaternion = bbox.get_parameters(coordinate_system=box_utils.CRS_S)
                    label['clf'][idx, 0] = CLASS_MAP[obj['label']]
                    label['c_3d'][idx, :] = center_3d_cam
                    label['extent'][idx, :] = [obj['3d']['dimensions'][2],  # height
                                               obj['3d']['dimensions'][1],  # width
                                               obj['3d']['dimensions'][0]]  # length
                    vertices = bbox.get_vertices(coordinate_system=box_utils.CRS_S)
                    for idx_vertice, loc in enumerate(bbox.loc):
                        label['bbox_3d'][idx, idx_vertice, :] = vertices[loc]
                        # dead debug code kept as an inert string expression
                        """
                        print(np.concatenate([vertices[loc], [1]], axis=0))
                        center = np.matmul(K_matrix, np.concatenate([vertices[loc], [1]]))
                        center = center[:2] / center[2]
                        img_arr[int(center[1]), int(center[0]), :] = (255, 255, 255)
                        cv.imshow("View0", img_arr)
                        cv.waitKey(0)
                        exit()"""
                    bbox_2d_xy_hw = obj['2d']['amodal']  # amodal 2D box as [x, y, width, height]
                    bbox_2d_xy_xy = [bbox_2d_xy_hw[0],  # x_1
                                     bbox_2d_xy_hw[1],  # y_1
                                     bbox_2d_xy_hw[0]+bbox_2d_xy_hw[2],  # x_1 + width
                                     bbox_2d_xy_hw[1]+bbox_2d_xy_hw[3]]  # y_1 + height
                    # box centre normalized by the original image size
                    label['c_2d'][idx, :] = [(bbox_2d_xy_xy[0]+((bbox_2d_xy_xy[2]-bbox_2d_xy_xy[0])/2.))/orig_img_size[1],
                                             (bbox_2d_xy_xy[1]+((bbox_2d_xy_xy[3]-bbox_2d_xy_xy[1])/2.))/orig_img_size[0]]
                    # corners normalized to [0, 1] and clipped to the image
                    bbox_2d_xy_xy = np.array([np.clip(bbox_2d_xy_xy[0]/orig_img_size[1], 0, 1),
                                              np.clip(bbox_2d_xy_xy[1]/orig_img_size[0], 0, 1),
                                              np.clip(bbox_2d_xy_xy[2]/orig_img_size[1], 0, 1),
                                              np.clip(bbox_2d_xy_xy[3]/orig_img_size[0], 0, 1)])
                    label['bbox_2d'][idx, :] = bbox_2d_xy_xy
                    # heading encoded as (cos, sin) of the quaternion's pitch angle
                    yaw, pitch, roll = pyquaternion.Quaternion(quaternion).yaw_pitch_roll
                    label['ri'][idx, 0] = np.cos(pitch)
                    label['rj'][idx, 0] = np.sin(pitch)
                scan = np.ones((5, 3))  # dummy points
                label = kitti_utils.remove_dontcare(label)
                tf_example = create_example(img, scan, label)
                writer.write(tf_example.SerializeToString())
if __name__ == '__main__':
    # Conversion settings; create_records() reads this module-level dict.
    cfg = {
        'in_dir': '/cityscapes',  # root of the Cityscapes dataset
        'datasets': ['train', 'val'],  # splits to convert
        'datasets_out': ['/tfrecords/cityscapes_train.tfrecord', '/tfrecords/cityscapes_val.tfrecord'],
        'n_scenes': -1,  # <= 0 means "all scenes in the split"
        'img_size': (1024, 2048),  # target (height, width) for the encoded images
        'max_objects': 22,  # fixed number of object slots per example
    }
    create_records()
| [
"utils.kitti_utils.remove_dontcare",
"numpy.ones",
"numpy.clip",
"numpy.sin",
"cv2.imencode",
"os.path.join",
"sys.path.append",
"pyquaternion.Quaternion",
"utils.tf_utils.bytes_feature",
"cv2.resize",
"utils.box3dImageTransform.Camera",
"os.path.basename",
"tensorflow.train.Features",
"ut... | [((22, 45), 'sys.path.append', 'sys.path.append', (['"""/src"""'], {}), "('/src')\n", (37, 45), False, 'import sys\n'), ((329, 356), 'utils.tf_utils.bytes_feature', 'tf_utils.bytes_feature', (['img'], {}), '(img)\n', (351, 356), False, 'from utils import tf_utils, helpers, kitti_utils\n'), ((1732, 1783), 'os.path.join', 'os.path.join', (["cfg['in_dir']", '"""leftImg8bit"""', 'dataset'], {}), "(cfg['in_dir'], 'leftImg8bit', dataset)\n", (1744, 1783), False, 'import os\n'), ((1804, 1852), 'os.path.join', 'os.path.join', (["cfg['in_dir']", '"""gtBbox3d"""', 'dataset'], {}), "(cfg['in_dir'], 'gtBbox3d', dataset)\n", (1816, 1852), False, 'import os\n'), ((2161, 2186), 'utils.helpers.progbar', 'helpers.progbar', (['n_scenes'], {}), '(n_scenes)\n', (2176, 2186), False, 'from utils import tf_utils, helpers, kitti_utils\n'), ((1316, 1350), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (1333, 1350), True, 'import tensorflow as tf\n'), ((1884, 1916), 'os.path.join', 'os.path.join', (['img_dir', '"""*/*.png"""'], {}), "(img_dir, '*/*.png')\n", (1896, 1916), False, 'import os\n'), ((1989, 2024), 'os.path.join', 'os.path.join', (['label_dir', '"""*/*.json"""'], {}), "(label_dir, '*/*.json')\n", (2001, 2024), False, 'import os\n'), ((2221, 2254), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['dataset_out'], {}), '(dataset_out)\n', (2241, 2254), True, 'import tensorflow as tf\n'), ((2567, 2586), 'cv2.imread', 'cv.imread', (['img_file'], {}), '(img_file)\n', (2576, 2586), True, 'import cv2 as cv\n'), ((2663, 2758), 'cv2.resize', 'cv.resize', (['img_arr', "(cfg['img_size'][1], cfg['img_size'][0])"], {'interpolation': 'cv.INTER_CUBIC'}), "(img_arr, (cfg['img_size'][1], cfg['img_size'][0]), interpolation=\n cv.INTER_CUBIC)\n", (2672, 2758), True, 'import cv2 as cv\n'), ((2779, 2807), 'cv2.imencode', 'cv.imencode', (['""".png"""', 'img_arr'], {}), "('.png', img_arr)\n", (2790, 2807), True, 'import cv2 as 
cv\n'), ((3039, 3249), 'utils.box3dImageTransform.Camera', 'box_utils.Camera', ([], {'fx': "label_dict['sensor']['fx']", 'fy': "label_dict['sensor']['fy']", 'u0': "label_dict['sensor']['u0']", 'v0': "label_dict['sensor']['v0']", 'sensor_T_ISO_8855': "label_dict['sensor']['sensor_T_ISO_8855']"}), "(fx=label_dict['sensor']['fx'], fy=label_dict['sensor'][\n 'fy'], u0=label_dict['sensor']['u0'], v0=label_dict['sensor']['v0'],\n sensor_T_ISO_8855=label_dict['sensor']['sensor_T_ISO_8855'])\n", (3055, 3249), True, 'from utils import box3dImageTransform as box_utils\n'), ((3437, 3453), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (3445, 3453), True, 'import numpy as np\n'), ((3968, 3994), 'numpy.zeros', 'np.zeros', (['(max_objects, 3)'], {}), '((max_objects, 3))\n', (3976, 3994), True, 'import numpy as np\n'), ((4029, 4055), 'numpy.zeros', 'np.zeros', (['(max_objects, 3)'], {}), '((max_objects, 3))\n', (4037, 4055), True, 'import numpy as np\n'), ((4091, 4120), 'numpy.zeros', 'np.zeros', (['(max_objects, 8, 3)'], {}), '((max_objects, 8, 3))\n', (4099, 4120), True, 'import numpy as np\n'), ((4156, 4182), 'numpy.zeros', 'np.zeros', (['(max_objects, 4)'], {}), '((max_objects, 4))\n', (4164, 4182), True, 'import numpy as np\n'), ((4215, 4241), 'numpy.zeros', 'np.zeros', (['(max_objects, 2)'], {}), '((max_objects, 2))\n', (4223, 4241), True, 'import numpy as np\n'), ((4272, 4298), 'numpy.zeros', 'np.zeros', (['(max_objects, 1)'], {}), '((max_objects, 1))\n', (4280, 4298), True, 'import numpy as np\n'), ((4329, 4355), 'numpy.zeros', 'np.zeros', (['(max_objects, 1)'], {}), '((max_objects, 1))\n', (4337, 4355), True, 'import numpy as np\n'), ((7162, 7177), 'numpy.ones', 'np.ones', (['(5, 3)'], {}), '((5, 3))\n', (7169, 7177), True, 'import numpy as np\n'), ((7219, 7253), 'utils.kitti_utils.remove_dontcare', 'kitti_utils.remove_dontcare', (['label'], {}), '(label)\n', (7246, 7253), False, 'from utils import tf_utils, helpers, kitti_utils\n'), ((2930, 2950), 
'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2939, 2950), False, 'import json\n'), ((3906, 3931), 'numpy.ones', 'np.ones', (['(max_objects, 1)'], {}), '((max_objects, 1))\n', (3913, 3931), True, 'import numpy as np\n'), ((4463, 4500), 'utils.box3dImageTransform.Box3dImageTransform', 'box_utils.Box3dImageTransform', (['camera'], {}), '(camera)\n', (4492, 4500), True, 'from utils import box3dImageTransform as box_utils\n'), ((7068, 7081), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (7074, 7081), True, 'import numpy as np\n'), ((7124, 7137), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (7130, 7137), True, 'import numpy as np\n'), ((2469, 2495), 'os.path.basename', 'os.path.basename', (['img_file'], {}), '(img_file)\n', (2485, 2495), False, 'import os\n'), ((2505, 2533), 'os.path.basename', 'os.path.basename', (['label_file'], {}), '(label_file)\n', (2521, 2533), False, 'import os\n'), ((3831, 3854), 'numpy.array', 'np.array', (['orig_img_size'], {}), '(orig_img_size)\n', (3839, 3854), True, 'import numpy as np\n'), ((6975, 7010), 'pyquaternion.Quaternion', 'pyquaternion.Quaternion', (['quaternion'], {}), '(quaternion)\n', (6998, 7010), False, 'import pyquaternion\n'), ((6534, 6584), 'numpy.clip', 'np.clip', (['(bbox_2d_xy_xy[0] / orig_img_size[1])', '(0)', '(1)'], {}), '(bbox_2d_xy_xy[0] / orig_img_size[1], 0, 1)\n', (6541, 6584), True, 'import numpy as np\n'), ((6630, 6680), 'numpy.clip', 'np.clip', (['(bbox_2d_xy_xy[1] / orig_img_size[0])', '(0)', '(1)'], {}), '(bbox_2d_xy_xy[1] / orig_img_size[0], 0, 1)\n', (6637, 6680), True, 'import numpy as np\n'), ((6726, 6776), 'numpy.clip', 'np.clip', (['(bbox_2d_xy_xy[2] / orig_img_size[1])', '(0)', '(1)'], {}), '(bbox_2d_xy_xy[2] / orig_img_size[1], 0, 1)\n', (6733, 6776), True, 'import numpy as np\n'), ((6822, 6872), 'numpy.clip', 'np.clip', (['(bbox_2d_xy_xy[3] / orig_img_size[0])', '(0)', '(1)'], {}), '(bbox_2d_xy_xy[3] / orig_img_size[0], 0, 1)\n', (6829, 6872), True, 'import numpy 
as np\n')] |
"""
Kinetic Reaction Scheme Functions for Fast Pyrolysis of Biomass.
Each function is for a particular kinetic scheme.
Reference for each scheme is provided as main author and publication year.
"""
# modules
# -----------------------------------------------------------------------------
import numpy as np
# Sadhukhan2009
# volatiles+gases, char, primary and secondary reactions
# -----------------------------------------------------------------------------
def kn1(T, pw, pc, pg, dt, i, H):
    """One explicit time step of the Sadhukhan 2009 pyrolysis scheme.

    Reactions: biomass -> volatiles+gases, biomass -> char, plus a
    secondary (vol+gases)1 -> (vol+gases)2 step whose rate involves the
    char concentration.

    Parameters
    ----------
    T : temperature history per time step, K
    pw, pc, pg : wood / char / gas density histories, kg/m^3
    dt : time step, s
    i : current time-step index (densities are read at i-1)
    H : heat of reaction for the generation term

    Returns
    -------
    (pww, pcc, pgg, g) : updated wood, char, gas densities and heat generation
    """
    R = 0.008314  # universal gas constant, kJ/mol*K
    # Arrhenius rate constants k = A * exp(-E/(R*T)); A in 1/s, E in kJ/mol
    K1 = 168.4 * np.exp(-51.965 / (R * T[i]))  # biomass -> volatiles + gases
    K2 = 13.2 * np.exp(-45.960 / (R * T[i]))   # biomass -> char
    K3 = 5.7e6 * np.exp(-92.4 / (R * T[i]))    # (vol+gases)1 -> (vol+gases)2
    # state at the previous time step
    wood, char, gas = pw[i-1], pc[i-1], pg[i-1]
    # reaction rates, (kg/m^3)/s
    rate_wood = -(K1 + K2) * wood
    rate_gas1 = K1 * wood - K3 * gas * char
    rate_char1 = K2 * wood - K3 * gas * char
    rate_gas2 = K3 * gas * char
    rate_char2 = K3 * gas * char
    # explicit Euler update of the densities, kg/m^3
    pww = wood + rate_wood * dt
    pcc = char + (rate_char1 + rate_char2) * dt
    pgg = gas + (rate_gas1 + rate_gas2) * dt
    # heat generation from the pyrolysis rate of the updated wood density
    g = H * (-K1 * pww)
    return pww, pcc, pgg, g
# Chan1985, Blasi1993b
# primary and secondary reactions
# -----------------------------------------------------------------------------
def kn2(T, pw, pc, pg, pt, dt, i, H):
    """One explicit time step of the Chan 1985 / Di Blasi 1993 scheme with
    primary (wood -> gas/tar/char) and secondary (tar -> gas/char) reactions.

    Parameters
    ----------
    T : temperature history, K
    pw, pc, pg, pt : wood / char / gas / tar density histories, kg/m^3
    dt : time step, s
    i : current time-step index (densities are read at i-1)
    H : heat of reaction for the generation term

    Returns
    -------
    (pww, pcc, pgg, ptt, g) : updated densities and heat generation, W/m^3
    """
    R = 0.008314  # universal gas constant, kJ/mol*K

    def rate_constant(A, E):
        # Arrhenius rate constant k = A * exp(-E/(R*T)); A in 1/s, E in kJ/mol
        return A * np.exp(-E / (R * T[i]))

    K1 = rate_constant(1.3e8, 140)   # wood -> gas
    K2 = rate_constant(2e8, 133)     # wood -> tar
    K3 = rate_constant(1.08e7, 121)  # wood -> char
    K4 = rate_constant(4.28e6, 108)  # tar -> gas
    K5 = rate_constant(1e6, 108)     # tar -> char
    wood, tar = pw[i-1], pt[i-1]  # state at the previous time step
    # reaction rates, (kg/m^3)/s
    rww = -(K1 + K2 + K3) * wood  # wood consumption
    rwg = K1 * wood               # wood -> gas
    rwt = K2 * wood               # wood -> tar
    rwc = K3 * wood               # wood -> char
    rtg = K4 * tar                # tar -> gas
    rtc = K5 * tar                # tar -> char
    # explicit Euler update of the densities, kg/m^3
    pww = wood + rww * dt
    pgg = pg[i-1] + (rwg + rtg) * dt
    ptt = tar + (rwt - rtg - rtc) * dt
    pcc = pc[i-1] + (rwc + rtc) * dt
    g = H * rww  # heat generation, W/m^3
    return pww, pcc, pgg, ptt, g
# Chan1985
# moisture content, heat of vaporization, no secondary reactions
# -----------------------------------------------------------------------------
def kn3(T, pw, pc, pg, pt, pwa, pva, dt, i, H):
    """One explicit time step of the Chan 1985 primary pyrolysis scheme with
    moisture evaporation and no secondary tar reactions.

    Parameters
    ----------
    T : temperature history, K
    pw, pc, pg, pt : wood / char / gas / tar density histories, kg/m^3
    pwa, pva : liquid water / water vapor density histories, kg/m^3
    dt : time step, s
    i : current time-step index (densities are read at i-1)
    H : heat of reaction for the generation term

    Returns
    -------
    (pww, pcc, pgg, ptt, pwwa, pvva, g) : updated densities and heat generation
    """
    R = 0.008314  # universal gas constant, kJ/mol*K

    def rate_constant(A, E):
        # Arrhenius rate constant k = A * exp(-E/(R*T)); A in 1/s, E in kJ/mol
        return A * np.exp(-E / (R * T[i]))

    K1 = rate_constant(1.3e8, 140)    # wood -> gas
    K2 = rate_constant(2e8, 133)      # wood -> tar
    K3 = rate_constant(1.08e7, 121)   # wood -> char
    Kw = rate_constant(5.13e6, 87.9)  # water -> vapor
    wood, water = pw[i-1], pwa[i-1]  # state at the previous time step
    # reaction rates, (kg/m^3)/s
    rww = -(K1 + K2 + K3) * wood  # wood consumption
    rwg = K1 * wood               # wood -> gas
    rwt = K2 * wood               # wood -> tar
    rwc = K3 * wood               # wood -> char
    rwa = -Kw * water             # water loss (vaporization)
    rva = Kw * water              # vapor production
    # explicit Euler update of the densities, kg/m^3
    pww = wood + rww * dt
    pgg = pg[i-1] + rwg * dt
    ptt = pt[i-1] + rwt * dt
    pcc = pc[i-1] + rwc * dt
    pwwa = water + rwa * dt
    pvva = pva[i-1] + rva * dt
    Hv = 2260000  # heat of vaporization, J/kg
    # the vaporization term acts as a heat sink since rwa <= 0
    g = H * rww + Hv * rwa  # heat generation, W/m^3
    return pww, pcc, pgg, ptt, pwwa, pvva, g
# Chan1985, Blasi1993b
# moisture content, heat of vaporization, primary and secondary reactions
# -----------------------------------------------------------------------------
def kn4(T, pw, pc, pg, pt, pwa, pva, dt, i, H):
    """One explicit time step of the Chan 1985 / Di Blasi 1993 scheme with
    both secondary tar reactions and moisture evaporation.

    Parameters
    ----------
    T : temperature history, K
    pw, pc, pg, pt : wood / char / gas / tar density histories, kg/m^3
    pwa, pva : liquid water / water vapor density histories, kg/m^3
    dt : time step, s
    i : current time-step index (densities are read at i-1)
    H : heat of reaction for the generation term

    Returns
    -------
    (pww, pcc, pgg, ptt, pwwa, pvva, g) : updated densities and heat generation
    """
    R = 0.008314  # universal gas constant, kJ/mol*K

    def rate_constant(A, E):
        # Arrhenius rate constant k = A * exp(-E/(R*T)); A in 1/s, E in kJ/mol
        return A * np.exp(-E / (R * T[i]))

    K1 = rate_constant(1.3e8, 140)    # wood -> gas
    K2 = rate_constant(2e8, 133)      # wood -> tar
    K3 = rate_constant(1.08e7, 121)   # wood -> char
    K4 = rate_constant(4.28e6, 108)   # tar -> gas
    K5 = rate_constant(1e6, 108)      # tar -> char
    Kw = rate_constant(5.13e6, 87.9)  # water -> vapor
    # state at the previous time step
    wood, tar, water = pw[i-1], pt[i-1], pwa[i-1]
    # reaction rates, (kg/m^3)/s
    rww = -(K1 + K2 + K3) * wood  # wood consumption
    rwg = K1 * wood               # wood -> gas
    rwt = K2 * wood               # wood -> tar
    rwc = K3 * wood               # wood -> char
    rtg = K4 * tar                # tar -> gas
    rtc = K5 * tar                # tar -> char
    rwa = -Kw * water             # water loss (vaporization)
    rva = Kw * water              # vapor production
    # explicit Euler update of the densities, kg/m^3
    pww = wood + rww * dt
    pgg = pg[i-1] + (rwg + rtg) * dt
    ptt = tar + (rwt - rtg - rtc) * dt
    pcc = pc[i-1] + (rwc + rtc) * dt
    pwwa = water + rwa * dt
    pvva = pva[i-1] + rva * dt
    Hv = 2260000  # heat of vaporization, J/kg
    # the vaporization term acts as a heat sink since rwa <= 0
    g = H * rww + Hv * rwa  # heat generation, W/m^3
    return pww, pcc, pgg, ptt, pwwa, pvva, g
| [
"numpy.exp"
] | [((881, 905), 'numpy.exp', 'np.exp', (['(-E1 / (R * T[i]))'], {}), '(-E1 / (R * T[i]))\n', (887, 905), True, 'import numpy as np\n'), ((952, 976), 'numpy.exp', 'np.exp', (['(-E2 / (R * T[i]))'], {}), '(-E2 / (R * T[i]))\n', (958, 976), True, 'import numpy as np\n'), ((1010, 1034), 'numpy.exp', 'np.exp', (['(-E3 / (R * T[i]))'], {}), '(-E3 / (R * T[i]))\n', (1016, 1034), True, 'import numpy as np\n'), ((2469, 2493), 'numpy.exp', 'np.exp', (['(-E1 / (R * T[i]))'], {}), '(-E1 / (R * T[i]))\n', (2475, 2493), True, 'import numpy as np\n'), ((2523, 2547), 'numpy.exp', 'np.exp', (['(-E2 / (R * T[i]))'], {}), '(-E2 / (R * T[i]))\n', (2529, 2547), True, 'import numpy as np\n'), ((2577, 2601), 'numpy.exp', 'np.exp', (['(-E3 / (R * T[i]))'], {}), '(-E3 / (R * T[i]))\n', (2583, 2601), True, 'import numpy as np\n'), ((2632, 2656), 'numpy.exp', 'np.exp', (['(-E4 / (R * T[i]))'], {}), '(-E4 / (R * T[i]))\n', (2638, 2656), True, 'import numpy as np\n'), ((2685, 2709), 'numpy.exp', 'np.exp', (['(-E5 / (R * T[i]))'], {}), '(-E5 / (R * T[i]))\n', (2691, 2709), True, 'import numpy as np\n'), ((4183, 4207), 'numpy.exp', 'np.exp', (['(-E1 / (R * T[i]))'], {}), '(-E1 / (R * T[i]))\n', (4189, 4207), True, 'import numpy as np\n'), ((4237, 4261), 'numpy.exp', 'np.exp', (['(-E2 / (R * T[i]))'], {}), '(-E2 / (R * T[i]))\n', (4243, 4261), True, 'import numpy as np\n'), ((4291, 4315), 'numpy.exp', 'np.exp', (['(-E3 / (R * T[i]))'], {}), '(-E3 / (R * T[i]))\n', (4297, 4315), True, 'import numpy as np\n'), ((4346, 4370), 'numpy.exp', 'np.exp', (['(-Ew / (R * T[i]))'], {}), '(-Ew / (R * T[i]))\n', (4352, 4370), True, 'import numpy as np\n'), ((6099, 6123), 'numpy.exp', 'np.exp', (['(-E1 / (R * T[i]))'], {}), '(-E1 / (R * T[i]))\n', (6105, 6123), True, 'import numpy as np\n'), ((6153, 6177), 'numpy.exp', 'np.exp', (['(-E2 / (R * T[i]))'], {}), '(-E2 / (R * T[i]))\n', (6159, 6177), True, 'import numpy as np\n'), ((6207, 6231), 'numpy.exp', 'np.exp', (['(-E3 / (R * T[i]))'], {}), '(-E3 / (R * 
T[i]))\n', (6213, 6231), True, 'import numpy as np\n'), ((6262, 6286), 'numpy.exp', 'np.exp', (['(-E4 / (R * T[i]))'], {}), '(-E4 / (R * T[i]))\n', (6268, 6286), True, 'import numpy as np\n'), ((6315, 6339), 'numpy.exp', 'np.exp', (['(-E5 / (R * T[i]))'], {}), '(-E5 / (R * T[i]))\n', (6321, 6339), True, 'import numpy as np\n'), ((6369, 6393), 'numpy.exp', 'np.exp', (['(-Ew / (R * T[i]))'], {}), '(-Ew / (R * T[i]))\n', (6375, 6393), True, 'import numpy as np\n')] |
#
# Copyright (c) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from constants import PREDICTION_SERVICE, ERROR_SHAPE
from utils.grpc import infer, get_model_metadata, model_metadata_response
from utils.rest import infer_rest, get_model_metadata_response_rest
class TestSingleModelMappingInference():

    def test_run_inference(self, age_gender_model_downloader,
                           create_grpc_channel,
                           start_server_with_mapping):
        """gRPC inference smoke test against the age_gender model served
        with a custom input-name mapping: send an all-ones tensor and
        check that both output tensors come back with the expected shapes.
        """
        _, ports = start_server_with_mapping
        print("Downloaded model files:", age_gender_model_downloader)
        # Open a channel to the prediction service
        grpc_address = 'localhost:{}'.format(ports["grpc_port"])
        stub = create_grpc_channel(grpc_address, PREDICTION_SERVICE)
        input_batch = np.ones((1, 3, 62, 62))
        output = infer(input_batch, input_tensor='new_key', grpc_stub=stub,
                       model_spec_name='age_gender',
                       model_spec_version=None,
                       output_tensors=['age', 'gender'])
        print("output shape", output['age'].shape)
        print("output shape", output['gender'].shape)
        assert output['age'].shape == (1, 1, 1, 1), ERROR_SHAPE
        assert output['gender'].shape == (1, 2, 1, 1), ERROR_SHAPE

    def test_get_model_metadata(self, age_gender_model_downloader,
                                create_grpc_channel,
                                start_server_with_mapping):
        """Check that gRPC GetModelMetadata reports the mapped input name
        and both outputs with the expected dtypes and shapes."""
        _, ports = start_server_with_mapping
        print("Downloaded model files:", age_gender_model_downloader)
        stub = create_grpc_channel('localhost:{}'.format(ports["grpc_port"]),
                                   PREDICTION_SERVICE)
        model_name = 'age_gender'
        expected_input_metadata = {
            'new_key': {'dtype': 1, 'shape': [1, 3, 62, 62]}}
        expected_output_metadata = {
            'age': {'dtype': 1, 'shape': [1, 1, 1, 1]},
            'gender': {'dtype': 1, 'shape': [1, 2, 1, 1]}}
        request = get_model_metadata(model_name=model_name)
        response = stub.GetModelMetadata(request, 10)
        print("response", response)
        input_metadata, output_metadata = model_metadata_response(
            response=response)
        assert model_name == response.model_spec.name
        assert expected_input_metadata == input_metadata
        assert expected_output_metadata == output_metadata

    @pytest.mark.parametrize("request_format",
                             ['row_name', 'row_noname',
                              'column_name', 'column_noname'])
    def test_run_inference_rest(self, age_gender_model_downloader,
                                start_server_with_mapping, request_format):
        """REST inference smoke test, repeated for every supported request
        body layout (row/column x named/unnamed): send an all-ones tensor
        and check the output tensor shapes."""
        _, ports = start_server_with_mapping
        print("Downloaded model files:", age_gender_model_downloader)
        input_batch = np.ones((1, 3, 62, 62))
        rest_url = 'http://localhost:{}/v1/models/age_gender:predict'.format(
            ports["rest_port"])
        output = infer_rest(input_batch, input_tensor='new_key',
                            rest_url=rest_url,
                            output_tensors=['age', 'gender'],
                            request_format=request_format)
        print("output shape", output['age'].shape)
        print("output shape", output['gender'].shape)
        print(output)
        assert output['age'].shape == (1, 1, 1, 1), ERROR_SHAPE
        assert output['gender'].shape == (1, 2, 1, 1), ERROR_SHAPE

    def test_get_model_metadata_rest(self, age_gender_model_downloader,
                                     start_server_with_mapping):
        """REST counterpart of the metadata test: fetch model metadata over
        HTTP and compare it against the expected tensor descriptions."""
        _, ports = start_server_with_mapping
        print("Downloaded model files:", age_gender_model_downloader)
        model_name = 'age_gender'
        expected_input_metadata = {
            'new_key': {'dtype': 1, 'shape': [1, 3, 62, 62]}}
        expected_output_metadata = {
            'age': {'dtype': 1, 'shape': [1, 1, 1, 1]},
            'gender': {'dtype': 1, 'shape': [1, 2, 1, 1]}}
        rest_url = 'http://localhost:{}/v1/models/age_gender/metadata'.format(
            ports["rest_port"])
        response = get_model_metadata_response_rest(rest_url)
        print("response", response)
        input_metadata, output_metadata = model_metadata_response(
            response=response)
        assert model_name == response.model_spec.name
        assert expected_input_metadata == input_metadata
        assert expected_output_metadata == output_metadata
| [
"numpy.ones",
"utils.grpc.model_metadata_response",
"utils.rest.get_model_metadata_response_rest",
"utils.rest.infer_rest",
"pytest.mark.parametrize",
"utils.grpc.infer",
"utils.grpc.get_model_metadata"
] | [((3616, 3721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""request_format"""', "['row_name', 'row_noname', 'column_name', 'column_noname']"], {}), "('request_format', ['row_name', 'row_noname',\n 'column_name', 'column_noname'])\n", (3639, 3721), False, 'import pytest\n'), ((1806, 1829), 'numpy.ones', 'np.ones', (['(1, 3, 62, 62)'], {}), '((1, 3, 62, 62))\n', (1813, 1829), True, 'import numpy as np\n'), ((1850, 2002), 'utils.grpc.infer', 'infer', (['imgs_v1_224'], {'input_tensor': '"""new_key"""', 'grpc_stub': 'stub', 'model_spec_name': '"""age_gender"""', 'model_spec_version': 'None', 'output_tensors': "['age', 'gender']"}), "(imgs_v1_224, input_tensor='new_key', grpc_stub=stub, model_spec_name=\n 'age_gender', model_spec_version=None, output_tensors=['age', 'gender'])\n", (1855, 2002), False, 'from utils.grpc import infer, get_model_metadata, model_metadata_response\n'), ((3199, 3240), 'utils.grpc.get_model_metadata', 'get_model_metadata', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (3217, 3240), False, 'from utils.grpc import infer, get_model_metadata, model_metadata_response\n'), ((3376, 3418), 'utils.grpc.model_metadata_response', 'model_metadata_response', ([], {'response': 'response'}), '(response=response)\n', (3399, 3418), False, 'from utils.grpc import infer, get_model_metadata, model_metadata_response\n'), ((4549, 4572), 'numpy.ones', 'np.ones', (['(1, 3, 62, 62)'], {}), '((1, 3, 62, 62))\n', (4556, 4572), True, 'import numpy as np\n'), ((4710, 4845), 'utils.rest.infer_rest', 'infer_rest', (['imgs_v1_224'], {'input_tensor': '"""new_key"""', 'rest_url': 'rest_url', 'output_tensors': "['age', 'gender']", 'request_format': 'request_format'}), "(imgs_v1_224, input_tensor='new_key', rest_url=rest_url,\n output_tensors=['age', 'gender'], request_format=request_format)\n", (4720, 4845), False, 'from utils.rest import infer_rest, get_model_metadata_response_rest\n'), ((6021, 6063), 
'utils.rest.get_model_metadata_response_rest', 'get_model_metadata_response_rest', (['rest_url'], {}), '(rest_url)\n', (6053, 6063), False, 'from utils.rest import infer_rest, get_model_metadata_response_rest\n'), ((6144, 6186), 'utils.grpc.model_metadata_response', 'model_metadata_response', ([], {'response': 'response'}), '(response=response)\n', (6167, 6186), False, 'from utils.grpc import infer, get_model_metadata, model_metadata_response\n')] |
import pandas as pd
from mne.event import define_target_events
import mne
import numpy as np
def listen_italian_epoch(raw, mat,Tmin, Tmax):
    """Epoch the raw EEG around speech-onset triggers, keeping only trials
    that pass the behavioural filter, split into three speech conditions.

    Parameters
    ----------
    raw : mne Raw object with 'Trigger' and 'trial_no' stim channels.
    mat : loaded .mat structure with behavioural data under
        mat['behaviour']['condition'] and mat['behaviour']['response'].
    Tmin, Tmax : epoch start / end relative to the event, in seconds.
        Trials whose speech offset occurs within Tmax of the onset are
        dropped (i.e. only stimuli longer than Tmax are kept).

    Returns
    -------
    epochs : mne.Epochs with event ids {'hyper': 1, 'normal': 2, 'hypo': 3}.
    """
    # extract trials of Tmax seconds, remove wrong-answer trials and
    # separate the remainder into three conditions
    events = mne.find_events(raw, stim_channel='Trigger')
    reference_id = 105  # trigger code for speech onset
    target_id = 106  # trigger code for speech offset
    sfreq = raw.info['sfreq']  # sampling rate
    tmin = 0
    new_id = 99  # the new event id for a hit. If None, reference_id is used.
    fill_na = 105  # the fill value for misses
    events_, lag = define_target_events(events, reference_id, target_id,sfreq, tmin, Tmax, new_id, fill_na)
    # keep trials still marked with fill_na (105), i.e. those where no speech
    # offset followed within Tmax of the onset; +1 converts the 0-based row
    # positions to 1-based trial numbers (matching df.index below)
    events_ = np.where(events_[:,2] == 105)[0] +1
    # behaviour: drop wrong-answer trials and split into three conditions
    condition= mat['behaviour']['condition'][0]
    response= mat['behaviour']['response'][0]
    a = np.hstack((condition[0],response[0]))
    df = pd.DataFrame({'condition':a[:,0],'response':a[:,1]})
    df.index = df.index + 1  # 1-based trial numbering
    # response == 0 presumably codes a correct answer -- TODO confirm coding
    hyper = df.loc[(df['condition'] == 1) & (df['response'] == 0)]
    normal = df.loc[(df['condition'] == 2) & (df['response'] == 0)]
    hypo = df.loc[(df['condition'] == 3) & (df['response'] == 0)]
    events = mne.find_events(raw, stim_channel='trial_no')
    # trial numbers that survive both the duration and behavioural criteria
    hyper = np.intersect1d(events_, hyper.index.values)
    normal = np.intersect1d(events_, normal.index.values)
    hypo = np.intersect1d(events_, hypo.index.values)
    # map 1-based trial numbers back to event rows and tag each condition id
    hyper = events[hyper-1]
    hyper[:,2] = 1
    normal = events[normal-1]
    normal[:,2] = 2
    hypo = events[hypo-1]
    hypo[:,2] = 3
    a = np.vstack((hyper,normal,hypo))
    # NOTE(review): np.sort(a, axis=0) sorts each column independently, which
    # can decouple sample times (col 0) from condition ids (col 2); a
    # row-preserving sort such as a[a[:, 0].argsort()] may be what is
    # intended here -- verify against the analysis downstream.
    events = np.sort(a, axis=0)
    # epoching
    reject = dict(eeg=180e-6)  # peak-to-peak EEG rejection threshold
    event_id = {'hyper': 1,'normal': 2,'hypo': 3}
    # Set up indices of channels to include in analysis (EEG only)
    picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,misc=False)
    epochs_params = dict(events=events, picks=picks,event_id=event_id, tmin=Tmin, tmax=Tmax,reject=reject,preload=True)
    epochs = mne.Epochs(raw, **epochs_params)
    return epochs
"pandas.DataFrame",
"mne.pick_types",
"numpy.hstack",
"mne.find_events",
"numpy.sort",
"mne.Epochs",
"mne.event.define_target_events",
"numpy.where",
"numpy.intersect1d",
"numpy.vstack"
] | [((333, 377), 'mne.find_events', 'mne.find_events', (['raw'], {'stim_channel': '"""Trigger"""'}), "(raw, stim_channel='Trigger')\n", (348, 377), False, 'import mne\n'), ((639, 732), 'mne.event.define_target_events', 'define_target_events', (['events', 'reference_id', 'target_id', 'sfreq', 'tmin', 'Tmax', 'new_id', 'fill_na'], {}), '(events, reference_id, target_id, sfreq, tmin, Tmax,\n new_id, fill_na)\n', (659, 732), False, 'from mne.event import define_target_events\n'), ((954, 992), 'numpy.hstack', 'np.hstack', (['(condition[0], response[0])'], {}), '((condition[0], response[0]))\n', (963, 992), True, 'import numpy as np\n'), ((999, 1056), 'pandas.DataFrame', 'pd.DataFrame', (["{'condition': a[:, 0], 'response': a[:, 1]}"], {}), "({'condition': a[:, 0], 'response': a[:, 1]})\n", (1011, 1056), True, 'import pandas as pd\n'), ((1284, 1329), 'mne.find_events', 'mne.find_events', (['raw'], {'stim_channel': '"""trial_no"""'}), "(raw, stim_channel='trial_no')\n", (1299, 1329), False, 'import mne\n'), ((1339, 1382), 'numpy.intersect1d', 'np.intersect1d', (['events_', 'hyper.index.values'], {}), '(events_, hyper.index.values)\n', (1353, 1382), True, 'import numpy as np\n'), ((1393, 1437), 'numpy.intersect1d', 'np.intersect1d', (['events_', 'normal.index.values'], {}), '(events_, normal.index.values)\n', (1407, 1437), True, 'import numpy as np\n'), ((1446, 1488), 'numpy.intersect1d', 'np.intersect1d', (['events_', 'hypo.index.values'], {}), '(events_, hypo.index.values)\n', (1460, 1488), True, 'import numpy as np\n'), ((1618, 1650), 'numpy.vstack', 'np.vstack', (['(hyper, normal, hypo)'], {}), '((hyper, normal, hypo))\n', (1627, 1650), True, 'import numpy as np\n'), ((1659, 1677), 'numpy.sort', 'np.sort', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (1666, 1677), True, 'import numpy as np\n'), ((1827, 1912), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(False)', 'eeg': '(True)', 'stim': '(False)', 'eog': '(False)', 'misc': '(False)'}), '(raw.info, 
meg=False, eeg=True, stim=False, eog=False, misc=False\n )\n', (1841, 1912), False, 'import mne\n'), ((2034, 2066), 'mne.Epochs', 'mne.Epochs', (['raw'], {}), '(raw, **epochs_params)\n', (2044, 2066), False, 'import mne\n'), ((740, 770), 'numpy.where', 'np.where', (['(events_[:, 2] == 105)'], {}), '(events_[:, 2] == 105)\n', (748, 770), True, 'import numpy as np\n')] |
## @ingroup Methods-Weights-Buildups-Common
# prop.py
#
# Created: Jun 2017, <NAME>
# Modified: Apr 2018, J. Smart
# Mar 2020, <NAME>
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
from SUAVE.Attributes.Solids import (
Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib)
import numpy as np
import copy as cp
#-------------------------------------------------------------------------------
# Prop
#-------------------------------------------------------------------------------
## @ingroup Methods-Weights-Buildups-Common
def prop(prop,
         maximum_lifting_thrust,
         chord_to_radius_ratio = 0.1,
         thickness_to_chord = 0.12,
         root_to_radius_ratio = 0.1,
         moment_to_lift_ratio = 0.02,
         spanwise_analysis_points = 5,
         safety_factor = 1.5,
         margin_factor = 1.2,
         forward_web_locations = [0.25, 0.35],
         shear_center = 0.25,
         speed_of_sound = 340.294,
         tip_max_mach_number = 0.65):
    """weight = SUAVE.Methods.Weights.Buildups.Common.prop(
            prop,
            maximum_lifting_thrust,
            chord_to_radius_ratio = 0.1,
            thickness_to_chord = 0.12,
            root_to_radius_ratio = 0.1,
            moment_to_lift_ratio = 0.02,
            spanwise_analysis_points = 5,
            safety_factor = 1.5,
            margin_factor = 1.2,
            forward_web_locations = [0.25, 0.35],
            shear_center = 0.25,
            speed_of_sound = 340.294,
            tip_max_mach_number = 0.65)
        Assumptions:
        Calculates the mass of a single propeller blade for an eVTOL vehicle
        based on the assumption of a NACA four-digit airfoil section, an
        assumed cm/cl, a tip Mach limit, and simplified structural geometry.
        The blade structure is sized iteratively until the section mass
        distribution converges.
        Intended for use with the following SUAVE vehicle types, but may be
        used elsewhere:
            Electric Multicopter
            Electric Vectored_Thrust
            Electric Stopped Rotor
        Originally written as part of an AA 290 project intended for trade
        study of the above vehicle types.
        If the vehicle model does not have material properties assigned,
        appropriate assumptions are made based on SUAVE's Solids Attributes
        library.
        Sources:
        Project Vahana Conceptual Trade Study
        Inputs:
            prop                        SUAVE Propeller Data Structure
            maximum_lifting_thrust      Maximum Design Thrust               [N]
            chord_to_radius_ratio       Chord to Blade Radius               [Unitless]
            thickness_to_chord          Blade Thickness to Chord            [Unitless]
            root_to_radius_ratio        Root Structure to Blade Radius      [Unitless]
            moment_to_lift_ratio        Coeff. of Moment to Coeff. of Lift  [Unitless]
            spanwise_analysis_points    Analysis Points for Sizing          [Unitless]
            safety_factor               Design Safety Factor                [Unitless]
            margin_factor               Allowable Extra Mass Fraction       [Unitless]
            forward_web_locations       Location of Forward Spar Webbing    [m]
            shear_center                Location of Shear Center            [m]
            speed_of_sound              Local Speed of Sound                [m/s]
            tip_max_mach_number         Allowable Tip Mach Number           [Unitless]
        Outputs:
            weight:                     Propeller Blade Mass                [kg]
        Properties Used:
        Material properties of imported SUAVE Solids
    """
    #-------------------------------------------------------------------------------
    # Unpack Inputs
    #-------------------------------------------------------------------------------
    # NOTE: the mutable default for forward_web_locations is safe here because
    # it is deep-copied before the in-place update further below.
    rProp = prop.tip_radius
    maxLiftingThrust = maximum_lifting_thrust
    nBlades = prop.number_of_blades
    chord = rProp * chord_to_radius_ratio
    N = spanwise_analysis_points
    SF = safety_factor
    toc = thickness_to_chord
    fwdWeb = cp.deepcopy(forward_web_locations)
    xShear = shear_center
    rootLength = rProp * root_to_radius_ratio
    grace = margin_factor
    sound = speed_of_sound
    tipMach = tip_max_mach_number
    cmocl = moment_to_lift_ratio
    #-------------------------------------------------------------------------------
    # Unpack Material Properties
    #-------------------------------------------------------------------------------
    # Each lookup falls back to a default SUAVE solid when the vehicle model
    # does not assign that material. All handlers catch AttributeError only;
    # the previous bare `except:` clauses on the leading-edge and cover
    # materials were narrowed to match the siblings.
    try:
        torsMat = prop.materials.skin_materials.torsion_carrier
    except AttributeError:
        torsMat = Bidirectional_Carbon_Fiber()
    torsUSS = torsMat.ultimate_shear_strength
    torsMGT = torsMat.minimum_gage_thickness
    torsDen = torsMat.density
    try:
        shearMat = prop.materials.spar_materials.shear_carrier
    except AttributeError:
        shearMat = Bidirectional_Carbon_Fiber()
    shearUSS = shearMat.ultimate_shear_strength
    shearMGT = shearMat.minimum_gage_thickness
    shearDen = shearMat.density
    try:
        bendMat = prop.materials.flap_materials.bending_carrier
    except AttributeError:
        bendMat = Unidirectional_Carbon_Fiber()
    bendDen = bendMat.density
    bendUTS = bendMat.ultimate_tensile_strength
    bendUSS = bendMat.ultimate_shear_strength
    try:
        coreMat = prop.materials.skin_materials.core
    except AttributeError:
        coreMat = Carbon_Fiber_Honeycomb()
    coreDen = coreMat.density
    try:
        ribMat = prop.materials.rib_materials.structural
    except AttributeError:
        ribMat = Aluminum_Rib()
    ribWid = ribMat.minimum_width
    ribMGT = ribMat.minimum_gage_thickness
    ribDen = ribMat.density
    try:
        rootMat = prop.materials.root_materials.structural
    except AttributeError:
        rootMat = Aluminum()
    rootDen = rootMat.density
    rootUTS = rootMat.ultimate_tensile_strength
    try:
        leMat = prop.materials.skin_materials.leading_edge
    except AttributeError:
        leMat = Nickel()
    leDen = leMat.density
    try:
        glueMat = prop.materials.skin_materials.adhesive
    except AttributeError:
        glueMat = Epoxy()
    glueMGT = glueMat.minimum_gage_thickness
    glueDen = glueMat.density
    try:
        coverMat = prop.materials.skin_materials.cover
    except AttributeError:
        coverMat = Paint()
    coverMGT = coverMat.minimum_gage_thickness
    coverDen = coverMat.density
    #-------------------------------------------------------------------------------
    # Airfoil
    #-------------------------------------------------------------------------------
    # NACA four-digit symmetric thickness polynomial coefficients, scaled by
    # 5*t/c; evaluated on the chordwise stations to build the section outline.
    NACA = np.multiply(5 * toc, [0.2969, -0.1260, -0.3516, 0.2843, -0.1015])
    coord = np.unique(fwdWeb+np.linspace(0,1,N).tolist())[:,np.newaxis]
    coordMAT = np.concatenate((coord**0.5,coord,coord**2,coord**3,coord**4),axis=1)
    nacaMAT = coordMAT.dot(NACA)[:, np.newaxis]
    coord = np.concatenate((coord,nacaMAT),axis=1)
    # Mirror the upper surface to obtain the closed section outline, then
    # shift the chordwise origin to the shear center.
    coord = np.concatenate((coord[-1:0:-1],coord.dot(np.array([[1.,0.],[0.,-1.]]))),axis=0)
    coord[:,0] = coord[:,0] - xShear
    #-------------------------------------------------------------------------------
    # Beam Geometry
    #-------------------------------------------------------------------------------
    x = np.linspace(0,rProp,N)
    dx = x[1] - x[0]
    fwdWeb[:] = [round(loc - xShear,2) for loc in fwdWeb]
    #-------------------------------------------------------------------------------
    # Loads
    #-------------------------------------------------------------------------------
    omega = sound*tipMach/rProp                 # Propeller Angular Velocity
    F = SF*3*(maxLiftingThrust/rProp**3)*(x**2)/nBlades  # Force Distribution
    Q = F * chord * cmocl                       # Torsion Distribution
    #-------------------------------------------------------------------------------
    # Initial Mass Estimates
    #-------------------------------------------------------------------------------
    box = coord * chord
    skinLength = np.sum(np.sqrt(np.sum(np.diff(box,axis=0)**2,axis=1)))
    maxThickness = (np.amax(box[:,1])-np.amin(box[:,1]))/2
    rootBendingMoment = SF*maxLiftingThrust/nBlades*0.75*rProp
    m = (bendDen*dx*rootBendingMoment/
        (2*bendUSS*maxThickness))+ \
        skinLength*shearMGT*dx*shearDen
    m = m*np.ones(N)
    error = 1           # Initialize Error
    tolerance = 1e-8    # Mass Tolerance
    massOld = np.sum(m)
    #-------------------------------------------------------------------------------
    # General Structural Properties
    #-------------------------------------------------------------------------------
    seg = []            # List of Structural Segments
    # Torsion
    enclosedArea = 0.5*np.abs(np.dot(box[:,0],np.roll(box[:,1],1))-
        np.dot(box[:,1],np.roll(box[:,0],1)))   # Shoelace Formula
    # Flap Properties
    box = coord         # Box Initially Matches Airfoil
    box = box[box[:,0]<=fwdWeb[1]]  # Trim Coordinates Aft of Aft Web
    box = box[box[:,0]>=fwdWeb[0]]  # Trim Coordinates Fwd of Fwd Web
    seg.append(box[box[:,1]>np.mean(box[:,1])]*chord)  # Upper Fwd Segment
    seg.append(box[box[:,1]<np.mean(box[:,1])]*chord)  # Lower Fwd Segment
    # Flap & Drag Inertia
    capInertia = 0
    capLength = 0
    for i in range(0,2):
        l = np.sqrt(np.sum(np.diff(seg[i],axis=0)**2,axis=1))  # Segment Lengths
        c = (seg[i][1::]+seg[i][0::-1])/2                   # Segment Centroids
        capInertia += np.abs(np.sum(l*c[:,1] **2))
        capLength += np.sum(l)
    # Shear Properties
    box = coord
    box = box[box[:,0]<=fwdWeb[1]]
    z = box[box[:,0]==fwdWeb[0],1]*chord
    shearHeight = np.abs(z[0] - z[1])
    # Core Properties
    box = coord
    box = box[box[:,0]>=fwdWeb[0]]
    box = box*chord
    coreArea = 0.5*np.abs(np.dot(box[:,0],np.roll(box[:,1],1))-
        np.dot(box[:,1],np.roll(box[:,0],1)))   # Shoelace Formula
    # Shear/Moment Calculations (integrated tip-to-root via reversed cumsum)
    Vz = np.append(np.cumsum(( F[0:-1]*np.diff(x))[::-1])[::-1],0)   # Bending Moment
    Mx = np.append(np.cumsum((Vz[0:-1]*np.diff(x))[::-1])[::-1],0)   # Torsion Moment
    My = np.append(np.cumsum(( Q[0:-1]*np.diff(x))[::-1])[::-1],0)   # Drag Moment
    #-------------------------------------------------------------------------------
    # Mass Calculation
    #-------------------------------------------------------------------------------
    # Fixed-point iteration: the centripetal force depends on the mass
    # distribution, so section masses are recomputed until converged.
    while error > tolerance:
        CF = (SF*omega**2*
              np.append(np.cumsum(( m[0:-1]*np.diff(x)*x[0:-1])[::-1])[::-1],0))  # Centripetal Force
        # Calculate Skin Weight Based on Torsion
        tTorsion = My/(2*torsUSS*enclosedArea)              # Torsion Skin Thickness
        tTorsion = np.maximum(tTorsion,torsMGT*np.ones(N)) # Gage Constraint
        mTorsion = tTorsion * skinLength * torsDen          # Torsion Mass
        # Calculate Flap Mass Based on Bending
        # NOTE(review): `box` here is whatever the previous section (or the
        # previous loop pass) last assigned — the original code shares this
        # behavior; preserved as-is pending confirmation of intent.
        tFlap = CF/(capLength*bendUTS) + \
            Mx*np.amax(np.abs(box[:,1]))/(capInertia*bendUTS)
        mFlap = tFlap*capLength*bendDen
        mGlue = glueMGT*glueDen*capLength*np.ones(N)
        # Calculate Web Mass Based on Shear
        tShear = 1.5*Vz/(shearUSS*shearHeight)
        tShear = np.maximum(tShear,shearMGT*np.ones(N))
        mShear = tShear*shearHeight*shearDen
        # Paint Weight
        mPaint = skinLength*coverMGT*coverDen*np.ones(N)
        # Core Mass
        mCore = coreArea*coreDen*np.ones(N)
        mGlue += glueMGT*glueDen*skinLength*np.ones(N)
        # Leading Edge Protection (first 10% chord)
        box = coord * chord
        box = box[box[:,0]<(0.1*chord)]
        leLength = np.sum(np.sqrt(np.sum(np.diff(box,axis=0)**2,axis=1)))
        mLE = leLength*420e-6*leDen*np.ones(N)
        # Section Mass
        m = mTorsion + mCore + mFlap + mShear + mGlue + mPaint + mLE
        # Rib Weight
        mRib = (enclosedArea+skinLength*ribWid)*ribMGT*ribDen
        # Root Fitting: sized by centripetal tension and torsion at the hub
        box = coord * chord
        rRoot = (np.amax(box[:,1])-np.amin(box[:,1]))/2
        t = np.amax(CF)/(2*np.pi*rRoot*rootUTS) + \
            np.amax(Mx)/(3*np.pi*rRoot**2*rootUTS)
        mRoot = 2*np.pi*rRoot*t*rootLength*rootDen
        # Total Weight
        mass = nBlades*(np.sum(m[0:-1]*np.diff(x))+2*mRib+mRoot)
        error = np.abs(mass-massOld)
        massOld = mass
    mass = mass * grace
    return mass
"numpy.sum",
"numpy.abs",
"numpy.amin",
"SUAVE.Attributes.Solids.Aluminum",
"numpy.ones",
"numpy.mean",
"numpy.multiply",
"SUAVE.Attributes.Solids.Epoxy",
"SUAVE.Attributes.Solids.Bidirectional_Carbon_Fiber",
"numpy.linspace",
"SUAVE.Attributes.Solids.Carbon_Fiber_Honeycomb",
"copy.deepcopy",
... | [((4319, 4353), 'copy.deepcopy', 'cp.deepcopy', (['forward_web_locations'], {}), '(forward_web_locations)\n', (4330, 4353), True, 'import copy as cp\n'), ((6958, 7022), 'numpy.multiply', 'np.multiply', (['(5 * toc)', '[0.2969, -0.126, -0.3516, 0.2843, -0.1015]'], {}), '(5 * toc, [0.2969, -0.126, -0.3516, 0.2843, -0.1015])\n', (6969, 7022), True, 'import numpy as np\n'), ((7118, 7203), 'numpy.concatenate', 'np.concatenate', (['(coord ** 0.5, coord, coord ** 2, coord ** 3, coord ** 4)'], {'axis': '(1)'}), '((coord ** 0.5, coord, coord ** 2, coord ** 3, coord ** 4),\n axis=1)\n', (7132, 7203), True, 'import numpy as np\n'), ((7255, 7295), 'numpy.concatenate', 'np.concatenate', (['(coord, nacaMAT)'], {'axis': '(1)'}), '((coord, nacaMAT), axis=1)\n', (7269, 7295), True, 'import numpy as np\n'), ((7635, 7659), 'numpy.linspace', 'np.linspace', (['(0)', 'rProp', 'N'], {}), '(0, rProp, N)\n', (7646, 7659), True, 'import numpy as np\n'), ((8964, 8973), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (8970, 8973), True, 'import numpy as np\n'), ((10345, 10364), 'numpy.abs', 'np.abs', (['(z[0] - z[1])'], {}), '(z[0] - z[1])\n', (10351, 10364), True, 'import numpy as np\n'), ((8817, 8827), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (8824, 8827), True, 'import numpy as np\n'), ((10199, 10208), 'numpy.sum', 'np.sum', (['l'], {}), '(l)\n', (10205, 10208), True, 'import numpy as np\n'), ((12985, 13007), 'numpy.abs', 'np.abs', (['(mass - massOld)'], {}), '(mass - massOld)\n', (12991, 13007), True, 'import numpy as np\n'), ((4943, 4971), 'SUAVE.Attributes.Solids.Bidirectional_Carbon_Fiber', 'Bidirectional_Carbon_Fiber', ([], {}), '()\n', (4969, 4971), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((5212, 5240), 'SUAVE.Attributes.Solids.Bidirectional_Carbon_Fiber', 'Bidirectional_Carbon_Fiber', ([], {}), '()\n', (5238, 5240), False, 'from 
SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((5487, 5516), 'SUAVE.Attributes.Solids.Unidirectional_Carbon_Fiber', 'Unidirectional_Carbon_Fiber', ([], {}), '()\n', (5514, 5516), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((5749, 5773), 'SUAVE.Attributes.Solids.Carbon_Fiber_Honeycomb', 'Carbon_Fiber_Honeycomb', ([], {}), '()\n', (5771, 5773), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((5915, 5929), 'SUAVE.Attributes.Solids.Aluminum_Rib', 'Aluminum_Rib', ([], {}), '()\n', (5927, 5929), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((6149, 6159), 'SUAVE.Attributes.Solids.Aluminum', 'Aluminum', ([], {}), '()\n', (6157, 6159), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((6335, 6343), 'SUAVE.Attributes.Solids.Nickel', 'Nickel', ([], {}), '()\n', (6341, 6343), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((6486, 6493), 'SUAVE.Attributes.Solids.Epoxy', 'Epoxy', ([], {}), '()\n', (6491, 6493), False, 'from SUAVE.Attributes.Solids import Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((6669, 6676), 'SUAVE.Attributes.Solids.Paint', 'Paint', ([], {}), '()\n', (6674, 6676), False, 'from SUAVE.Attributes.Solids import 
Bidirectional_Carbon_Fiber, Carbon_Fiber_Honeycomb, Paint, Unidirectional_Carbon_Fiber, Aluminum, Epoxy, Nickel, Aluminum_Rib\n'), ((8525, 8543), 'numpy.amax', 'np.amax', (['box[:, 1]'], {}), '(box[:, 1])\n', (8532, 8543), True, 'import numpy as np\n'), ((8543, 8561), 'numpy.amin', 'np.amin', (['box[:, 1]'], {}), '(box[:, 1])\n', (8550, 8561), True, 'import numpy as np\n'), ((10155, 10179), 'numpy.sum', 'np.sum', (['(l * c[:, 1] ** 2)'], {}), '(l * c[:, 1] ** 2)\n', (10161, 10179), True, 'import numpy as np\n'), ((11779, 11789), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11786, 11789), True, 'import numpy as np\n'), ((12053, 12063), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (12060, 12063), True, 'import numpy as np\n'), ((12118, 12128), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (12125, 12128), True, 'import numpy as np\n'), ((12173, 12183), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (12180, 12183), True, 'import numpy as np\n'), ((12412, 12422), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (12419, 12422), True, 'import numpy as np\n'), ((7352, 7387), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, -1.0]]'], {}), '([[1.0, 0.0], [0.0, -1.0]])\n', (7360, 7387), True, 'import numpy as np\n'), ((11430, 11440), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11437, 11440), True, 'import numpy as np\n'), ((11926, 11936), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (11933, 11936), True, 'import numpy as np\n'), ((12671, 12689), 'numpy.amax', 'np.amax', (['box[:, 1]'], {}), '(box[:, 1])\n', (12678, 12689), True, 'import numpy as np\n'), ((12689, 12707), 'numpy.amin', 'np.amin', (['box[:, 1]'], {}), '(box[:, 1])\n', (12696, 12707), True, 'import numpy as np\n'), ((12726, 12737), 'numpy.amax', 'np.amax', (['CF'], {}), '(CF)\n', (12733, 12737), True, 'import numpy as np\n'), ((12785, 12796), 'numpy.amax', 'np.amax', (['Mx'], {}), '(Mx)\n', (12792, 12796), True, 'import numpy as np\n'), ((8467, 8487), 'numpy.diff', 'np.diff', (['box'], {'axis': '(0)'}), 
'(box, axis=0)\n', (8474, 8487), True, 'import numpy as np\n'), ((9328, 9349), 'numpy.roll', 'np.roll', (['box[:, 1]', '(1)'], {}), '(box[:, 1], 1)\n', (9335, 9349), True, 'import numpy as np\n'), ((9374, 9395), 'numpy.roll', 'np.roll', (['box[:, 0]', '(1)'], {}), '(box[:, 0], 1)\n', (9381, 9395), True, 'import numpy as np\n'), ((9744, 9762), 'numpy.mean', 'np.mean', (['box[:, 1]'], {}), '(box[:, 1])\n', (9751, 9762), True, 'import numpy as np\n'), ((9820, 9838), 'numpy.mean', 'np.mean', (['box[:, 1]'], {}), '(box[:, 1])\n', (9827, 9838), True, 'import numpy as np\n'), ((9986, 10009), 'numpy.diff', 'np.diff', (['seg[i]'], {'axis': '(0)'}), '(seg[i], axis=0)\n', (9993, 10009), True, 'import numpy as np\n'), ((10501, 10522), 'numpy.roll', 'np.roll', (['box[:, 1]', '(1)'], {}), '(box[:, 1], 1)\n', (10508, 10522), True, 'import numpy as np\n'), ((10547, 10568), 'numpy.roll', 'np.roll', (['box[:, 0]', '(1)'], {}), '(box[:, 0], 1)\n', (10554, 10568), True, 'import numpy as np\n'), ((7058, 7078), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (7069, 7078), True, 'import numpy as np\n'), ((10682, 10692), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (10689, 10692), True, 'import numpy as np\n'), ((10767, 10777), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (10774, 10777), True, 'import numpy as np\n'), ((10852, 10862), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (10859, 10862), True, 'import numpy as np\n'), ((11658, 11675), 'numpy.abs', 'np.abs', (['box[:, 1]'], {}), '(box[:, 1])\n', (11664, 11675), True, 'import numpy as np\n'), ((12338, 12358), 'numpy.diff', 'np.diff', (['box'], {'axis': '(0)'}), '(box, axis=0)\n', (12345, 12358), True, 'import numpy as np\n'), ((12941, 12951), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (12948, 12951), True, 'import numpy as np\n'), ((11187, 11197), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (11194, 11197), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import copy
import time
import argparse
import os
import rbo
from library_models import *
from library_data import *
from scipy import stats
from collections import defaultdict, Counter
def filter_and_split(data=[]):
    """Filter out inactive users and mark each user's most recent 10% of
    interactions as the test split.

    Users with fewer than 10 interactions are dropped entirely. The
    remaining interactions keep their original (file) order per user; the
    last 10% of each user's sequence is flagged as test data.

    Parameters
    ----------
    data : ndarray of shape (n, 3)
        Columns are (user_id, item_id, timestamp).

    Returns
    -------
    tuple (new_data, test_len)
        new_data is an (m, 4) array with columns
        (user_id, item_id, timestamp, is_test) sorted by timestamp;
        test_len is the total number of interactions flagged as test.
    """
    # NOTE: the mutable default is kept for interface compatibility; `data`
    # is never mutated, so the shared default is harmless.
    users, counts = np.unique(data[:, 0], return_counts=True)
    users = users[counts >= 10]          # keep only sufficiently active users
    kept = {int(user) for user in users}
    # Rebuild the interaction log restricted to kept users; the 4th column
    # is the train/test flag (0 = train, 1 = test). The unused `pert_dic`
    # and the user->index map of the original were removed: only membership
    # in the kept set is ever needed.
    new_data = np.array([[int(row[0]), int(row[1]), row[2], 0]
                         for row in data if int(row[0]) in kept])
    # Per-user sequences of (row_index, item, timestamp) in file order.
    sequence_dic = defaultdict(list)
    for i in range(new_data.shape[0]):
        sequence_dic[int(new_data[i, 0])].append(
            [i, int(new_data[i, 1]), new_data[i, 2]])
    # Flag the last 10% of each user's interactions as test data.
    test_len = 0
    for user_seq in sequence_dic.values():
        cur_test = int(0.1 * len(user_seq))
        for _ in range(cur_test):
            interaction = user_seq.pop()
            new_data[interaction[0], 3] = 1
        test_len += cur_test
    new_data = new_data[np.argsort(new_data[:, 2]), :]  # global time order
    print(data.shape, new_data.shape)
    return new_data, test_len
def main():
    """Run data-perturbation (poisoning) attacks against the ltcross
    recommendation model and report the resulting metric shifts.

    Reads a tab-separated interaction log, trains a clean baseline, applies
    one of several attack strategies ('cas', 'opt', 'random'/'earliest', or
    the last-interaction fallback) with a deletion/injection/replacement
    perturbation, retrains, and logs metric differences to the output file.
    Repeats the whole experiment 10 times and writes aggregate statistics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path',type=str,help='path of the dataset')
    parser.add_argument('--gpu',default='0',type=str,help='GPU# will be used')
    parser.add_argument('--attack_type',type=str, help = "Which attack will be tested")
    parser.add_argument('--attack_kind',type=str, help = "Deletion, Replacement, or Injection attack")
    parser.add_argument('--output',type=str, default = 'ltcross_output.txt', help = "Output file path")
    parser.add_argument('--epochs', default=50, type = int, help='number of training epochs')
    args = parser.parse_args()
    # Number of interactions perturbed per experiment run.
    num_pert = 1
    # Select the visible GPU before torch touches CUDA.
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    # Last three columns are assumed to be (user, item, timestamp) —
    # see filter_and_split, which consumes them in that order.
    raw_data = pd.read_csv(args.data_path, sep='\t', header=None)
    data = raw_data.values[:,-3:]
    final_metrics = [[],[],[],[],[],[]]
    before_perf,after_perf = [[],[]],[[],[]]
    # NOTE(review): `f` is opened without a context manager and never
    # explicitly closed; relies on interpreter exit to flush/close.
    f = open(args.output,'w')
    (original_data,test_len) = filter_and_split(data=data)
    [user2id, user_sequence_id, user_timediffs_sequence, user_previous_itemid_sequence,
    item2id, item_sequence_id, item_timediffs_sequence,
    timestamp_sequence, feature_sequence] = load_network(original_data)
    num_interactions = len(user_sequence_id)
    num_users = len(user2id)
    num_items = len(item2id)+1 # one extra item for "none-of-these"
    num_features = len(feature_sequence[0])
    embedding_dim = 128
    occurence_count = Counter(original_data[:,1])
    # popular_item is computed but not used by any attack branch below.
    popular_item = occurence_count.most_common(1)[0][0]
    least_popular_item = occurence_count.most_common()[-1][0]
    if args.attack_type == 'cas':
        # Cascade ('cas') preprocessing: build a DAG where each training
        # interaction points to the next interaction of the same user and
        # of the same item, then BFS from every source node to count how
        # many interactions are reachable (num_child = cascade size).
        in_degree, num_child = np.zeros(original_data.shape[0]),np.zeros(original_data.shape[0])
        user_dic,item_dic = defaultdict(list),defaultdict(list)
        edges = defaultdict(list)
        count = 0
        for i in range(original_data.shape[0]):
            in_degree[i]=-1
            if original_data[i,3]==0:
                count += 1
                user,item = int(original_data[i,0]),int(original_data[i,1])
                user_dic[user].append(i)
                item_dic[item].append(i)
                in_degree[i] = 0
        for user in user_dic.keys():
            cur_list = user_dic[user]
            for i in range(len(cur_list)-1):
                j,k = cur_list[i],cur_list[i+1]
                in_degree[k] += 1
                edges[j].append(k)
        for item in item_dic.keys():
            cur_list = item_dic[item]
            for i in range(len(cur_list)-1):
                j,k = cur_list[i],cur_list[i+1]
                in_degree[k] += 1
                edges[j].append(k)
        queue = []
        for i in range(original_data.shape[0]):
            if in_degree[i] == 0:
                queue.append(i)
        # BFS from each zero-in-degree interaction to count reachable nodes.
        while len(queue)!=0:
            root = queue.pop(0)
            check = np.zeros(original_data.shape[0])
            check[root]=1
            q2 = [root]
            count2 = 1
            while len(q2)!=0:
                now = q2.pop(0)
                for node in edges[now]:
                    if check[node]==0:
                        check[node]=1
                        q2.append(node)
                        count2 += 1
            num_child[root] = count2
    # Repeat the whole attack experiment 10 times to average out training
    # and sampling randomness.
    for iteration in range(10):
        model = ltcross(embedding_dim, num_features, num_users, num_items,0).to(device)
        print(num_users,num_items,num_features,num_interactions,model)
        # Keep a clean copy so the perturbed run starts from identical weights.
        original_model = copy.deepcopy(model)
        [original_probs,original_rank,temp,perf1] = model.traintest(data = original_data, perturbed_users = [], original_probs=-1, original_rank=-1, final_metrics = [],test_len = test_len,epochs = args.epochs,device=device)
        perturbed_users = []
        if args.attack_type == 'cas':
            # Perturb the interactions with the largest cascade scores.
            chosen = np.argsort(num_child)[-num_pert:] if num_pert!=0 else []
            if args.attack_kind=='deletion':
                tbd = []
                for idx in chosen:
                    maxv,maxp = num_child[idx],idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[CASSATA & Deletion] chosen interaction {}=({},{},{}) with cascading score {}'.format(maxp,user,item,time,maxv),file=f,flush=True)
                    tbd.append(maxp)
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.delete(original_data,tbd,0)
            elif args.attack_kind=='injection':
                tbd,values = [],[]
                for idx in chosen:
                    maxv,maxp = num_child[idx],idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[CASSATA & Injection] chosen interaction {}=({},{},{}) with cascading {}'.format(maxp,user,item,time,maxv),file=f,flush=True)
                    # Inject the globally least popular item just before the
                    # chosen interaction (time-1).
                    replacement = int(least_popular_item)
                    # replacement = np.random.choice(list(set(original_data[:,1])))
                    tbd.append(maxp)
                    values.append([user,replacement,time-1,0])
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.insert(original_data,tbd,values,axis=0)
            else:
                # Replacement: swap the chosen item for the least popular one.
                new_data = copy.deepcopy(original_data)
                for idx in chosen:
                    maxv,maxp = num_child[idx],idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[CASSATA & Replacement] chosen interaction {}=({},{},{}) with cascading score {}'.format(maxp,user,item,time,maxv),file=f,flush=True)
                    replacement = int(least_popular_item)
                    # replacement = np.random.choice(list(set(original_data[:,1])))
                    new_data[maxp,1] = replacement
                    if user not in perturbed_users:
                        perturbed_users.append(user)
        elif args.attack_type == 'opt':
            # 'opt': score each training interaction by the product of its
            # two gradient norms stored by the model ("self-influence").
            final_contribution = torch.zeros(original_data.shape[0])
            for i in range(original_data.shape[0]):
                if original_data[i, 3] == 0:
                    grad1 = model.inter[:, i, :].squeeze()
                    grad2 = model.inter2[:,i,:].squeeze()
                    sum1 = torch.sqrt(torch.sum(torch.mul(grad1,grad1))).item()
                    sum2 = torch.sqrt(torch.sum(torch.mul(grad2,grad2))).item()
                    final_contribution[i] = (sum1*sum2)
            chosen = np.argsort(final_contribution)[-num_pert:]
            if args.attack_kind=='deletion':
                tbd = []
                for idx in chosen:
                    maxv,maxp = final_contribution[idx],idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[Delete] Largest self-influence interaction {}=({},{},{}) with influence sum {}'.format(maxp,user,item,time,maxv),file=f,flush=True)
                    tbd.append(maxp)
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.delete(original_data,tbd,0)
            elif args.attack_kind=='injection':
                tbd,values = [],[]
                for idx in chosen:
                    maxv,maxp = final_contribution[idx],idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[Inject] Largest self-influence interaction {}=({},{},{}) with influence sum {}'.format(maxp,user,item,time,maxv),file=f,flush=True)
                    replacement = np.random.choice(list(set(original_data[:,1])))
                    tbd.append(maxp)
                    values.append([user,replacement,time-1,0])
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.insert(original_data,tbd,values,axis=0)
            else:
                new_data = copy.deepcopy(original_data)
                for idx in chosen:
                    maxv,maxp = final_contribution[idx],idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[Replace] Largest self-influence interaction {}=({},{},{}) with influence sum {}'.format(maxp,user,item,time,maxv),file=f,flush=True)
                    replacement = np.random.choice(list(set(original_data[:,1])))
                    new_data[maxp,1] = replacement
                    if user not in perturbed_users:
                        perturbed_users.append(user)
        elif args.attack_type=='random' or args.attack_type=='earliest':
            # 'earliest' samples among each user's first interaction;
            # 'random' samples uniformly among all training interactions.
            candidates,candidates2 = [],[]
            users = {}
            items = {}
            for i in range(original_data.shape[0]):
                if original_data[i,3]==0:
                    user,item = int(original_data[i,0]),int(original_data[i,1])
                    if user not in users:
                        candidates2.append(i)
                        users[user] = i
                    candidates.append(i)
            chosen = np.random.choice(candidates2,size = num_pert,replace=False) if args.attack_type == 'earliest' else np.random.choice(candidates,size = num_pert,replace=False)
            if args.attack_kind=='deletion':
                tbd = []
                for idx in chosen:
                    maxp=idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[Delete] chosen interaction {}=({},{},{})'.format(maxp,user,item,time),file=f,flush=True)
                    tbd.append(maxp)
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.delete(original_data,tbd,0)
            elif args.attack_kind=='injection':
                tbd,values = [],[]
                for idx in chosen:
                    maxp=idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[Inject] chosen interaction {}=({},{},{})'.format(maxp,user,item,time),file=f,flush=True)
                    replacement = np.random.choice(list(set(original_data[:,1])))
                    tbd.append(maxp)
                    values.append([user,replacement,time-1,0])
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.insert(original_data,tbd,values,axis=0)
            else:
                new_data = copy.deepcopy(original_data)
                for idx in chosen:
                    maxp=idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[Replace] chosen interaction {}=({},{},{})'.format(maxp,user,item,time),file=f,flush=True)
                    replacement = np.random.choice(list(set(original_data[:,1])))
                    new_data[maxp,1] = replacement
                    if user not in perturbed_users:
                        perturbed_users.append(user)
        else:
            # Fallback: sample among each user's LAST training interaction
            # (candidates[user] keeps the highest index seen per user).
            candidates = {}
            for i in range(original_data.shape[0]):
                if original_data[i,3]==0:
                    user = int(original_data[i,0])
                    candidates[user] = i
            chosen = np.random.choice(list(candidates.values()),size = num_pert,replace=False)
            if args.attack_kind=='deletion':
                tbd = []
                for idx in chosen:
                    maxp = idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[last&random deletion] perturbed interaction {}=({},{},{})'.format(maxp,user,item,time),file=f,flush=True)
                    tbd.append(maxp)
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.delete(original_data,tbd,0)
            elif args.attack_kind=='injection':
                tbd,values = [],[]
                for idx in chosen:
                    maxp=idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[last&random injection] perturbed interaction {}=({},{},{})'.format(maxp,user,item,time),file=f,flush=True)
                    replacement = np.random.choice(list(set(original_data[:,1])))
                    tbd.append(maxp)
                    values.append([user,replacement,time-1,0])
                    if user not in perturbed_users:
                        perturbed_users.append(user)
                new_data = np.insert(original_data,tbd,values,axis=0)
            else:
                new_data = copy.deepcopy(original_data)
                for idx in chosen:
                    maxp=idx
                    user,item,time = int(original_data[maxp,0]),int(original_data[maxp,1]),original_data[maxp,2]
                    print('[last&random replacement] perturbed interaction {}=({},{},{})'.format(maxp,user,item,time),file=f,flush=True)
                    new_data[maxp,1] = np.random.choice(list(set(original_data[:,1])))
                    if user not in perturbed_users:
                        perturbed_users.append(user)
        # Map raw user ids to model ids before retraining on perturbed data.
        perturbed_users = [user2id[user] for user in perturbed_users]
        print(perturbed_users,new_data.shape)
        # Retrain from the clean initial weights on the perturbed log and
        # compare against the clean run's probabilities/rankings.
        model = copy.deepcopy(original_model)
        [probs,rank,current_metrics,perf2] = model.traintest(data=new_data, original_probs = original_probs, original_rank = original_rank, final_metrics = [[],[],[],[],[],[]],perturbed_users = perturbed_users,test_len = test_len,epochs = args.epochs,device=device)
        print('\nMRR_diff\tHITS_diff\tRBO\tRank_diff\tProb_diff\tTop-10 Jaccard',file=f,flush=True)
        for i in range(len(perf1)):
            before_perf[i].append(perf1[i])
            after_perf[i].append(perf2[i])
        # Per-iteration summary of the six perturbation metrics.
        for i in range(6):
            avg = np.average(current_metrics[i])
            med = np.median(current_metrics[i])
            std = np.std(current_metrics[i])
            final_metrics[i].append(avg)
            print('Avg = {}\tMed = {}\tStd = {}'.format(avg,med,std),file=f,flush=True)
        print('[Without perturbation] Avg MRR = {}\tAvg HITS@10 = {}'.format(np.average(before_perf[0]),np.average(before_perf[1])),file=f,flush=True)
        print('[With perturbation] Avg MRR = {}\tAvg HITS@10 = {}\n'.format(np.average(after_perf[0]),np.average(after_perf[1])),file=f,flush=True)
    # Aggregate report across all 10 iterations.
    for i in range(6):
        print(final_metrics[i],file=f,flush=True)
    for i in range(6):
        avg = np.average(final_metrics[i])
        print('({})'.format(avg),file=f,flush=True)
# Script entry point: run the attack experiment only when executed directly.
if __name__ == "__main__":
    main()
| [
"copy.deepcopy",
"numpy.average",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.median",
"numpy.std",
"numpy.zeros",
"numpy.insert",
"collections.defaultdict",
"numpy.argsort",
"torch.mul",
"numpy.array",
"torch.cuda.is_available",
"numpy.random.choice",
"torch.zeros",
"collecti... | [((315, 356), 'numpy.unique', 'np.unique', (['data[:, 0]'], {'return_counts': '(True)'}), '(data[:, 0], return_counts=True)\n', (324, 356), True, 'import numpy as np\n'), ((759, 777), 'numpy.array', 'np.array', (['new_data'], {}), '(new_data)\n', (767, 777), True, 'import numpy as np\n'), ((1320, 1345), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1343, 1345), False, 'import argparse\n'), ((2149, 2199), 'pandas.read_csv', 'pd.read_csv', (['args.data_path'], {'sep': '"""\t"""', 'header': 'None'}), "(args.data_path, sep='\\t', header=None)\n", (2160, 2199), True, 'import pandas as pd\n'), ((2884, 2912), 'collections.Counter', 'Counter', (['original_data[:, 1]'], {}), '(original_data[:, 1])\n', (2891, 2912), False, 'from collections import defaultdict, Counter\n'), ((3242, 3259), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3253, 3259), False, 'from collections import defaultdict, Counter\n'), ((4928, 4948), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (4941, 4948), False, 'import copy\n'), ((15244, 15273), 'copy.deepcopy', 'copy.deepcopy', (['original_model'], {}), '(original_model)\n', (15257, 15273), False, 'import copy\n'), ((16487, 16515), 'numpy.average', 'np.average', (['final_metrics[i]'], {}), '(final_metrics[i])\n', (16497, 16515), True, 'import numpy as np\n'), ((1197, 1223), 'numpy.argsort', 'np.argsort', (['new_data[:, 2]'], {}), '(new_data[:, 2])\n', (1207, 1223), True, 'import numpy as np\n'), ((2073, 2098), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2096, 2098), False, 'import torch\n'), ((3096, 3128), 'numpy.zeros', 'np.zeros', (['original_data.shape[0]'], {}), '(original_data.shape[0])\n', (3104, 3128), True, 'import numpy as np\n'), ((3129, 3161), 'numpy.zeros', 'np.zeros', (['original_data.shape[0]'], {}), '(original_data.shape[0])\n', (3137, 3161), True, 'import numpy as np\n'), ((3190, 3207), 'collections.defaultdict', 
'defaultdict', (['list'], {}), '(list)\n', (3201, 3207), False, 'from collections import defaultdict, Counter\n'), ((3208, 3225), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3219, 3225), False, 'from collections import defaultdict, Counter\n'), ((4310, 4342), 'numpy.zeros', 'np.zeros', (['original_data.shape[0]'], {}), '(original_data.shape[0])\n', (4318, 4342), True, 'import numpy as np\n'), ((15814, 15844), 'numpy.average', 'np.average', (['current_metrics[i]'], {}), '(current_metrics[i])\n', (15824, 15844), True, 'import numpy as np\n'), ((15863, 15892), 'numpy.median', 'np.median', (['current_metrics[i]'], {}), '(current_metrics[i])\n', (15872, 15892), True, 'import numpy as np\n'), ((15911, 15937), 'numpy.std', 'np.std', (['current_metrics[i]'], {}), '(current_metrics[i])\n', (15917, 15937), True, 'import numpy as np\n'), ((5919, 5951), 'numpy.delete', 'np.delete', (['original_data', 'tbd', '(0)'], {}), '(original_data, tbd, 0)\n', (5928, 5951), True, 'import numpy as np\n'), ((7608, 7643), 'torch.zeros', 'torch.zeros', (['original_data.shape[0]'], {}), '(original_data.shape[0])\n', (7619, 7643), False, 'import torch\n'), ((16153, 16179), 'numpy.average', 'np.average', (['before_perf[0]'], {}), '(before_perf[0])\n', (16163, 16179), True, 'import numpy as np\n'), ((16180, 16206), 'numpy.average', 'np.average', (['before_perf[1]'], {}), '(before_perf[1])\n', (16190, 16206), True, 'import numpy as np\n'), ((16303, 16328), 'numpy.average', 'np.average', (['after_perf[0]'], {}), '(after_perf[0])\n', (16313, 16328), True, 'import numpy as np\n'), ((16329, 16354), 'numpy.average', 'np.average', (['after_perf[1]'], {}), '(after_perf[1])\n', (16339, 16354), True, 'import numpy as np\n'), ((5265, 5286), 'numpy.argsort', 'np.argsort', (['num_child'], {}), '(num_child)\n', (5275, 5286), True, 'import numpy as np\n'), ((6759, 6804), 'numpy.insert', 'np.insert', (['original_data', 'tbd', 'values'], {'axis': '(0)'}), '(original_data, tbd, values, 
axis=0)\n', (6768, 6804), True, 'import numpy as np\n'), ((6848, 6876), 'copy.deepcopy', 'copy.deepcopy', (['original_data'], {}), '(original_data)\n', (6861, 6876), False, 'import copy\n'), ((8096, 8126), 'numpy.argsort', 'np.argsort', (['final_contribution'], {}), '(final_contribution)\n', (8106, 8126), True, 'import numpy as np\n'), ((8746, 8778), 'numpy.delete', 'np.delete', (['original_data', 'tbd', '(0)'], {}), '(original_data, tbd, 0)\n', (8755, 8778), True, 'import numpy as np\n'), ((9543, 9588), 'numpy.insert', 'np.insert', (['original_data', 'tbd', 'values'], {'axis': '(0)'}), '(original_data, tbd, values, axis=0)\n', (9552, 9588), True, 'import numpy as np\n'), ((9631, 9659), 'copy.deepcopy', 'copy.deepcopy', (['original_data'], {}), '(original_data)\n', (9644, 9659), False, 'import copy\n'), ((10795, 10854), 'numpy.random.choice', 'np.random.choice', (['candidates2'], {'size': 'num_pert', 'replace': '(False)'}), '(candidates2, size=num_pert, replace=False)\n', (10811, 10854), True, 'import numpy as np\n'), ((10894, 10952), 'numpy.random.choice', 'np.random.choice', (['candidates'], {'size': 'num_pert', 'replace': '(False)'}), '(candidates, size=num_pert, replace=False)\n', (10910, 10952), True, 'import numpy as np\n'), ((11486, 11518), 'numpy.delete', 'np.delete', (['original_data', 'tbd', '(0)'], {}), '(original_data, tbd, 0)\n', (11495, 11518), True, 'import numpy as np\n'), ((13737, 13769), 'numpy.delete', 'np.delete', (['original_data', 'tbd', '(0)'], {}), '(original_data, tbd, 0)\n', (13746, 13769), True, 'import numpy as np\n'), ((12209, 12254), 'numpy.insert', 'np.insert', (['original_data', 'tbd', 'values'], {'axis': '(0)'}), '(original_data, tbd, values, axis=0)\n', (12218, 12254), True, 'import numpy as np\n'), ((12297, 12325), 'copy.deepcopy', 'copy.deepcopy', (['original_data'], {}), '(original_data)\n', (12310, 12325), False, 'import copy\n'), ((14478, 14523), 'numpy.insert', 'np.insert', (['original_data', 'tbd', 'values'], {'axis': 
'(0)'}), '(original_data, tbd, values, axis=0)\n', (14487, 14523), True, 'import numpy as np\n'), ((14567, 14595), 'copy.deepcopy', 'copy.deepcopy', (['original_data'], {}), '(original_data)\n', (14580, 14595), False, 'import copy\n'), ((7906, 7929), 'torch.mul', 'torch.mul', (['grad1', 'grad1'], {}), '(grad1, grad1)\n', (7915, 7929), False, 'import torch\n'), ((7986, 8009), 'torch.mul', 'torch.mul', (['grad2', 'grad2'], {}), '(grad2, grad2)\n', (7995, 8009), False, 'import torch\n')] |
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Common high-level operations test
Tests features common to all high-level objects, like the .name property.
"""
import six
from h5py import File
from ..common import ut, TestCase, UNICODE_FILENAMES
import numpy as np
import os
import tempfile
class BaseTest(TestCase):

    """Fixture base: each test gets a fresh, writable HDF5 file in self.f."""

    def setUp(self):
        # A new temporary file per test keeps the cases independent.
        self.f = File(self.mktemp(), 'w')

    def tearDown(self):
        # Guard against a test that already closed (or never opened) the file.
        if not self.f:
            return
        self.f.close()
class TestName(BaseTest):

    """
    Feature: the .name attribute reports the object's name in the file
    """

    def test_anonymous(self):
        """ Anonymous objects have name None """
        anon_group = self.f.create_group(None)
        self.assertIs(anon_group.name, None)
class TestRepr(BaseTest):

    """
    repr() must work for objects whose names contain non-ASCII characters
    """

    USTRING = six.unichr(0xfc) + six.unichr(0xdf)

    def _check_type(self, obj):
        # repr() yields bytes on Python 2 and text on Python 3.
        expected = bytes if six.PY2 else six.text_type
        self.assertIsInstance(repr(obj), expected)

    def test_group(self):
        """ Group repr() with unicode """
        group = self.f.create_group(self.USTRING)
        self._check_type(group)

    def test_dataset(self):
        """ Dataset repr() with unicode """
        dataset = self.f.create_dataset(self.USTRING, (1,))
        self._check_type(dataset)

    def test_namedtype(self):
        """ Named type repr() with unicode """
        self.f['type'] = np.dtype('f')
        named_type = self.f['type']
        self._check_type(named_type)

    @ut.skipIf(not UNICODE_FILENAMES, "Filesystem unicode support required")
    def test_file(self):
        """ File object repr() with unicode """
        fname = tempfile.mktemp(self.USTRING + u'.hdf5')
        try:
            with File(fname, 'w') as f:
                self._check_type(f)
        finally:
            # Best-effort cleanup of the temporary file.
            try:
                os.unlink(fname)
            except Exception:
                pass
| [
"h5py.File",
"os.unlink",
"numpy.dtype",
"six.unichr",
"tempfile.mktemp"
] | [((1048, 1063), 'six.unichr', 'six.unichr', (['(252)'], {}), '(252)\n', (1058, 1063), False, 'import six\n'), ((1067, 1082), 'six.unichr', 'six.unichr', (['(223)'], {}), '(223)\n', (1077, 1082), False, 'import six\n'), ((1674, 1687), 'numpy.dtype', 'np.dtype', (['"""f"""'], {}), "('f')\n", (1682, 1687), True, 'import numpy as np\n'), ((1914, 1954), 'tempfile.mktemp', 'tempfile.mktemp', (["(self.USTRING + u'.hdf5')"], {}), "(self.USTRING + u'.hdf5')\n", (1929, 1954), False, 'import tempfile\n'), ((1983, 1999), 'h5py.File', 'File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (1987, 1999), False, 'from h5py import File\n'), ((2091, 2107), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (2100, 2107), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' polynomial regression
It is a form of regression analysis in which
the relationship between the independent variable x
and the dependent variable y is modelled as an
nth degree polynomial in x.
y = c0 * x^0 + c1 * x^1 +
c2 * x^2 + c3 * x^3 + ..... + cn * x ^n
n = degree
PolynomialFeatures :
It generate a new feature matrix
consisting of all polynomial combinations of the
features with degree less than or equal to the
specified degree
'''
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
''' sample polynomial equation with degree=3'''
''' t = 24.0 * x^0
+ 12.0 * x^1
- 0.5 * x^2
+ 9.0 * x^3
'''
constant = 24  # c0: intercept term of the sample polynomial
error_induce = 0.01  # maximum relative noise (+/-1%) mixed into each term
def fx1(x1, error_induce=0.01):
    """Linear term 12*x1 with a small random relative error applied.

    :param x1: input value(s); scalar or numpy array
    :param error_induce: maximum relative noise magnitude (default 0.01,
        i.e. +/-1%); pass 0 for a noise-free evaluation
    :return: 12.0 * x1 perturbed by a uniform random relative error
    """
    res = (12.0 * x1)
    # Inject noise proportional to the clean value.
    error = res * random.uniform(-error_induce, error_induce)
    return res + error
def fx2(x2, error_induce=0.01):
    """Quadratic term -0.5*x2**2 with a small random relative error applied.

    :param x2: input value(s); scalar or numpy array
    :param error_induce: maximum relative noise magnitude (default 0.01,
        i.e. +/-1%); pass 0 for a noise-free evaluation
    :return: -0.5 * x2**2 perturbed by a uniform random relative error
    """
    res = (-0.50 * x2 * x2)
    # Inject noise proportional to the clean value.
    error = res * random.uniform(-error_induce, error_induce)
    return res + error
def fx3(x3, error_induce=0.01):
    """Cubic term 9*x3**3 with a small random relative error applied.

    :param x3: input value(s); scalar or numpy array
    :param error_induce: maximum relative noise magnitude (default 0.01,
        i.e. +/-1%); pass 0 for a noise-free evaluation
    :return: 9.0 * x3**3 perturbed by a uniform random relative error
    """
    res = (09.0 * x3 * x3 * x3)
    # Inject noise proportional to the clean value.
    error = res * random.uniform(-error_induce, error_induce)
    return res + error
''' data preparation '''
max_sample_value = 50
total_samples = 800
train_sample_cnt = int((total_samples * 60.0 / 100.0))
test_sample_cnt = total_samples - train_sample_cnt
y1_samples = fx1(np.arange(total_samples))
y2_samples = fx2(np.arange(total_samples))
y3_samples = fx3(np.arange(total_samples))
t_samples = y1_samples + y2_samples + y3_samples
t_samples = t_samples + constant
''' splitting samples into train data and test data '''
y1_samples_train, y1_samples_test = np.split(
y1_samples, [train_sample_cnt,])
y2_samples_train, y2_samples_test = np.split(
y2_samples, [train_sample_cnt,])
y3_samples_train, y3_samples_test = np.split(
y3_samples, [train_sample_cnt,])
t_samples_train, t_samples_test = np.split(
t_samples, [train_sample_cnt,])
''' combining all variables in column structure '''
xyz_samples_train = {'colx1' : y1_samples_train,
'colx2' : y2_samples_train,
'colx3' : y3_samples_train}
dfxyz_samples_train = pd.DataFrame(data=xyz_samples_train)
dft_samples_train = pd.DataFrame(data=t_samples_train)
xyz_samples_test = {'colx1' : y1_samples_test,
'colx2' : y2_samples_test,
'colx3' : y3_samples_test}
dfxyz_samples_test = pd.DataFrame(data=xyz_samples_test)
dft_samples_test = pd.DataFrame(data=t_samples_test)
''' use PolynomialFeatures to fit and transform a model'''
poly = PolynomialFeatures(degree=3, include_bias=False)
poly_fit = poly.fit_transform(
np.arange(train_sample_cnt)[:, np.newaxis])
''' prepare a linear regression model finally for prediction '''
lm = LinearRegression().fit(poly_fit, dft_samples_train)
predictions = lm.predict(dfxyz_samples_test)
print("predictions = ", predictions)
print("lm.score(X,y) = ", lm.score(dfxyz_samples_test,
dft_samples_test))
print("lm.coef_ = ", lm.coef_)
print("lm.intercept_ = %.2f" %lm.intercept_)
''' now we are going to plot the points
just the difference between actual and expected '''
prediced_difference = np.subtract(
dft_samples_test.values,
predictions)
plt.title('Polynomial Regression - difference between actual and expected', fontsize=16)
plt.xlabel('test sample count', fontsize=14)
plt.ylabel('difference value' , fontsize=14)
plt.plot(np.arange(prediced_difference.size),
prediced_difference,
color='b')
plt.show()
| [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.subtract",
"random.uniform",
"numpy.split",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((1827, 1867), 'numpy.split', 'np.split', (['y1_samples', '[train_sample_cnt]'], {}), '(y1_samples, [train_sample_cnt])\n', (1835, 1867), True, 'import numpy as np\n'), ((1943, 1983), 'numpy.split', 'np.split', (['y2_samples', '[train_sample_cnt]'], {}), '(y2_samples, [train_sample_cnt])\n', (1951, 1983), True, 'import numpy as np\n'), ((2059, 2099), 'numpy.split', 'np.split', (['y3_samples', '[train_sample_cnt]'], {}), '(y3_samples, [train_sample_cnt])\n', (2067, 2099), True, 'import numpy as np\n'), ((2175, 2214), 'numpy.split', 'np.split', (['t_samples', '[train_sample_cnt]'], {}), '(t_samples, [train_sample_cnt])\n', (2183, 2214), True, 'import numpy as np\n'), ((2482, 2518), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'xyz_samples_train'}), '(data=xyz_samples_train)\n', (2494, 2518), True, 'import pandas as pd\n'), ((2541, 2575), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 't_samples_train'}), '(data=t_samples_train)\n', (2553, 2575), True, 'import pandas as pd\n'), ((2750, 2785), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'xyz_samples_test'}), '(data=xyz_samples_test)\n', (2762, 2785), True, 'import pandas as pd\n'), ((2808, 2841), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 't_samples_test'}), '(data=t_samples_test)\n', (2820, 2841), True, 'import pandas as pd\n'), ((2913, 2961), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(3)', 'include_bias': '(False)'}), '(degree=3, include_bias=False)\n', (2931, 2961), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((3580, 3629), 'numpy.subtract', 'np.subtract', (['dft_samples_test.values', 'predictions'], {}), '(dft_samples_test.values, predictions)\n', (3591, 3629), True, 'import numpy as np\n'), ((3688, 3780), 'matplotlib.pyplot.title', 'plt.title', (['"""Polynomial Regression - difference between actual and expected"""'], {'fontsize': '(16)'}), "('Polynomial Regression - difference between actual and expected',\n fontsize=16)\n", 
(3697, 3780), True, 'import matplotlib.pyplot as plt\n'), ((3777, 3821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""test sample count"""'], {'fontsize': '(14)'}), "('test sample count', fontsize=14)\n", (3787, 3821), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3865), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""difference value"""'], {'fontsize': '(14)'}), "('difference value', fontsize=14)\n", (3832, 3865), True, 'import matplotlib.pyplot as plt\n'), ((3965, 3975), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3973, 3975), True, 'import matplotlib.pyplot as plt\n'), ((1512, 1536), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1521, 1536), True, 'import numpy as np\n'), ((1561, 1585), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1570, 1585), True, 'import numpy as np\n'), ((1610, 1634), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1619, 1634), True, 'import numpy as np\n'), ((3877, 3912), 'numpy.arange', 'np.arange', (['prediced_difference.size'], {}), '(prediced_difference.size)\n', (3886, 3912), True, 'import numpy as np\n'), ((951, 994), 'random.uniform', 'random.uniform', (['(-error_induce)', 'error_induce'], {}), '(-error_induce, error_induce)\n', (965, 994), False, 'import random\n'), ((1095, 1138), 'random.uniform', 'random.uniform', (['(-error_induce)', 'error_induce'], {}), '(-error_induce, error_induce)\n', (1109, 1138), False, 'import random\n'), ((1243, 1286), 'random.uniform', 'random.uniform', (['(-error_induce)', 'error_induce'], {}), '(-error_induce, error_induce)\n', (1257, 1286), False, 'import random\n'), ((3005, 3032), 'numpy.arange', 'np.arange', (['train_sample_cnt'], {}), '(train_sample_cnt)\n', (3014, 3032), True, 'import numpy as np\n'), ((3131, 3149), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3147, 3149), False, 'from sklearn.linear_model import LinearRegression\n')] |
import numpy as np
#-----------------------------------------------------------------------
# node_0
# [2] ---------(1)------> |[]|xx
# xx (1)zz-----> | | xx
# xx zz | | (2)--->
# xz | | []
# zz xx | | (-1)-->
# zz (-1)xx----> | | ZZ
# [3] --------(1)-------> |[]|zz
# node_1
#-----------------------------------------------------------------------
def relu(input):
    '''Rectified linear unit activation.

    Returns the input unchanged when it is positive, otherwise 0.
    '''
    if input > 0:
        return input
    return 0
#-----------------------------------------------------------------------
first_input_data = np.array([2, 3])
# Network weights keyed by node name (see the ASCII diagram at the top).
weights = {
    # first hidden layer
    'node_0':np.array([1, 1]),
    'node_1':np.array([-1, 1]),
    # second hidden layer
    # 'node_3':np.array([0, 1]),
    # 'node_4':np.array([1, 1]),
    # output layer
    'output':np.array([2, -1])}
print('first_input_data: ', first_input_data)
print("weights: ", weights)
#-----------------------------------------------------------------------
# Calculate node 0 value: node_0_value
print('weights[\'node_0\']: ', weights['node_0'])
# Weighted sum of the inputs, then ReLU activation.
node_0_value = (first_input_data * weights['node_0']).sum()
node_0_output = relu(node_0_value)
print('node_0_output: ', node_0_output)
# Calculate node 1 value: node_1_value
print('weights[\'node_1\']', weights['node_1'])
node_1_value = (first_input_data * weights['node_1']).sum()
node_1_output = relu(node_1_value)
print('node_1_output: ', node_1_output)
# Put node values into array: hidden_layer_outputs
first_hidden_layer_outputs = np.array([node_0_output, node_1_output])
print('hidden_layer_outputs (np.array): ', first_hidden_layer_outputs)
#------------------------------------------------
# second_input_data = first_hidden_layer_outputs
# # Calculate node 0 value: node_0_value
# print('weights[\'node_3\']: ', weights['node_3'])
# node_3_value = (second_input_data * weights['node_3']).sum()
# print('node_3_value: ', node_3_value)
# # Calculate node 1 value: node_1_value
# print('weights[\'node_4\']', weights['node_4'])
# node_4_value = (second_input_data * weights['node_4']).sum()
# print('node_4_value: ', node_4_value)
# # Put node values into array: hidden_layer_outputs
# second_hidden_layer_outputs = np.array([node_3_value, node_4_value])
# print('second_hidden_layer_outputs (np.array): ', second_hidden_layer_outputs)
#------------------------------------------------
# Calculate output: output
print('weights[\'output\']: ', weights['output'])
# Final output: weighted sum of the hidden activations (no activation applied).
output = (first_hidden_layer_outputs * weights['output']).sum()
print(output)
| [
"numpy.array"
] | [((911, 927), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (919, 927), True, 'import numpy as np\n'), ((1880, 1920), 'numpy.array', 'np.array', (['[node_0_output, node_1_output]'], {}), '([node_0_output, node_1_output])\n', (1888, 1920), True, 'import numpy as np\n'), ((987, 1003), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (995, 1003), True, 'import numpy as np\n'), ((1022, 1039), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1030, 1039), True, 'import numpy as np\n'), ((1185, 1202), 'numpy.array', 'np.array', (['[2, -1]'], {}), '([2, -1])\n', (1193, 1202), True, 'import numpy as np\n')] |
from model import Model
import torch
torch.backends.cudnn.benchmark=True
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import argparse
import time
import numpy as np
import subprocess
from numpy import random
import copy
# import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from data_loader import cifar10, cifar100
transform = None

# Command-line configuration for the continual-learning run.
parser = argparse.ArgumentParser(description='Continuum learning')
parser.add_argument('--outfile', default='temp_0.1.csv', type=str, help='Output file name')
parser.add_argument('--matr', default='results/acc_matr.npz', help='Accuracy matrix file name')
parser.add_argument('--num_classes', default=2, help='Number of new classes introduced each time', type=int)
parser.add_argument('--init_lr', default=0.1, type=float, help='Init learning rate')
parser.add_argument('--num_epochs', default=40, type=int, help='Number of epochs')
parser.add_argument('--batch_size', default=64, type=int, help='Mini batch size')
args = parser.parse_args()
num_classes = args.num_classes

# The dataset-wide mean image was computed once with the snippet below and
# cached to disk; only the cached copy is loaded here.
#all_train = cifar100(root='./data',
#                 train=True,
#                 classes=range(100),
#                 download=True,
#                 transform=None)
#mean_image = all_train.mean_image
#np.save("cifar_mean_image.npy", mean_image)
mean_image = np.load("cifar_mean_image.npy")

total_classes = 100
# Randomly permute the class order so every run sees classes in a new order.
perm_id = np.random.permutation(total_classes)
all_classes = np.arange(total_classes)
for i in range(len(all_classes)):
	all_classes[i] = perm_id[all_classes[i]]
n_cl_temp = 0
num_iters = total_classes//num_classes
# class_map: original label -> contiguous internal index (in arrival order);
# map_reverse inverts that mapping.
class_map = {}
map_reverse = {}
for i, cl in enumerate(all_classes):
	if cl not in class_map:
		class_map[cl] = int(n_cl_temp)
		n_cl_temp += 1
print ("Class map:", class_map)
for cl, map_cl in class_map.items():
	map_reverse[map_cl] = int(cl)
print ("Map Reverse:", map_reverse)
print ("all_classes:", all_classes)

# Open the results CSV for the whole run and write the header.  The handle is
# deliberately kept open (not a with-block): the training loop below keeps
# appending per-increment accuracies via print(..., file=file).  Previously the
# handle was opened inside a with-statement and therefore already closed when
# the loop wrote to it, which raises ValueError; it is closed implicitly at
# interpreter exit.
file = open(args.outfile, 'w')
print("Classes, Train Accuracy, Test Accuracy", file=file)
#shuffle classes
# random.shuffle(all_classes)
# class_map = {j: int(i) for i, j in enumerate(all_classes)}
# map_reverse = {i: int(j) for i, j in enumerate(all_classes)}
# print('Map reverse: ', map_reverse)
# print('Class map: ', class_map)
# print('All classes: ', all_classes)
# The model starts with one output and is grown/updated as each batch of new
# classes arrives (see model.update below).
model = Model(1, class_map, args)
model.cuda()
# acc_matr[i, j]: accuracy on class-group i after training increment j.
acc_matr = np.zeros((int(total_classes/num_classes), num_iters))
# NOTE(review): this iterates range(0, num_iters, num_classes) but slices
# all_classes[s:s+num_classes]; with num_iters = total_classes//num_classes
# only the first num_iters classes are ever visited.  It looks like
# range(0, total_classes, num_classes) was intended -- confirm.
for s in range(0, num_iters, num_classes):
	# Load Datasets
	print('Iteration: ', s)
	#print('Algo running: ', args.algo)
	print("Loading training examples for classes", all_classes[s: s+num_classes])
	train_set = cifar100(root='./data',
						 train=True,
						 classes=all_classes[s:s+num_classes],
						 download=True,
						 transform=transform,
						 mean_image=mean_image)
	train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size,
											   shuffle=True, num_workers=12)
	# Cumulative evaluation: test on every class seen so far.
	test_set = cifar100(root='./data',
						train=False,
						classes=all_classes[:s+num_classes],
						download=True,
						transform=None,
						mean_image=mean_image)
	test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size,
											  shuffle=False, num_workers=12)
	# Update representation via BackProp
	model.update(train_set, class_map, args)
	model.eval()
	model.n_known = model.n_classes
	# NOTE(review): `file` must still be an open handle here; if it was opened
	# in an already-exited with-block above, this print raises ValueError on a
	# closed file -- verify.
	print ("%d, " % model.n_known, file=file, end="")
	print ("model classes : %d, " % model.n_known)
	total = 0.0
	correct = 0.0
	for indices, images, labels in train_loader:
		images = Variable(images).cuda()
		preds = model.classify(images)
		# Map internal class indices back to the original dataset labels.
		preds = [map_reverse[pred] for pred in preds.cpu().numpy()]
		total += labels.size(0)
		correct += (preds == labels.numpy()).sum()
	# Train Accuracy
	print ('%.2f ,' % (100.0 * correct / total), file=file, end="")
	print ('Train Accuracy : %.2f ,' % (100.0 * correct / total))
	total = 0.0
	correct = 0.0
	for indices, images, labels in test_loader:
		images = Variable(images).cuda()
		preds = model.classify(images)
		preds = [map_reverse[pred] for pred in preds.cpu().numpy()]
		total += labels.size(0)
		correct += (preds == labels.numpy()).sum()
	# Test Accuracy
	print ('%.2f' % (100.0 * correct / total), file=file)
	print ('Test Accuracy : %.2f' % (100.0 * correct / total))
	# Accuracy matrix
	# Per-group accuracy: evaluate each previously-seen class group separately.
	for i in range(model.n_known):
		test_set = cifar100(root='./data',
							train=False,
							classes=all_classes[i*num_classes: (i+1)*num_classes],
							download=True,
							transform=None,
							mean_image=mean_image)
		test_loader = torch.utils.data.DataLoader(test_set, batch_size=min(500, len(test_set)),
												  shuffle=False, num_workers=12)
		total = 0.0
		correct = 0.0
		for indices, images, labels in test_loader:
			images = Variable(images).cuda()
			preds = model.classify(images)
			preds = [map_reverse[pred] for pred in preds.cpu().numpy()]
			total += labels.size(0)
			correct += (preds == labels.numpy()).sum()
		acc_matr[i, int(s/num_classes)] = (100 * correct / total)
	print ("Accuracy matrix", acc_matr[:int(s/num_classes + 1), :int(s/num_classes + 1)])
	model.train()
# Record the current git revision alongside the results for reproducibility.
githash = subprocess.check_output(['git', 'describe', '--always'])
np.savez(args.matr, acc_matr=acc_matr, hyper_params = args, githash=githash)
| [
"numpy.load",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"subprocess.check_output",
"model.Model",
"data_loader.cifar100",
"numpy.arange",
"numpy.random.permutation",
"numpy.savez"
] | [((555, 612), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Continuum learning"""'}), "(description='Continuum learning')\n", (578, 612), False, 'import argparse\n'), ((1490, 1521), 'numpy.load', 'np.load', (['"""cifar_mean_image.npy"""'], {}), "('cifar_mean_image.npy')\n", (1497, 1521), True, 'import numpy as np\n'), ((1553, 1589), 'numpy.random.permutation', 'np.random.permutation', (['total_classes'], {}), '(total_classes)\n', (1574, 1589), True, 'import numpy as np\n'), ((1604, 1628), 'numpy.arange', 'np.arange', (['total_classes'], {}), '(total_classes)\n', (1613, 1628), True, 'import numpy as np\n'), ((2529, 2554), 'model.Model', 'Model', (['(1)', 'class_map', 'args'], {}), '(1, class_map, args)\n', (2534, 2554), False, 'from model import Model\n'), ((2855, 2993), 'data_loader.cifar100', 'cifar100', ([], {'root': '"""./data"""', 'train': '(True)', 'classes': 'all_classes[s:s + num_classes]', 'download': '(True)', 'transform': 'transform', 'mean_image': 'mean_image'}), "(root='./data', train=True, classes=all_classes[s:s + num_classes],\n download=True, transform=transform, mean_image=mean_image)\n", (2863, 2993), False, 'from data_loader import cifar10, cifar100\n'), ((3045, 3146), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(12)'}), '(train_set, batch_size=args.batch_size, shuffle=\n True, num_workers=12)\n', (3072, 3146), False, 'import torch\n'), ((3171, 3304), 'data_loader.cifar100', 'cifar100', ([], {'root': '"""./data"""', 'train': '(False)', 'classes': 'all_classes[:s + num_classes]', 'download': '(True)', 'transform': 'None', 'mean_image': 'mean_image'}), "(root='./data', train=False, classes=all_classes[:s + num_classes],\n download=True, transform=None, mean_image=mean_image)\n", (3179, 3304), False, 'from data_loader import cifar10, cifar100\n'), ((3355, 3456), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(12)'}), '(test_set, batch_size=args.batch_size, shuffle=\n False, num_workers=12)\n', (3382, 3456), False, 'import torch\n'), ((5416, 5472), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'describe', '--always']"], {}), "(['git', 'describe', '--always'])\n", (5439, 5472), False, 'import subprocess\n'), ((5475, 5549), 'numpy.savez', 'np.savez', (['args.matr'], {'acc_matr': 'acc_matr', 'hyper_params': 'args', 'githash': 'githash'}), '(args.matr, acc_matr=acc_matr, hyper_params=args, githash=githash)\n', (5483, 5549), True, 'import numpy as np\n'), ((4622, 4776), 'data_loader.cifar100', 'cifar100', ([], {'root': '"""./data"""', 'train': '(False)', 'classes': 'all_classes[i * num_classes:(i + 1) * num_classes]', 'download': '(True)', 'transform': 'None', 'mean_image': 'mean_image'}), "(root='./data', train=False, classes=all_classes[i * num_classes:(i +\n 1) * num_classes], download=True, transform=None, mean_image=mean_image)\n", (4630, 4776), False, 'from data_loader import cifar10, cifar100\n'), ((3791, 3807), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (3799, 3807), False, 'from torch.autograd import Variable\n'), ((4224, 4240), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (4232, 4240), False, 'from torch.autograd import Variable\n'), ((5039, 5055), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (5047, 5055), False, 'from torch.autograd import Variable\n')] |
from __future__ import print_function, division
import numpy as np
from openmdao.api import ExplicitComponent
class CreateRHS(ExplicitComponent):
    """
    Assemble the right-hand-side vector f of the K * u = f FEM linear system.

    The RHS is built from the applied loads; in the aerostructural case these
    are recomputed at every design point from the aerodynamic solution.

    Parameters
    ----------
    total_loads[ny, 6] : numpy array
        Flattened array of sectional loads applied to the FEM component.

    Returns
    -------
    forces[6*(ny+1)] : numpy array
        Right-hand-side of the linear system.
    """

    def initialize(self):
        self.options.declare('surface', types=dict)

    def setup(self):
        surface = self.options['surface']
        self.ny = surface['mesh'].shape[1]

        self.add_input('total_loads', val=np.zeros((self.ny, 6)), units='N')
        self.add_output('forces', val=np.ones((self.ny + 1) * 6), units='N')

        # The first ny*6 entries of the RHS copy the loads one-to-one,
        # so the Jacobian is an identity on that sub-block.
        n_dof = self.ny * 6
        idx = np.arange(n_dof)
        self.declare_partials('forces', 'total_loads', val=1., rows=idx, cols=idx)

    def compute(self, inputs, outputs):
        n_dof = self.ny * 6
        # Build the RHS: the prescribed/computed loads fill the first ny*6
        # entries; the trailing 6 DOFs stay zero.
        rhs = np.zeros(n_dof + 6)
        rhs[:n_dof] = inputs['total_loads'].reshape(n_dof)
        # Drop extremely small values so the linear system is easier to solve.
        rhs[np.abs(rhs) < 1e-6] = 0.
        outputs['forces'][:] = rhs
| [
"numpy.zeros",
"numpy.abs",
"numpy.arange",
"numpy.ones"
] | [((1146, 1158), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1155, 1158), True, 'import numpy as np\n'), ((994, 1016), 'numpy.zeros', 'np.zeros', (['(self.ny, 6)'], {}), '((self.ny, 6))\n', (1002, 1016), True, 'import numpy as np\n'), ((1067, 1093), 'numpy.ones', 'np.ones', (['((self.ny + 1) * 6)'], {}), '((self.ny + 1) * 6)\n', (1074, 1093), True, 'import numpy as np\n'), ((1654, 1679), 'numpy.abs', 'np.abs', (["outputs['forces']"], {}), "(outputs['forces'])\n", (1660, 1679), True, 'import numpy as np\n')] |
import os
import sys
import json
import unittest
import numpy as np
import luigi
import z5py
from sklearn.metrics import adjusted_rand_score
try:
from elf.segmentation.mutex_watershed import mutex_watershed
except ImportError:
mutex_watershed = None
try:
from ..base import BaseTest
except ValueError:
sys.path.append('..')
from base import BaseTest
class TestMws(BaseTest):
    """Integration test for the blockwise mutex-watershed workflow.

    Runs MwsWorkflow on the test volume and compares its result against a
    single-shot elf/affogato mutex_watershed on the same affinities, using
    the adjusted Rand score as the similarity measure.
    """
    input_key = 'volumes/affinities'
    mask_key = 'volumes/mask'
    output_key = 'data'
    # Affinity offset vectors passed to mutex_watershed / MwsWorkflow.
    offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
               [-2, 0, 0], [0, -3, 0], [0, 0, -3],
               [-3, 0, 0], [0, -9, 0], [0, 0, -9],
               [-4, 0, 0], [0, -27, 0], [0, 0, -27]]
    strides = [4, 12, 12]
    def _check_result(self, with_mask=False):
        """Compare the workflow output with a reference mutex watershed."""
        with z5py.File(self.input_path) as f:
            shape = f[self.input_key].shape[1:]
            # NOTE(review): this partial read is overwritten by the full read
            # below; it looks redundant.
            affs = f[self.input_key][:3]
        with z5py.File(self.output_path) as f:
            res = f[self.output_key][:]
        self.assertEqual(res.shape, shape)
        # load affs and compare
        with z5py.File(self.input_path) as f:
            ds = f[self.input_key]
            ds.n_threads = 8
            affs = ds[:]
        if with_mask:
            with z5py.File(self.input_path) as f:
                mask = f[self.mask_key][:]
            # Everything outside the mask must stay unlabelled (0).
            self.assertTrue(np.allclose(res[np.logical_not(mask)], 0))
            exp = mutex_watershed(affs, self.offsets, self.strides, mask=mask)
            self.assertTrue(np.allclose(exp[np.logical_not(mask)], 0))
            score = adjusted_rand_score(exp.ravel(), res.ravel())
            # score is much better with mask, so most of the differences seem
            # to be due to boundary artifacts
            self.assertLess(1. - score, .01)
        else:
            exp = mutex_watershed(affs, self.offsets, self.strides)
            score = adjusted_rand_score(exp.ravel(), res.ravel())
            self.assertLess(1. - score, .175)
        # from cremi_tools.viewer.volumina import view
        # view([affs.transpose((1, 2, 3, 0)), res, exp, mask.astype('uint32')],
        #      ['affs', 'result', 'expected', 'mask'])
    @unittest.skipUnless(mutex_watershed, "Needs affogato")
    def test_mws(self):
        """Run the workflow without a mask and validate the segmentation."""
        from cluster_tools.mutex_watershed import MwsWorkflow
        config = MwsWorkflow.get_config()['mws_blocks']
        config['strides'] = self.strides
        # Write the block-level config the workflow reads from config_folder.
        with open(os.path.join(self.config_folder, 'mws_blocks.config'), 'w') as f:
            json.dump(config, f)
        task = MwsWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_folder,
                          max_jobs=self.max_jobs, target=self.target,
                          input_path=self.input_path, input_key=self.input_key,
                          output_path=self.output_path, output_key=self.output_key,
                          offsets=self.offsets)
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        self._check_result(with_mask=False)
    @unittest.skipUnless(mutex_watershed, "Needs affogato")
    def test_mws_with_mask(self):
        """Run the workflow with a mask and validate the segmentation."""
        from cluster_tools.mutex_watershed import MwsWorkflow
        config = MwsWorkflow.get_config()['mws_blocks']
        config['strides'] = self.strides
        # Write the block-level config the workflow reads from config_folder.
        with open(os.path.join(self.config_folder, 'mws_blocks.config'), 'w') as f:
            json.dump(config, f)
        task = MwsWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_folder,
                          max_jobs=self.max_jobs, target=self.target,
                          input_path=self.input_path, input_key=self.input_key,
                          output_path=self.output_path, output_key=self.output_key,
                          mask_path=self.input_path, mask_key=self.mask_key,
                          offsets=self.offsets)
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        self._check_result(with_mask=True)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"sys.path.append",
"z5py.File",
"json.dump",
"cluster_tools.mutex_watershed.MwsWorkflow.get_config",
"numpy.logical_not",
"unittest.skipUnless",
"elf.segmentation.mutex_watershed.mutex_watershed",
"cluster_tools.mutex_watershed.MwsWorkflow",
"os.path.join",
"luigi.build"
] | [((2166, 2220), 'unittest.skipUnless', 'unittest.skipUnless', (['mutex_watershed', '"""Needs affogato"""'], {}), "(mutex_watershed, 'Needs affogato')\n", (2185, 2220), False, 'import unittest\n'), ((3030, 3084), 'unittest.skipUnless', 'unittest.skipUnless', (['mutex_watershed', '"""Needs affogato"""'], {}), "(mutex_watershed, 'Needs affogato')\n", (3049, 3084), False, 'import unittest\n'), ((4008, 4023), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4021, 4023), False, 'import unittest\n'), ((320, 341), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (335, 341), False, 'import sys\n'), ((2538, 2799), 'cluster_tools.mutex_watershed.MwsWorkflow', 'MwsWorkflow', ([], {'tmp_folder': 'self.tmp_folder', 'config_dir': 'self.config_folder', 'max_jobs': 'self.max_jobs', 'target': 'self.target', 'input_path': 'self.input_path', 'input_key': 'self.input_key', 'output_path': 'self.output_path', 'output_key': 'self.output_key', 'offsets': 'self.offsets'}), '(tmp_folder=self.tmp_folder, config_dir=self.config_folder,\n max_jobs=self.max_jobs, target=self.target, input_path=self.input_path,\n input_key=self.input_key, output_path=self.output_path, output_key=self\n .output_key, offsets=self.offsets)\n', (2549, 2799), False, 'from cluster_tools.mutex_watershed import MwsWorkflow\n'), ((2909, 2950), 'luigi.build', 'luigi.build', (['[task]'], {'local_scheduler': '(True)'}), '([task], local_scheduler=True)\n', (2920, 2950), False, 'import luigi\n'), ((3412, 3729), 'cluster_tools.mutex_watershed.MwsWorkflow', 'MwsWorkflow', ([], {'tmp_folder': 'self.tmp_folder', 'config_dir': 'self.config_folder', 'max_jobs': 'self.max_jobs', 'target': 'self.target', 'input_path': 'self.input_path', 'input_key': 'self.input_key', 'output_path': 'self.output_path', 'output_key': 'self.output_key', 'mask_path': 'self.input_path', 'mask_key': 'self.mask_key', 'offsets': 'self.offsets'}), '(tmp_folder=self.tmp_folder, config_dir=self.config_folder,\n 
max_jobs=self.max_jobs, target=self.target, input_path=self.input_path,\n input_key=self.input_key, output_path=self.output_path, output_key=self\n .output_key, mask_path=self.input_path, mask_key=self.mask_key, offsets\n =self.offsets)\n', (3423, 3729), False, 'from cluster_tools.mutex_watershed import MwsWorkflow\n'), ((3861, 3902), 'luigi.build', 'luigi.build', (['[task]'], {'local_scheduler': '(True)'}), '([task], local_scheduler=True)\n', (3872, 3902), False, 'import luigi\n'), ((782, 808), 'z5py.File', 'z5py.File', (['self.input_path'], {}), '(self.input_path)\n', (791, 808), False, 'import z5py\n'), ((918, 945), 'z5py.File', 'z5py.File', (['self.output_path'], {}), '(self.output_path)\n', (927, 945), False, 'import z5py\n'), ((1081, 1107), 'z5py.File', 'z5py.File', (['self.input_path'], {}), '(self.input_path)\n', (1090, 1107), False, 'import z5py\n'), ((1408, 1468), 'elf.segmentation.mutex_watershed.mutex_watershed', 'mutex_watershed', (['affs', 'self.offsets', 'self.strides'], {'mask': 'mask'}), '(affs, self.offsets, self.strides, mask=mask)\n', (1423, 1468), False, 'from elf.segmentation.mutex_watershed import mutex_watershed\n'), ((1807, 1856), 'elf.segmentation.mutex_watershed.mutex_watershed', 'mutex_watershed', (['affs', 'self.offsets', 'self.strides'], {}), '(affs, self.offsets, self.strides)\n', (1822, 1856), False, 'from elf.segmentation.mutex_watershed import mutex_watershed\n'), ((2325, 2349), 'cluster_tools.mutex_watershed.MwsWorkflow.get_config', 'MwsWorkflow.get_config', ([], {}), '()\n', (2347, 2349), False, 'from cluster_tools.mutex_watershed import MwsWorkflow\n'), ((2501, 2521), 'json.dump', 'json.dump', (['config', 'f'], {}), '(config, f)\n', (2510, 2521), False, 'import json\n'), ((3199, 3223), 'cluster_tools.mutex_watershed.MwsWorkflow.get_config', 'MwsWorkflow.get_config', ([], {}), '()\n', (3221, 3223), False, 'from cluster_tools.mutex_watershed import MwsWorkflow\n'), ((3375, 3395), 'json.dump', 'json.dump', (['config', 'f'], {}), 
'(config, f)\n', (3384, 3395), False, 'import json\n'), ((1243, 1269), 'z5py.File', 'z5py.File', (['self.input_path'], {}), '(self.input_path)\n', (1252, 1269), False, 'import z5py\n'), ((2423, 2476), 'os.path.join', 'os.path.join', (['self.config_folder', '"""mws_blocks.config"""'], {}), "(self.config_folder, 'mws_blocks.config')\n", (2435, 2476), False, 'import os\n'), ((3297, 3350), 'os.path.join', 'os.path.join', (['self.config_folder', '"""mws_blocks.config"""'], {}), "(self.config_folder, 'mws_blocks.config')\n", (3309, 3350), False, 'import os\n'), ((1363, 1383), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1377, 1383), True, 'import numpy as np\n'), ((1513, 1533), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1527, 1533), True, 'import numpy as np\n')] |
import pytest
import os
import numpy as np
import spiceypy as spice
import json
from unittest.mock import patch, PropertyMock
import unittest
from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts
import ale
from ale.drivers.mex_drivers import MexHrscPds3NaifSpiceDriver, MexHrscIsisLabelNaifSpiceDriver, MexSrcPds3NaifSpiceDriver
@pytest.fixture()
def usgscsm_compare_dict():
    """Expected ISDs for the h5270_0000_ir2 HRSC image, one per formatter.

    Keys under ``"h5270_0000_ir2"``:
      - ``"usgscsm"``: expected USGSCSM line-scanner ISD.
      - ``"isis"``: expected ISIS ISD (NaifKeywords, pointing, positions).
    Values are reference numbers checked against freshly generated ISDs.
    """
    return {
        "h5270_0000_ir2" : {
            # expected USGSCSM ISD
            "usgscsm" : {
                "radii": {
                    "semimajor": 3396.19,
                    "semiminor": 3376.2,
                    "unit": "km"
                },
                "sensor_position": {
                    "positions": [
                        [
                            711902.968354,
                            3209827.60790571,
                            1748326.86116295
                        ],
                        [
                            727778.89367768,
                            3287885.02005966,
                            1594882.70156054
                        ],
                        [
                            743098.34408384,
                            3361360.97987664,
                            1439236.15929773
                        ],
                        [
                            757817.60768561,
                            3430175.96634995,
                            1281609.7397002
                        ],
                        [
                            771895.91691839,
                            3494268.82029183,
                            1122231.09675971
                        ],
                        [
                            785295.6426853,
                            3553596.86562762,
                            961331.02292412
                        ]
                    ],
                    "velocities": [
                        [
                            396.19391017,
                            1971.70523609,
                            -3738.08862116
                        ],
                        [
                            383.09225613,
                            1860.35892297,
                            -3794.8312507
                        ],
                        [
                            368.88320115,
                            1746.8383078,
                            -3846.19171074
                        ],
                        [
                            353.63635876,
                            1631.58973504,
                            -3892.0182367
                        ],
                        [
                            337.42645506,
                            1515.06674438,
                            -3932.20958309
                        ],
                        [
                            320.33289561,
                            1397.72243165,
                            -3966.71239887
                        ]
                    ],
                    "unit": "m"
                },
                "sun_position": {
                    "positions": [
                        [
                            2.05222074E+11,
                            1.19628335E+11,
                            5.02349719E+10
                        ]
                    ],
                    "velocities": [
                        [
                            8468758.54,
                            -14528713.8,
                            8703.55212
                        ]
                    ],
                    "unit": "m"
                },
                "sensor_orientation": {
                    "quaternions": [
                        [
                            -0.09146728,
                            -0.85085751,
                            0.51357522,
                            0.06257586
                        ],
                        [
                            -0.09123532,
                            -0.83858882,
                            0.53326329,
                            0.0638371
                        ],
                        [
                            -0.09097193,
                            -0.82586685,
                            0.55265188,
                            0.06514567
                        ],
                        [
                            -0.09050679,
                            -0.81278131,
                            0.57165363,
                            0.06638667
                        ],
                        [
                            -0.08988786,
                            -0.79935128,
                            0.59024631,
                            0.06757961
                        ],
                        [
                            -0.08924306,
                            -0.78551905,
                            0.60849234,
                            0.06879366
                        ]
                    ]
                },
                "detector_sample_summing": 1,
                "detector_line_summing": 1,
                "focal_length_model": {
                    "focal_length": 174.82
                },
                "detector_center": {
                    "line": 0.0,
                    "sample": 2592.0
                },
                "starting_detector_line": 0,
                "starting_detector_sample": 0,
                "focal2pixel_lines": [
                    -7113.11359717265,
                    0.062856784318668,
                    142.857129028729
                ],
                "focal2pixel_samples": [
                    -0.778052433438109,
                    -142.857129028729,
                    0.062856784318668
                ],
                "optical_distortion": {
                    "radial": {
                        "coefficients": [
                            0.0,
                            0.0,
                            0.0
                        ]
                    }
                },
                "image_lines": 400,
                "image_samples": 1288,
                "name_platform": "MARS EXPRESS",
                "name_sensor": "HIGH RESOLUTION STEREO CAMERA",
                "reference_height": {
                    "maxheight": 1000,
                    "minheight": -1000,
                    "unit": "m"
                },
                "name_model": "USGS_ASTRO_LINE_SCANNER_SENSOR_MODEL",
                "interpolation_method": "lagrange",
                "line_scan_rate": [
                    [
                        0.5,
                        -94.88182842731476,
                        0.012800790786743165
                    ],
                    [
                        15086.5,
                        101.82391116023064,
                        0.013227428436279297
                    ]
                ],
                "starting_ephemeris_time": 255744592.07217148,
                "center_ephemeris_time": 255744693.90931007,
                "t0_ephemeris": -101.83713859319687,
                "dt_ephemeris": 40.734855437278746,
                "t0_quaternion": -101.83713859319687,
                "dt_quaternion": 40.734855437278746
            },
            # expected ISIS ISD
            "isis" :
            {
                "CameraVersion": 1,
                "NaifKeywords": {
                    "BODY499_RADII": [
                        3396.19,
                        3396.19,
                        3376.2
                    ],
                    "BODY_FRAME_CODE": 10014,
                    "BODY_CODE": 499,
                    "INS-41210_FOV_FRAME": "MEX_HRSC_HEAD",
                    "FRAME_-41210_NAME": "MEX_HRSC_HEAD",
                    "INS-41210_CK_TIME_TOLERANCE": 1.0,
                    "TKFRAME_-41210_AXES": [
                        1.0,
                        2.0,
                        3.0
                    ],
                    "TKFRAME_-41210_SPEC": "ANGLES",
                    "FRAME_-41210_CLASS": 4.0,
                    "INS-41210_FOV_ANGULAR_SIZE": [
                        0.2,
                        0.659734
                    ],
                    "INS-41210_OD_K": [
                        0.0,
                        0.0,
                        0.0
                    ],
                    "INS-41210_F/RATIO": 5.6,
                    "INS-41210_PLATFORM_ID": -41000.0,
                    "TKFRAME_-41210_ANGLES": [
                        -0.334,
                        0.0101,
                        0.0
                    ],
                    "INS-41210_SPK_TIME_BIAS": 0.0,
                    "FRAME_-41210_CENTER": -41.0,
                    "TKFRAME_-41210_UNITS": "DEGREES",
                    "INS-41210_BORESIGHT": [
                        0.0,
                        0.0,
                        175.0
                    ],
                    "INS-41210_CK_TIME_BIAS": 0.0,
                    "FRAME_-41210_CLASS_ID": -41210.0,
                    "INS-41210_IFOV": 4e-05,
                    "INS-41210_FOV_BOUNDARY_CORNERS": [
                        18.187,
                        60.0641,
                        175.0,
                        18.1281,
                        -60.0399,
                        175.0,
                        -18.1862,
                        -60.0435,
                        175.0,
                        -18.142
                    ],
                    "INS-41210_FOV_SHAPE": "RECTANGLE",
                    "TKFRAME_-41210_RELATIVE": "MEX_HRSC_BASE",
                    "INS-41210_PIXEL_PITCH": 0.007,
                    "INS-41210_FOCAL_LENGTH": 175.0,
                    "BODY499_POLE_DEC": [
                        52.8865,
                        -0.0609,
                        0.0
                    ],
                    "BODY499_POLE_RA": [
                        317.68143,
                        -0.1061,
                        0.0
                    ],
                    "BODY499_PM": [
                        176.63,
                        350.89198226,
                        0.0
                    ],
                    "INS-41218_ITRANSL": [
                        -7113.11359717265,
                        0.062856784318668,
                        142.857129028729
                    ],
                    "INS-41218_ITRANSS": [
                        -0.778052433438109,
                        -142.857129028729,
                        0.062856784318668
                    ],
                    "INS-41218_FOV_SHAPE": "RECTANGLE",
                    "INS-41218_PIXEL_SIZE": [
                        7.0,
                        7.0
                    ],
                    "INS-41218_CK_REFERENCE_ID": 1.0,
                    "INS-41218_FOV_FRAME": "MEX_HRSC_HEAD",
                    "INS-41218_CCD_CENTER": [
                        2592.5,
                        0.5
                    ],
                    "INS-41218_CK_FRAME_ID": -41001.0,
                    "INS-41218_F/RATIO": 5.6,
                    "INS-41218_PIXEL_SAMPLES": 5184.0,
                    "INS-41218_BORESIGHT_SAMPLE": 2592.5,
                    "INS-41218_FILTER_BANDWIDTH": 90.0,
                    "INS-41218_BORESIGHT_LINE": 0.0,
                    "INS-41218_PIXEL_LINES": 1.0,
                    "INS-41218_FOCAL_LENGTH": 174.82,
                    "INS-41218_FOV_ANGULAR_SIZE": [
                        0.2,
                        4e-05
                    ],
                    "INS-41218_FILTER_BANDCENTER": 970.0,
                    "INS-41218_TRANSX": [
                        0.016461898406507,
                        -0.006999999322408,
                        3.079982431615e-06
                    ],
                    "INS-41218_TRANSY": [
                        49.7917927568053,
                        3.079982431615e-06,
                        0.006999999322408
                    ],
                    "INS-41218_FOV_BOUNDARY_CORNERS": [
                        18.1982,
                        49.9121,
                        175.0,
                        18.1982,
                        49.9051,
                        175.0,
                        -18.1693,
                        49.8901,
                        175.0,
                        -18.1693
                    ],
                    "INS-41218_BORESIGHT": [
                        0.0151,
                        49.9039,
                        175.0
                    ],
                    "INS-41218_IFOV": 4e-05
                },
                "InstrumentPointing": {
                    "TimeDependentFrames": [
                        -41001,
                        1
                    ],
                    "CkTableStartTime": 255744599.02748,
                    "CkTableEndTime": 255744635.91477,
                    "CkTableOriginalSize": 3,
                    "EphemerisTimes": [
                        255744599.02748,
                        255744623.61901,
                        255744635.91477
                    ],
                    "Quaternions": [
                        [
                            -0.34147103206303764,
                            0.46006200041554185,
                            -0.48264106492774883,
                            -0.6624183666542334
                        ],
                        [
                            -0.34862899148129517,
                            0.4555408857335137,
                            -0.47327265910130095,
                            -0.668545673735942
                        ],
                        [
                            -0.3521802679309037,
                            0.45323805476596757,
                            -0.46855266563769715,
                            -0.6715673637959837
                        ]
                    ],
                    "AngularVelocity": [
                        [
                            0.00035176331113592204,
                            0.0010154650024473105,
                            0.0003877175924478187
                        ],
                        [
                            0.0003524285580283372,
                            0.001014970147047595,
                            0.0003878218830533074
                        ],
                        [
                            0.00035026208236974156,
                            0.001017194110775444,
                            0.000384764361044439
                        ]
                    ],
                    "ConstantFrames": [
                        -41210,
                        -41200,
                        -41000,
                        -41001
                    ],
                    "ConstantRotation": [
                        -0.9999999844629888,
                        1.027590578527487e-06,
                        0.00017627525841189352,
                        1.2246232944813223e-16,
                        -0.9999830090976747,
                        0.00582936668603668,
                        0.0001762782535384808,
                        0.0058293665954657434,
                        0.9999829935609271
                    ]
                },
                "BodyRotation": {
                    "TimeDependentFrames": [
                        10014,
                        1
                    ],
                    "CkTableStartTime": 255744599.02748,
                    "CkTableEndTime": 255744635.91477,
                    "CkTableOriginalSize": 3,
                    "EphemerisTimes": [
                        255744599.02748,
                        255744623.61901,
                        255744635.91477
                    ],
                    "Quaternions": [
                        [
                            -0.6525755651363003,
                            -0.0231514239139282,
                            0.3174415084289179,
                            -0.6876336467074378
                        ],
                        [
                            -0.6531746247480361,
                            -0.022874748805603497,
                            0.31746156550431237,
                            -0.6870646329712322
                        ],
                        [
                            -0.6534739684048748,
                            -0.022736404778153148,
                            0.31747150360998055,
                            -0.68677993048033
                        ]
                    ],
                    "AngularVelocity": [
                        [
                            3.1623981615137114e-05,
                            -2.8803031775991542e-05,
                            5.6520727317788564e-05
                        ],
                        [
                            3.162398161506756e-05,
                            -2.8803031776763114e-05,
                            5.652072731743428e-05
                        ],
                        [
                            3.1623981615032794e-05,
                            -2.8803031777148914e-05,
                            5.6520727317257115e-05
                        ]
                    ]
                },
                "InstrumentPosition": {
                    "SpkTableStartTime": 255744599.02748,
                    "SpkTableEndTime": 255744635.91477,
                    "SpkTableOriginalSize": 3,
                    "EphemerisTimes": [
                        255744599.02748,
                        255744623.61901,
                        255744635.91477
                    ],
                    "Positions": [
                        [
                            3508.767882205483,
                            -1180.0905787748716,
                            -404.6580659358628
                        ],
                        [
                            3509.6584138014186,
                            -1143.4324359500313,
                            -502.6029463204848
                        ],
                        [
                            3509.4431532823473,
                            -1124.886654875713,
                            -551.4851113671591
                        ]
                    ],
                    "Velocities": [
                        [
                            0.07204008324341263,
                            1.4787375673363454,
                            -3.987265079143158
                        ],
                        [
                            0.0003930097221548436,
                            1.5024971608640412,
                            -3.9781429684078495
                        ],
                        [
                            -0.03540185319234399,
                            1.5140837760694033,
                            -3.9728346761041364
                        ]
                    ]
                },
                "SunPosition": {
                    "SpkTableStartTime": 255744697.39357847,
                    "SpkTableEndTime": 255744697.39357847,
                    "SpkTableOriginalSize": 1,
                    "EphemerisTimes": [
                        255744697.39357847
                    ],
                    "Positions": [
                        [
                            97397666.49661352,
                            -201380879.84291452,
                            -94392949.82617083
                        ]
                    ],
                    "Velocities": [
                        [
                            21.26085726371221,
                            7.17339564842172,
                            2.739589556465391
                        ]
                    ]
                }
            }
        }}
# Fix: `scope`/`autouse` were previously plain function parameters, where
# pytest silently ignores them -- fixture options belong on the decorator.
# Also drops the dead `updated_kernels = kernels` assignment that was
# immediately overwritten by convert_kernels().
@pytest.fixture(scope="module")
def test_mex_src_kernels():
    """Yield converted (text) kernels for the MEX SRC image H0010_0023_SR2.

    Binary kernels produced by the conversion are removed on teardown.
    """
    kernels = get_image_kernels("H0010_0023_SR2")
    updated_kernels, binary_kernels = convert_kernels(kernels)
    yield updated_kernels
    # clean up the binary kernels created by convert_kernels
    for kern in binary_kernels:
        os.remove(kern)
# Fix: `scope`/`autouse` were previously plain function parameters, where
# pytest silently ignores them -- fixture options belong on the decorator.
@pytest.fixture(scope="module")
def test_mex_hrsc_kernels():
    """Yield converted (text) kernels for the MEX HRSC image h5270_0000_ir2.

    Binary kernels produced by the conversion are removed on teardown.
    """
    kernels = get_image_kernels('h5270_0000_ir2')
    updated_kernels, binary_kernels = convert_kernels(kernels)
    yield updated_kernels
    # clean up the binary kernels created by convert_kernels
    for kern in binary_kernels:
        os.remove(kern)
def test_mex_src_load(test_mex_src_kernels):
    """Generate an ISD for the SRC image and compare it to the stored reference."""
    pds_label = get_image_label("H0010_0023_SR2", 'pds3')
    expected_isd = get_isd("mexsrc")
    # build the ISD from the label + kernels and parse it back into a dict
    generated = json.loads(ale.loads(pds_label, props={'kernels': test_mex_src_kernels}, verbose=True))
    print(json.dumps(generated, indent=2))
    assert compare_dicts(generated, expected_isd) == []
# Eventually all label/formatter combinations should be tested. For now, isis3/usgscsm and
# pds3/isis will fail.
# Eventually all label/formatter combinations should be tested. For now, isis3/usgscsm and
# pds3/isis will fail.
@pytest.mark.parametrize("label,formatter", [('isis3','isis'), ('pds3', 'usgscsm'),
                         pytest.param('isis3','usgscsm', marks=pytest.mark.xfail),
                         pytest.param('pds3','isis', marks=pytest.mark.xfail),])
def test_mex_load(test_mex_hrsc_kernels, formatter, usgscsm_compare_dict, label):
    """Load an HRSC ISD for each label/formatter pair and compare to the reference.

    The binary-table-derived driver properties are patched so the test does
    not depend on the real embedded binary table.
    """
    label_file = get_image_label('h5270_0000_ir2', label)
    with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times', \
               new_callable=PropertyMock) as binary_ephemeris_times, \
         patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations', \
               new_callable=PropertyMock) as binary_exposure_durations, \
         patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_lines', \
               new_callable=PropertyMock) as binary_lines, \
         patch('ale.drivers.mex_drivers.MexHrscIsisLabelNaifSpiceDriver.ephemeris_time', \
               new_callable=PropertyMock) as ephemeris_time, \
         patch('ale.drivers.mex_drivers.read_table_data', return_value=12345) as read_table_data, \
         patch('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime': [255744599.02748165, 255744684.33197814, 255744684.34504557], \
                                                                    'ExposureTime': [0.012800790786743165, 0.012907449722290038, 0.013227428436279297], \
                                                                    'LineStart': [1, 6665, 6666]}) as parse_table:
        # canned values standing in for the image's binary timing table
        ephemeris_time.return_value = [255744599.02748, 255744623.61901, 255744635.91477]
        binary_ephemeris_times.return_value = [255744599.02748165, 255744599.04028246, 255744795.73322123]
        binary_exposure_durations.return_value = [0.012800790786743165, 0.012800790786743165, 0.013227428436279297]
        binary_lines.return_value = [0.5, 1.5, 15086.5]
        usgscsm_isd = ale.load(label_file, props={'kernels': test_mex_hrsc_kernels}, formatter=formatter)
        assert compare_dicts(usgscsm_isd, usgscsm_compare_dict['h5270_0000_ir2'][formatter]) == []
# ========= Test mex pds3label and naifspice driver =========
# ========= Test mex pds3label and naifspice driver =========
class test_mex_pds3_naif(unittest.TestCase):
    """Unit tests for MexHrscPds3NaifSpiceDriver properties (SPICE calls mocked)."""
    def setUp(self):
        label = get_image_label("h5270_0000_ir2", "pds3")
        self.driver = MexHrscPds3NaifSpiceDriver(label)
    def test_short_mission_name(self):
        assert self.driver.short_mission_name=='mex'
    def test_odtk(self):
        # optical distortion coefficients
        assert self.driver.odtk == [0.0, 0.0, 0.0]
    def test_ikid(self):
        with patch('ale.drivers.mex_drivers.spice.bods2c', return_value=12345) as bods2c:
            assert self.driver.ikid == 12345
            bods2c.assert_called_with('MEX_HRSC_HEAD')
    def test_fikid(self):
        with patch('ale.drivers.mex_drivers.spice.bods2c', return_value=12345) as bods2c:
            assert self.driver.fikid == 12345
            bods2c.assert_called_with('MEX_HRSC_IR')
    def test_instrument_id(self):
        assert self.driver.instrument_id == 'MEX_HRSC_IR'
    def test_spacecraft_name(self):
        assert self.driver.spacecraft_name =='MEX'
    def test_focal_length(self):
        # -41218 is the NAIF code of the HRSC IR filter
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid', \
                   new_callable=PropertyMock) as fikid:
            fikid.return_value = -41218
            assert self.driver.focal_length == 174.82
    def test_focal2pixel_lines(self):
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid', \
                   new_callable=PropertyMock) as fikid:
            fikid.return_value = -41218
            np.testing.assert_almost_equal(self.driver.focal2pixel_lines,
                                           [-7113.11359717265, 0.062856784318668, 142.857129028729])
    def test_focal2pixel_samples(self):
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid', \
                   new_callable=PropertyMock) as fikid:
            fikid.return_value = -41218
            np.testing.assert_almost_equal(self.driver.focal2pixel_samples,
                                           [-0.778052433438109, -142.857129028729, 0.062856784318668])
    def test_pixel2focal_x(self):
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid', \
                   new_callable=PropertyMock) as fikid:
            fikid.return_value = -41218
            np.testing.assert_almost_equal(self.driver.pixel2focal_x,
                                           [0.016461898406507, -0.006999999322408, 3.079982431615e-06])
    def test_pixel2focal_y(self):
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid', \
                   new_callable=PropertyMock) as fikid:
            fikid.return_value = -41218
            np.testing.assert_almost_equal(self.driver.pixel2focal_y,
                                           [49.7917927568053, 3.079982431615e-06, 0.006999999322408])
    def test_detector_start_line(self):
        assert self.driver.detector_start_line == 0.0
    def test_detector_center_line(self):
        assert self.driver.detector_center_line == 0.0
    def test_detector_center_sample(self):
        assert self.driver.detector_center_sample == 2592.0
    def test_center_ephemeris_time(self):
        # center time = start time + span derived from the binary table
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times', \
                   new_callable=PropertyMock) as binary_ephemeris_times, \
             patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations', \
                   new_callable=PropertyMock) as binary_exposure_durations, \
             patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.ephemeris_start_time',
                   new_callable=PropertyMock) as ephemeris_start_time:
            binary_ephemeris_times.return_value = [255744795.73322123]
            binary_exposure_durations.return_value = [0.013227428436279297]
            ephemeris_start_time.return_value = 255744592.07217148
            assert self.driver.center_ephemeris_time == 255744693.90931007
    def test_ephemeris_stop_time(self):
        # stop time = last binary time + last exposure duration
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times', \
                   new_callable=PropertyMock) as binary_ephemeris_times, \
             patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations', \
                   new_callable=PropertyMock) as binary_exposure_durations :
            binary_ephemeris_times.return_value = [255744795.73322123]
            binary_exposure_durations.return_value = [0.013227428436279297]
            assert self.driver.ephemeris_stop_time == 255744795.74644867
    def test_line_scan_rate(self):
        # expected: (start lines, offset times, exposure durations) with
        # duplicate-rate entries collapsed
        with patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times', \
                   new_callable=PropertyMock) as binary_ephemeris_times, \
             patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations', \
                   new_callable=PropertyMock) as binary_exposure_durations, \
             patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_lines', \
                   new_callable=PropertyMock) as binary_lines, \
             patch('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.ephemeris_start_time',
                   new_callable=PropertyMock) as ephemeris_start_time:
            binary_ephemeris_times.return_value = [0, 1, 2, 3, 5, 7, 9]
            binary_exposure_durations.return_value = [1, 1, 1, 2, 2, 2, 2]
            binary_lines.return_value = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
            ephemeris_start_time.return_value = 0
            assert self.driver.line_scan_rate == ([0.5, 3.5],
                                                  [-5.5, -2.5],
                                                  [1, 2])
    def test_sensor_model_version(self):
        assert self.driver.sensor_model_version == 1
# ========= Test mex isis3label and naifspice driver =========
# ========= Test mex isis3label and naifspice driver =========
class test_mex_isis3_naif(unittest.TestCase):
    """Unit tests for MexHrscIsisLabelNaifSpiceDriver (table reads mocked)."""
    def setUp(self):
        label = get_image_label("h5270_0000_ir2", "isis3")
        self.driver = MexHrscIsisLabelNaifSpiceDriver(label)
    def test_instrument_id(self):
        assert self.driver.instrument_id == 'MEX_HRSC_IR'
    def test_ikid(self):
        with patch('ale.drivers.mex_drivers.spice.bods2c', return_value=12345) as bods2c:
            assert self.driver.ikid == 12345
            bods2c.assert_called_with('MEX_HRSC_HEAD')
    def test_fikid(self):
        with patch('ale.drivers.mex_drivers.spice.bods2c', return_value=12345) as bods2c:
            assert self.driver.fikid == 12345
            bods2c.assert_called_with('MEX_HRSC_IR')
    def test_ephemeris_start_time(self):
        with patch('ale.drivers.mex_drivers.read_table_data', return_value=12345) as read_table_data, \
             patch('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime': [255744599.02748165, 255744684.33197814, 255744684.34504557], \
                                                                        'ExposureTime': [0.012800790786743165, 0.012907449722290038, 0.013227428436279297], \
                                                                        'LineStart': [1, 6665, 6666]}) as parse_table:
            assert self.driver.ephemeris_start_time == 255744599.02748165
    def test_line_scan_rate(self):
        with patch('ale.drivers.mex_drivers.read_table_data', return_value=12345) as read_table_data, \
             patch('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime': [255744599.02748165, 255744684.33197814, 255744684.34504557], \
                                                                        'ExposureTime': [0.012800790786743165, 0.012907449722290038, 0.013227428436279297], \
                                                                        'LineStart': [1, 6665, 6666]}) as parse_table:
            assert self.driver.line_scan_rate == ([1, 6665, 6666], [255744599.02748165, 255744684.33197814, 255744684.34504557], [0.012800790786743165, 0.012907449722290038, 0.013227428436279297])
    def test_ephemeris_stop_time(self):
        # stop = last table time + remaining lines (to 15088) * last exposure
        with patch('ale.drivers.mex_drivers.read_table_data', return_value=12345) as read_table_data, \
             patch('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime': [255744599.02748165, 255744684.33197814, 255744684.34504557], \
                                                                        'ExposureTime': [0.012800790786743165, 0.012907449722290038, 0.013227428436279297], \
                                                                        'LineStart': [1, 6665, 6666]}) as parse_table:
            assert self.driver.ephemeris_stop_time == 255744684.34504557 + ((15088 - 6666 + 1) * 0.013227428436279297)
    def test_ephemeris_center_time(self):
        # midpoint of start and (derived) stop time
        with patch('ale.drivers.mex_drivers.read_table_data', return_value=12345) as read_table_data, \
             patch('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime': [255744599.02748165, 255744684.33197814, 255744684.34504557], \
                                                                        'ExposureTime': [0.012800790786743165, 0.012907449722290038, 0.013227428436279297], \
                                                                        'LineStart': [1, 6665, 6666]}) as parse_table:
            assert self.driver.center_ephemeris_time == (255744599.02748165 + 255744684.34504557 + ((15088 - 6666 + 1) * 0.013227428436279297)) / 2
    def test_sensor_model_version(self):
        assert self.driver.sensor_model_version == 1
# ========= Test mex - SRC - pds3label and naifspice driver =========
class test_mex_src_pds3_naif(unittest.TestCase):
def setUp(self):
label = get_image_label("H0010_0023_SR2", "pds3")
self.driver = MexSrcPds3NaifSpiceDriver(label)
def test_short_mission_name(self):
assert self.driver.short_mission_name=='mex'
def test_odtk(self):
assert self.driver.odtk == [0.0, 0.0, 0.0]
def test_ikid(self):
with patch('ale.drivers.mex_drivers.spice.bods2c', return_value=12345) as bods2c:
assert self.driver.ikid == 12345
bods2c.assert_called_with('MEX_HRSC_SRC')
def test_instrument_id(self):
assert self.driver.instrument_id == 'MEX_HRSC_SRC'
def test_spacecraft_name(self):
assert self.driver.spacecraft_name =='MEX'
def test_focal_length(self):
with patch('ale.drivers.mex_drivers.spice.gdpool', return_value=[10.0]) as gdpool, \
patch('ale.drivers.mex_drivers.spice.bods2c', return_value=-12345) as bods2c:
assert self.driver.ikid == -12345
bods2c.assert_called_with('MEX_HRSC_SRC')
assert self.driver.focal_length == 10.0
def test_focal2pixel_lines(self):
np.testing.assert_almost_equal(self.driver.focal2pixel_lines,
[0.0, 0.0, 111.1111111])
def test_focal2pixel_samples(self):
np.testing.assert_almost_equal(self.driver.focal2pixel_samples,
[0.0, 111.1111111, 0.0])
def test_detector_center_line(self):
assert self.driver.detector_center_line == 512.0
def test_detector_center_sample(self):
assert self.driver.detector_center_sample == 512.0
def test_sensor_model_version(self):
assert self.driver.sensor_model_version == 1
| [
"os.remove",
"ale.load",
"json.loads",
"conftest.compare_dicts",
"ale.drivers.mex_drivers.MexHrscIsisLabelNaifSpiceDriver",
"ale.drivers.mex_drivers.MexSrcPds3NaifSpiceDriver",
"numpy.testing.assert_almost_equal",
"conftest.get_image_kernels",
"pytest.fixture",
"json.dumps",
"conftest.get_image_... | [((376, 392), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (390, 392), False, 'import pytest\n'), ((13061, 13077), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (13075, 13077), False, 'import pytest\n'), ((13361, 13377), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (13375, 13377), False, 'import pytest\n'), ((13148, 13183), 'conftest.get_image_kernels', 'get_image_kernels', (['"""H0010_0023_SR2"""'], {}), "('H0010_0023_SR2')\n", (13165, 13183), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13252, 13276), 'conftest.convert_kernels', 'convert_kernels', (['kernels'], {}), '(kernels)\n', (13267, 13276), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13449, 13484), 'conftest.get_image_kernels', 'get_image_kernels', (['"""h5270_0000_ir2"""'], {}), "('h5270_0000_ir2')\n", (13466, 13484), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13523, 13547), 'conftest.convert_kernels', 'convert_kernels', (['kernels'], {}), '(kernels)\n', (13538, 13547), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13693, 13734), 'conftest.get_image_label', 'get_image_label', (['"""H0010_0023_SR2"""', '"""pds3"""'], {}), "('H0010_0023_SR2', 'pds3')\n", (13708, 13734), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13754, 13771), 'conftest.get_isd', 'get_isd', (['"""mexsrc"""'], {}), "('mexsrc')\n", (13761, 13771), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13786, 13862), 'ale.loads', 'ale.loads', (['label_file'], {'props': "{'kernels': test_mex_src_kernels}", 'verbose': '(True)'}), "(label_file, props={'kernels': test_mex_src_kernels}, verbose=True)\n", 
(13795, 13862), False, 'import ale\n'), ((13877, 13896), 'json.loads', 'json.loads', (['isd_str'], {}), '(isd_str)\n', (13887, 13896), False, 'import json\n'), ((14497, 14537), 'conftest.get_image_label', 'get_image_label', (['"""h5270_0000_ir2"""', 'label'], {}), "('h5270_0000_ir2', label)\n", (14512, 14537), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((13343, 13358), 'os.remove', 'os.remove', (['kern'], {}), '(kern)\n', (13352, 13358), False, 'import os\n'), ((13614, 13629), 'os.remove', 'os.remove', (['kern'], {}), '(kern)\n', (13623, 13629), False, 'import os\n'), ((13907, 13936), 'json.dumps', 'json.dumps', (['isd_obj'], {'indent': '(2)'}), '(isd_obj, indent=2)\n', (13917, 13936), False, 'import json\n'), ((13949, 13985), 'conftest.compare_dicts', 'compare_dicts', (['isd_obj', 'compare_dict'], {}), '(isd_obj, compare_dict)\n', (13962, 13985), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((14548, 14667), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times'\n , new_callable=PropertyMock)\n", (14553, 14667), False, 'from unittest.mock import patch, PropertyMock\n'), ((14712, 14834), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations'\n , new_callable=PropertyMock)\n", (14717, 14834), False, 'from unittest.mock import patch, PropertyMock\n'), ((14882, 14985), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_lines"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_lines',\n 
new_callable=PropertyMock)\n", (14887, 14985), False, 'from unittest.mock import patch, PropertyMock\n'), ((15026, 15136), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscIsisLabelNaifSpiceDriver.ephemeris_time"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscIsisLabelNaifSpiceDriver.ephemeris_time',\n new_callable=PropertyMock)\n", (15031, 15136), False, 'from unittest.mock import patch, PropertyMock\n'), ((15179, 15247), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.read_table_data"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.read_table_data', return_value=12345)\n", (15184, 15247), False, 'from unittest.mock import patch, PropertyMock\n'), ((15278, 15543), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.parse_table"""'], {'return_value': "{'EphemerisTime': [255744599.02748165, 255744684.33197814, \n 255744684.34504557], 'ExposureTime': [0.012800790786743165, \n 0.012907449722290038, 0.013227428436279297], 'LineStart': [1, 6665, 6666]}"}), "('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime':\n [255744599.02748165, 255744684.33197814, 255744684.34504557],\n 'ExposureTime': [0.012800790786743165, 0.012907449722290038, \n 0.013227428436279297], 'LineStart': [1, 6665, 6666]})\n", (15283, 15543), False, 'from unittest.mock import patch, PropertyMock\n'), ((16078, 16166), 'ale.load', 'ale.load', (['label_file'], {'props': "{'kernels': test_mex_hrsc_kernels}", 'formatter': 'formatter'}), "(label_file, props={'kernels': test_mex_hrsc_kernels}, formatter=\n formatter)\n", (16086, 16166), False, 'import ale\n'), ((14238, 14295), 'pytest.param', 'pytest.param', (['"""isis3"""', '"""usgscsm"""'], {'marks': 'pytest.mark.xfail'}), "('isis3', 'usgscsm', marks=pytest.mark.xfail)\n", (14250, 14295), False, 'import pytest\n'), ((14342, 14395), 'pytest.param', 'pytest.param', (['"""pds3"""', '"""isis"""'], {'marks': 'pytest.mark.xfail'}), "('pds3', 'isis', 
marks=pytest.mark.xfail)\n", (14354, 14395), False, 'import pytest\n'), ((16407, 16448), 'conftest.get_image_label', 'get_image_label', (['"""h5270_0000_ir2"""', '"""pds3"""'], {}), "('h5270_0000_ir2', 'pds3')\n", (16422, 16448), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((16472, 16505), 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver', 'MexHrscPds3NaifSpiceDriver', (['label'], {}), '(label)\n', (16498, 16505), False, 'from ale.drivers.mex_drivers import MexHrscPds3NaifSpiceDriver, MexHrscIsisLabelNaifSpiceDriver, MexSrcPds3NaifSpiceDriver\n'), ((22272, 22314), 'conftest.get_image_label', 'get_image_label', (['"""h5270_0000_ir2"""', '"""isis3"""'], {}), "('h5270_0000_ir2', 'isis3')\n", (22287, 22314), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((22338, 22376), 'ale.drivers.mex_drivers.MexHrscIsisLabelNaifSpiceDriver', 'MexHrscIsisLabelNaifSpiceDriver', (['label'], {}), '(label)\n', (22369, 22376), False, 'from ale.drivers.mex_drivers import MexHrscPds3NaifSpiceDriver, MexHrscIsisLabelNaifSpiceDriver, MexSrcPds3NaifSpiceDriver\n'), ((25959, 26000), 'conftest.get_image_label', 'get_image_label', (['"""H0010_0023_SR2"""', '"""pds3"""'], {}), "('H0010_0023_SR2', 'pds3')\n", (25974, 26000), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((26024, 26056), 'ale.drivers.mex_drivers.MexSrcPds3NaifSpiceDriver', 'MexSrcPds3NaifSpiceDriver', (['label'], {}), '(label)\n', (26049, 26056), False, 'from ale.drivers.mex_drivers import MexHrscPds3NaifSpiceDriver, MexHrscIsisLabelNaifSpiceDriver, MexSrcPds3NaifSpiceDriver\n'), ((27037, 27128), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.driver.focal2pixel_lines', '[0.0, 0.0, 111.1111111]'], {}), '(self.driver.focal2pixel_lines, [0.0, 0.0, \n 111.1111111])\n', (27067, 27128), True, 'import numpy as 
np\n'), ((27216, 27309), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.driver.focal2pixel_samples', '[0.0, 111.1111111, 0.0]'], {}), '(self.driver.focal2pixel_samples, [0.0, \n 111.1111111, 0.0])\n', (27246, 27309), True, 'import numpy as np\n'), ((16177, 16254), 'conftest.compare_dicts', 'compare_dicts', (['usgscsm_isd', "usgscsm_compare_dict['h5270_0000_ir2'][formatter]"], {}), "(usgscsm_isd, usgscsm_compare_dict['h5270_0000_ir2'][formatter])\n", (16190, 16254), False, 'from conftest import get_image_label, get_image_kernels, convert_kernels, get_isd, compare_dicts\n'), ((16715, 16780), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.spice.bods2c"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.spice.bods2c', return_value=12345)\n", (16720, 16780), False, 'from unittest.mock import patch, PropertyMock\n'), ((16932, 16997), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.spice.bods2c"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.spice.bods2c', return_value=12345)\n", (16937, 16997), False, 'from unittest.mock import patch, PropertyMock\n'), ((17336, 17432), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid',\n new_callable=PropertyMock)\n", (17341, 17432), False, 'from unittest.mock import patch, PropertyMock\n'), ((17606, 17702), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid',\n new_callable=PropertyMock)\n", (17611, 17702), False, 'from unittest.mock import patch, PropertyMock\n'), ((17782, 17906), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.driver.focal2pixel_lines', '[-7113.11359717265, 0.062856784318668, 142.857129028729]'], {}), 
'(self.driver.focal2pixel_lines, [-\n 7113.11359717265, 0.062856784318668, 142.857129028729])\n', (17812, 17906), True, 'import numpy as np\n'), ((17999, 18095), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid',\n new_callable=PropertyMock)\n", (18004, 18095), False, 'from unittest.mock import patch, PropertyMock\n'), ((18175, 18303), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.driver.focal2pixel_samples', '[-0.778052433438109, -142.857129028729, 0.062856784318668]'], {}), '(self.driver.focal2pixel_samples, [-\n 0.778052433438109, -142.857129028729, 0.062856784318668])\n', (18205, 18303), True, 'import numpy as np\n'), ((18390, 18486), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid',\n new_callable=PropertyMock)\n", (18395, 18486), False, 'from unittest.mock import patch, PropertyMock\n'), ((18566, 18689), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.driver.pixel2focal_x', '[0.016461898406507, -0.006999999322408, 3.079982431615e-06]'], {}), '(self.driver.pixel2focal_x, [\n 0.016461898406507, -0.006999999322408, 3.079982431615e-06])\n', (18596, 18689), True, 'import numpy as np\n'), ((18776, 18872), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.fikid',\n new_callable=PropertyMock)\n", (18781, 18872), False, 'from unittest.mock import patch, PropertyMock\n'), ((18952, 19072), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.driver.pixel2focal_y', '[49.7917927568053, 3.079982431615e-06, 0.006999999322408]'], {}), 
'(self.driver.pixel2focal_y, [49.7917927568053,\n 3.079982431615e-06, 0.006999999322408])\n', (18982, 19072), True, 'import numpy as np\n'), ((19464, 19583), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times'\n , new_callable=PropertyMock)\n", (19469, 19583), False, 'from unittest.mock import patch, PropertyMock\n'), ((19636, 19758), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations'\n , new_callable=PropertyMock)\n", (19641, 19758), False, 'from unittest.mock import patch, PropertyMock\n'), ((19814, 19926), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.ephemeris_start_time"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.ephemeris_start_time'\n , new_callable=PropertyMock)\n", (19819, 19926), False, 'from unittest.mock import patch, PropertyMock\n'), ((20309, 20428), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times'\n , new_callable=PropertyMock)\n", (20314, 20428), False, 'from unittest.mock import patch, PropertyMock\n'), ((20481, 20603), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations'\n , new_callable=PropertyMock)\n", (20486, 20603), False, 'from unittest.mock import patch, PropertyMock\n'), ((20915, 21034), 'unittest.mock.patch', 'patch', 
(['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_ephemeris_times'\n , new_callable=PropertyMock)\n", (20920, 21034), False, 'from unittest.mock import patch, PropertyMock\n'), ((21087, 21209), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations"""'], {'new_callable': 'PropertyMock'}), "(\n 'ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_exposure_durations'\n , new_callable=PropertyMock)\n", (21092, 21209), False, 'from unittest.mock import patch, PropertyMock\n'), ((21265, 21368), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_lines"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.binary_lines',\n new_callable=PropertyMock)\n", (21270, 21368), False, 'from unittest.mock import patch, PropertyMock\n'), ((21417, 21529), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.ephemeris_start_time"""'], {'new_callable': 'PropertyMock'}), "('ale.drivers.mex_drivers.MexHrscPds3NaifSpiceDriver.ephemeris_start_time'\n , new_callable=PropertyMock)\n", (21422, 21529), False, 'from unittest.mock import patch, PropertyMock\n'), ((22509, 22574), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.spice.bods2c"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.spice.bods2c', return_value=12345)\n", (22514, 22574), False, 'from unittest.mock import patch, PropertyMock\n'), ((22726, 22791), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.spice.bods2c"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.spice.bods2c', return_value=12345)\n", (22731, 22791), False, 'from unittest.mock import patch, PropertyMock\n'), ((22957, 23025), 'unittest.mock.patch', 'patch', 
(['"""ale.drivers.mex_drivers.read_table_data"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.read_table_data', return_value=12345)\n", (22962, 23025), False, 'from unittest.mock import patch, PropertyMock\n'), ((23061, 23326), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.parse_table"""'], {'return_value': "{'EphemerisTime': [255744599.02748165, 255744684.33197814, \n 255744684.34504557], 'ExposureTime': [0.012800790786743165, \n 0.012907449722290038, 0.013227428436279297], 'LineStart': [1, 6665, 6666]}"}), "('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime':\n [255744599.02748165, 255744684.33197814, 255744684.34504557],\n 'ExposureTime': [0.012800790786743165, 0.012907449722290038, \n 0.013227428436279297], 'LineStart': [1, 6665, 6666]})\n", (23066, 23326), False, 'from unittest.mock import patch, PropertyMock\n'), ((23593, 23661), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.read_table_data"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.read_table_data', return_value=12345)\n", (23598, 23661), False, 'from unittest.mock import patch, PropertyMock\n'), ((23697, 23962), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.parse_table"""'], {'return_value': "{'EphemerisTime': [255744599.02748165, 255744684.33197814, \n 255744684.34504557], 'ExposureTime': [0.012800790786743165, \n 0.012907449722290038, 0.013227428436279297], 'LineStart': [1, 6665, 6666]}"}), "('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime':\n [255744599.02748165, 255744684.33197814, 255744684.34504557],\n 'ExposureTime': [0.012800790786743165, 0.012907449722290038, \n 0.013227428436279297], 'LineStart': [1, 6665, 6666]})\n", (23702, 23962), False, 'from unittest.mock import patch, PropertyMock\n'), ((24357, 24425), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.read_table_data"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.read_table_data', 
return_value=12345)\n", (24362, 24425), False, 'from unittest.mock import patch, PropertyMock\n'), ((24461, 24726), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.parse_table"""'], {'return_value': "{'EphemerisTime': [255744599.02748165, 255744684.33197814, \n 255744684.34504557], 'ExposureTime': [0.012800790786743165, \n 0.012907449722290038, 0.013227428436279297], 'LineStart': [1, 6665, 6666]}"}), "('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime':\n [255744599.02748165, 255744684.33197814, 255744684.34504557],\n 'ExposureTime': [0.012800790786743165, 0.012907449722290038, \n 0.013227428436279297], 'LineStart': [1, 6665, 6666]})\n", (24466, 24726), False, 'from unittest.mock import patch, PropertyMock\n'), ((25045, 25113), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.read_table_data"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.read_table_data', return_value=12345)\n", (25050, 25113), False, 'from unittest.mock import patch, PropertyMock\n'), ((25149, 25414), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.parse_table"""'], {'return_value': "{'EphemerisTime': [255744599.02748165, 255744684.33197814, \n 255744684.34504557], 'ExposureTime': [0.012800790786743165, \n 0.012907449722290038, 0.013227428436279297], 'LineStart': [1, 6665, 6666]}"}), "('ale.drivers.mex_drivers.parse_table', return_value={'EphemerisTime':\n [255744599.02748165, 255744684.33197814, 255744684.34504557],\n 'ExposureTime': [0.012800790786743165, 0.012907449722290038, \n 0.013227428436279297], 'LineStart': [1, 6665, 6666]})\n", (25154, 25414), False, 'from unittest.mock import patch, PropertyMock\n'), ((26266, 26331), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.spice.bods2c"""'], {'return_value': '(12345)'}), "('ale.drivers.mex_drivers.spice.bods2c', return_value=12345)\n", (26271, 26331), False, 'from unittest.mock import patch, PropertyMock\n'), ((26671, 26737), 'unittest.mock.patch', 'patch', 
(['"""ale.drivers.mex_drivers.spice.gdpool"""'], {'return_value': '[10.0]'}), "('ale.drivers.mex_drivers.spice.gdpool', return_value=[10.0])\n", (26676, 26737), False, 'from unittest.mock import patch, PropertyMock\n'), ((26759, 26825), 'unittest.mock.patch', 'patch', (['"""ale.drivers.mex_drivers.spice.bods2c"""'], {'return_value': '(-12345)'}), "('ale.drivers.mex_drivers.spice.bods2c', return_value=-12345)\n", (26764, 26825), False, 'from unittest.mock import patch, PropertyMock\n')] |
#! /usr/bin/env python
# by weil
# Sep 24, 2020
# save as anndata
#
# Converts the MacParland liver scRNA-seq download (GSE115469) into a single
# AnnData .h5ad file: expression matrix + per-cell metadata (donor, cluster,
# cell cycle, dataset info, cell-ontology terms), plus a highly-variable-gene
# selection plot saved alongside the output.
import pandas as pd
import numpy as np
import scipy
import os
import scanpy as sc
from anndata import AnnData
# expr matrix (stored genes x cells on disk)
expr_mat=pd.read_csv("../download/MacParland/GSE115469_Data.csv.gz", index_col=0)
# reshape to cell * gene (AnnData convention: obs = cells, var = genes)
expr_mat=expr_mat.T
# cell meta
meta_df=pd.read_csv("../download/MacParland/Cell_clusterID_cycle.txt", sep="\t", index_col=0)
meta_df.columns=["donor", "barcode", "cluster", "cell_cycle"]
meta_df=meta_df.drop(columns="barcode")
# keep only the 2-character donor prefix (e.g. "P1") for merging below
meta_df["donor"]=meta_df["donor"].str.slice(0, 2)
# add donor meta
donor_meta=pd.read_csv("../download/MacParland/donor_annotation.csv")
meta_df["cell_id"]=meta_df.index
meta_df1=meta_df.merge(donor_meta, on="donor")
# add cluster annotation
cluster_annotation=pd.read_csv("../download/MacParland/cluster_annotation.csv")
meta_df2=meta_df1.merge(cluster_annotation, on="cluster")
# merge() discards the index, so restore cell_id as index and re-align rows
# back to the original cell order
meta_df2.index = meta_df2["cell_id"]
meta_df2=meta_df2.reindex(meta_df.index)
meta_df2=meta_df2.drop(columns="cell_id")
# datasets meta
datasets_meta=pd.read_csv("../ACA_datasets.csv", header=0, index_col=False)
# cell ontology
cell_ontology = pd.read_csv("../cell_ontology/liver_cell_ontology.csv",
                           usecols=["cell_type1", "cell_ontology_class", "cell_ontology_id"])
# gene_meta
gene_meta=pd.DataFrame(index=expr_mat.columns)
output_dir="../data/MacParland_anndata"
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# add dataset_meta (scalar per-dataset fields broadcast onto every cell)
dataset_name="MacParland"
meta_df2["organism"]=datasets_meta.loc[datasets_meta["dataset_name"]==dataset_name, "organism"].item()
meta_df2["dataset_name"]=datasets_meta.loc[datasets_meta["dataset_name"]==dataset_name, "dataset_name"].item()
meta_df2["platform"]=datasets_meta.loc[datasets_meta["dataset_name"]==dataset_name, "platform"].item()
meta_df2["organ"]=datasets_meta.loc[datasets_meta["dataset_name"]==dataset_name, "organ"].item()
# add CL (cell-ontology class/id); same reindex trick to restore row order
meta_df2["cell_id"] = meta_df2.index
meta_df3 = meta_df2.merge(cell_ontology, left_on="cell_type1", right_on="cell_type1")
meta_df3.index = meta_df3["cell_id"]
meta_df3=meta_df3.reindex(meta_df2["cell_id"])
meta_df2=meta_df3.drop(columns="cell_id")
# AnnData
if isinstance(expr_mat, pd.DataFrame):
    adata=AnnData(X=expr_mat.values, obs=meta_df2, var=gene_meta)
else:
    adata=AnnData(X=expr_mat, obs=meta_df2, var=gene_meta)
# stash raw counts; the normalize/log transform below is only for HVG selection
adata.raw = adata
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)
print("Selecting scanpy genes...")
sc.pp.highly_variable_genes(adata, min_mean=0.05, max_mean=3, min_disp=0.8, inplace=True)
print(np.sum(adata.var["highly_variable"]), "scanpy genes")
sc.pl.highly_variable_genes(adata, save=".pdf")
import shutil
# scanpy writes the plot under ./figures/ with a fixed name; relocate it
shutil.move("./figures/filter_genes_dispersion.pdf", os.path.join(output_dir, "scanpy_genes.pdf"))
# restore the raw counts into X before saving, then drop .raw
adata.X = adata.raw.X
adata.raw = None
print("Saving results...")
adata.write(os.path.join(output_dir, "data.h5ad"), compression="gzip", compression_opts=1)
| [
"pandas.DataFrame",
"numpy.sum",
"scanpy.pp.highly_variable_genes",
"os.makedirs",
"os.path.join",
"pandas.read_csv",
"os.path.exists",
"scanpy.pl.highly_variable_genes",
"scanpy.pp.log1p",
"anndata.AnnData",
"scanpy.pp.normalize_total"
] | [((202, 274), 'pandas.read_csv', 'pd.read_csv', (['"""../download/MacParland/GSE115469_Data.csv.gz"""'], {'index_col': '(0)'}), "('../download/MacParland/GSE115469_Data.csv.gz', index_col=0)\n", (213, 274), True, 'import pandas as pd\n'), ((342, 431), 'pandas.read_csv', 'pd.read_csv', (['"""../download/MacParland/Cell_clusterID_cycle.txt"""'], {'sep': '"""\t"""', 'index_col': '(0)'}), "('../download/MacParland/Cell_clusterID_cycle.txt', sep='\\t',\n index_col=0)\n", (353, 431), True, 'import pandas as pd\n'), ((609, 667), 'pandas.read_csv', 'pd.read_csv', (['"""../download/MacParland/donor_annotation.csv"""'], {}), "('../download/MacParland/donor_annotation.csv')\n", (620, 667), True, 'import pandas as pd\n'), ((793, 853), 'pandas.read_csv', 'pd.read_csv', (['"""../download/MacParland/cluster_annotation.csv"""'], {}), "('../download/MacParland/cluster_annotation.csv')\n", (804, 853), True, 'import pandas as pd\n'), ((1063, 1124), 'pandas.read_csv', 'pd.read_csv', (['"""../ACA_datasets.csv"""'], {'header': '(0)', 'index_col': '(False)'}), "('../ACA_datasets.csv', header=0, index_col=False)\n", (1074, 1124), True, 'import pandas as pd\n'), ((1157, 1284), 'pandas.read_csv', 'pd.read_csv', (['"""../cell_ontology/liver_cell_ontology.csv"""'], {'usecols': "['cell_type1', 'cell_ontology_class', 'cell_ontology_id']"}), "('../cell_ontology/liver_cell_ontology.csv', usecols=[\n 'cell_type1', 'cell_ontology_class', 'cell_ontology_id'])\n", (1168, 1284), True, 'import pandas as pd\n'), ((1331, 1367), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'expr_mat.columns'}), '(index=expr_mat.columns)\n', (1343, 1367), True, 'import pandas as pd\n'), ((2400, 2448), 'scanpy.pp.normalize_total', 'sc.pp.normalize_total', (['adata'], {'target_sum': '(10000.0)'}), '(adata, target_sum=10000.0)\n', (2421, 2448), True, 'import scanpy as sc\n'), ((2445, 2463), 'scanpy.pp.log1p', 'sc.pp.log1p', (['adata'], {}), '(adata)\n', (2456, 2463), True, 'import scanpy as sc\n'), ((2499, 2592), 
'scanpy.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['adata'], {'min_mean': '(0.05)', 'max_mean': '(3)', 'min_disp': '(0.8)', 'inplace': '(True)'}), '(adata, min_mean=0.05, max_mean=3, min_disp=0.8,\n inplace=True)\n', (2526, 2592), True, 'import scanpy as sc\n'), ((2649, 2696), 'scanpy.pl.highly_variable_genes', 'sc.pl.highly_variable_genes', (['adata'], {'save': '""".pdf"""'}), "(adata, save='.pdf')\n", (2676, 2696), True, 'import scanpy as sc\n'), ((1416, 1442), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1430, 1442), False, 'import os\n'), ((1448, 1471), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1459, 1471), False, 'import os\n'), ((2261, 2316), 'anndata.AnnData', 'AnnData', ([], {'X': 'expr_mat.values', 'obs': 'meta_df2', 'var': 'gene_meta'}), '(X=expr_mat.values, obs=meta_df2, var=gene_meta)\n', (2268, 2316), False, 'from anndata import AnnData\n'), ((2333, 2381), 'anndata.AnnData', 'AnnData', ([], {'X': 'expr_mat', 'obs': 'meta_df2', 'var': 'gene_meta'}), '(X=expr_mat, obs=meta_df2, var=gene_meta)\n', (2340, 2381), False, 'from anndata import AnnData\n'), ((2595, 2631), 'numpy.sum', 'np.sum', (["adata.var['highly_variable']"], {}), "(adata.var['highly_variable'])\n", (2601, 2631), True, 'import numpy as np\n'), ((2764, 2808), 'os.path.join', 'os.path.join', (['output_dir', '"""scanpy_genes.pdf"""'], {}), "(output_dir, 'scanpy_genes.pdf')\n", (2776, 2808), False, 'import os\n'), ((2890, 2927), 'os.path.join', 'os.path.join', (['output_dir', '"""data.h5ad"""'], {}), "(output_dir, 'data.h5ad')\n", (2902, 2927), False, 'import os\n')] |
from numpy.random import RandomState
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from random import Random
import time
# Module-wide RNG state, seeded from the wall clock at import time.
# All three generators share one seed so runs are internally consistent.
seed = int(time.time())
py_rng = Random(seed)        # Python stdlib RNG
np_rng = RandomState(seed)   # NumPy RNG
t_rng = RandomStreams(seed)  # Theano symbolic RNG
def set_seed(n):
    """Re-seed the module-level RNGs (stdlib, NumPy, Theano) with ``n``.

    Rebinds the shared ``seed``, ``py_rng``, ``np_rng`` and ``t_rng``
    globals so that all subsequent random draws are reproducible.
    """
    global seed, py_rng, np_rng, t_rng
    seed = n
    py_rng, np_rng, t_rng = Random(n), RandomState(n), RandomStreams(n)
| [
"random.Random",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"numpy.random.RandomState",
"time.time"
] | [((180, 192), 'random.Random', 'Random', (['seed'], {}), '(seed)\n', (186, 192), False, 'from random import Random\n'), ((202, 219), 'numpy.random.RandomState', 'RandomState', (['seed'], {}), '(seed)\n', (213, 219), False, 'from numpy.random import RandomState\n'), ((228, 247), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', (['seed'], {}), '(seed)\n', (241, 247), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((157, 168), 'time.time', 'time.time', ([], {}), '()\n', (166, 168), False, 'import time\n'), ((336, 348), 'random.Random', 'Random', (['seed'], {}), '(seed)\n', (342, 348), False, 'from random import Random\n'), ((362, 379), 'numpy.random.RandomState', 'RandomState', (['seed'], {}), '(seed)\n', (373, 379), False, 'from numpy.random import RandomState\n'), ((392, 411), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', (['seed'], {}), '(seed)\n', (405, 411), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n')] |
import tvm
import numpy as np
import torch
# Test-case dimensions: batch N, input channels nC, spatial H x W, output
# channels K, filter size R x S (unused below), and symmetric zero padding.
N = 2
nC = 16
H = 14
W = 14
K = 16
R = 3
S = 3
padding = 1
P = H + 2 * padding
Q = W + 2 * padding
dtype = "float32"
# A is the input tensor; C simply zero-pads A spatially (identity inside the
# padded border, 0 outside), so d(C)/d(A) should be a crop of the upstream
# gradient dC.
# NOTE(review): C indexes A with the *output*-channel index k, which is only
# valid because nC == K in this test -- confirm before generalizing.
A = tvm.te.placeholder([N, nC, H, W], dtype=dtype, name="A")
C = tvm.te.compute([N, K, P, Q],
    lambda n, k, h, w :
        tvm.tir.if_then_else(
            tvm.tir.all(h >= padding, h < P-padding, w >= padding, w < Q-padding),
            A[n, k, h-padding, w-padding], 0.0),
        name="C")
# Upstream gradient placeholder with the padded output shape.
dC = tvm.te.placeholder([N, K, P, Q], dtype=dtype, name="dC")
print(C.op.body[0].name)
print(type(C.op.body[0].args[1]))
# Automatic differentiation: dA = d(C)/d(A) contracted with dC.
dA = tvm.te.grad_op(A, C, dC)
s = tvm.te.create_schedule(dA.op)
print(tvm.lower(s, [A, dC, dA], simple_mode=True))
func = tvm.build(s, [A, dC, dA], target="llvm")
# Random inputs; dA_np is the output buffer the built kernel fills in.
A_np = np.random.uniform(-10, 10, [N, nC, H, W]).astype("float32")
dC_np = np.random.uniform(-10, 10, [N, K, P, Q]).astype("float32")
dA_np = np.zeros([N, nC, H, W]).astype("float32")
ctx = tvm.context("llvm", 0)
A_tvm = tvm.nd.array(A_np, ctx)
dC_tvm = tvm.nd.array(dC_np, ctx)
dA_tvm = tvm.nd.array(dA_np, ctx)
func(A_tvm, dC_tvm, dA_tvm)
print(dA_tvm)
# =======>
# compare the results with numpy
# Reference gradient: since C is a pure zero-pad of A, dA is the interior
# crop of dC (the padded border contributes nothing).
golden_np = dC_np[:,:, padding:P-padding, padding:Q-padding]
tvm.testing.assert_allclose(dA_tvm.asnumpy(), golden_np, rtol=1e-30)
print("Compare with Numpy success!")
"numpy.random.uniform",
"tvm.te.placeholder",
"tvm.nd.array",
"tvm.context",
"numpy.zeros",
"tvm.build",
"tvm.te.grad_op",
"tvm.te.create_schedule",
"tvm.lower",
"tvm.tir.all"
] | [((171, 227), 'tvm.te.placeholder', 'tvm.te.placeholder', (['[N, nC, H, W]'], {'dtype': 'dtype', 'name': '"""A"""'}), "([N, nC, H, W], dtype=dtype, name='A')\n", (189, 227), False, 'import tvm\n'), ((447, 503), 'tvm.te.placeholder', 'tvm.te.placeholder', (['[N, K, P, Q]'], {'dtype': 'dtype', 'name': '"""dC"""'}), "([N, K, P, Q], dtype=dtype, name='dC')\n", (465, 503), False, 'import tvm\n'), ((571, 595), 'tvm.te.grad_op', 'tvm.te.grad_op', (['A', 'C', 'dC'], {}), '(A, C, dC)\n', (585, 595), False, 'import tvm\n'), ((601, 630), 'tvm.te.create_schedule', 'tvm.te.create_schedule', (['dA.op'], {}), '(dA.op)\n', (623, 630), False, 'import tvm\n'), ((691, 731), 'tvm.build', 'tvm.build', (['s', '[A, dC, dA]'], {'target': '"""llvm"""'}), "(s, [A, dC, dA], target='llvm')\n", (700, 731), False, 'import tvm\n'), ((924, 946), 'tvm.context', 'tvm.context', (['"""llvm"""', '(0)'], {}), "('llvm', 0)\n", (935, 946), False, 'import tvm\n'), ((955, 978), 'tvm.nd.array', 'tvm.nd.array', (['A_np', 'ctx'], {}), '(A_np, ctx)\n', (967, 978), False, 'import tvm\n'), ((988, 1012), 'tvm.nd.array', 'tvm.nd.array', (['dC_np', 'ctx'], {}), '(dC_np, ctx)\n', (1000, 1012), False, 'import tvm\n'), ((1022, 1046), 'tvm.nd.array', 'tvm.nd.array', (['dA_np', 'ctx'], {}), '(dA_np, ctx)\n', (1034, 1046), False, 'import tvm\n'), ((638, 681), 'tvm.lower', 'tvm.lower', (['s', '[A, dC, dA]'], {'simple_mode': '(True)'}), '(s, [A, dC, dA], simple_mode=True)\n', (647, 681), False, 'import tvm\n'), ((740, 781), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '[N, nC, H, W]'], {}), '(-10, 10, [N, nC, H, W])\n', (757, 781), True, 'import numpy as np\n'), ((808, 848), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '[N, K, P, Q]'], {}), '(-10, 10, [N, K, P, Q])\n', (825, 848), True, 'import numpy as np\n'), ((875, 898), 'numpy.zeros', 'np.zeros', (['[N, nC, H, W]'], {}), '([N, nC, H, W])\n', (883, 898), True, 'import numpy as np\n'), ((315, 388), 'tvm.tir.all', 'tvm.tir.all', 
(['(h >= padding)', '(h < P - padding)', '(w >= padding)', '(w < Q - padding)'], {}), '(h >= padding, h < P - padding, w >= padding, w < Q - padding)\n', (326, 388), False, 'import tvm\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Approximating Runge's function
#
# **<NAME>, PhD**
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
# Original (Matlab) CompEcon file: **demapp04.m**
#
# Running this file requires the Python version of CompEcon. This can be installed with pip by running
#
# !pip install compecon --upgrade
#
# <i>Last updated: 2021-Oct-01</i>
# <hr>
# ## About
#
# Uniform-node and Chebyshev-node polynomial approximation of Runge's function and compute condition numbers of associated interpolation matrices
# ## Initial tasks
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm, cond
from compecon import BasisChebyshev, demo
import warnings
warnings.simplefilter('ignore')
# ### Runge function
# In[2]:
runge = lambda x: 1 / (1 + 25 * x ** 2)
# Set points of approximation interval
# In[3]:
a, b = -1, 1
# Construct plotting grid
# In[4]:
nplot = 1001
x = np.linspace(a, b, nplot)
y = runge(x)
# ## Plot Runge's Function
# Initialize data matrices
# In[5]:
# n: odd polynomial degrees 3, 5, ..., 31 -- one error row per degree.
n = np.arange(3, 33, 2)
nn = n.size
errunif, errcheb = (np.zeros([nn, nplot]) for k in range(2))
nrmunif, nrmcheb, conunif, concheb = (np.zeros(nn) for k in range(4))
# Compute approximation errors on refined grid and interpolation matrix condition numbers
# In[6]:
for i in range(nn):
    # Uniform-node monomial-basis approximant
    xnodes = np.linspace(a, b, n[i])
    c = np.polyfit(xnodes, runge(xnodes), n[i])
    yfit = np.polyval(c, x)
    # Vandermonde matrix of the monomial basis at the uniform nodes
    phi = xnodes.reshape(-1, 1) ** np.arange(n[i])
    errunif[i] = yfit - y
    nrmunif[i] = np.log10(norm(yfit - y, np.inf))  # log10 sup-norm error
    conunif[i] = np.log10(cond(phi, 2))          # log10 2-norm condition number
    # Chebychev-node Chebychev-basis approximant
    yapprox = BasisChebyshev(n[i], a, b, f=runge)
    yfit = yapprox(x) # [0] no longer needed? # index zero is to eliminate one dimension
    phi = yapprox.Phi()
    errcheb[i] = yfit - y
    nrmcheb[i] = np.log10(norm(yfit - y, np.inf))
    concheb[i] = np.log10(cond(phi, 2))
# Plot Chebychev- and uniform node polynomial approximation errors
# In[7]:
figs = []
fig1, ax = plt.subplots()
ax.plot(x, y)
ax.text(-0.8, 0.8, r'$y = \frac{1}{1+25x^2}$', fontsize=18)
ax.set(xticks=[], title="Runge's Function", xlabel='', ylabel='y');
figs.append(fig1)
# In[8]:
fig2, ax = plt.subplots()
ax.hlines(0, a, b, 'gray', '--')
# index 4 corresponds to degree n = 11 (n runs 3, 5, 7, 9, 11, ...)
ax.plot(x, errcheb[4], label='Chebychev Nodes')
ax.plot(x, errunif[4], label='Uniform Nodes')
ax.legend(loc='upper center')
ax.set(title="Runge's Function $11^{th}$-Degree\nPolynomial Approximation Error.", xlabel='x', ylabel='Error')
figs.append(fig2)
# Plot approximation error per degree of approximation
# In[9]:
fig3, ax = plt.subplots()
ax.plot(n, nrmcheb, label='Chebychev Nodes')
ax.plot(n, nrmunif, label='Uniform Nodes')
ax.legend(loc='upper left')
ax.set(title="Log10 Polynomial Approximation Error for Runge's Function",xlabel='', ylabel='Log10 Error', xticks=[])
figs.append(fig3)
# In[10]:
fig4, ax = plt.subplots()
ax.plot(n, concheb, label='Chebychev Polynomial Basis')
ax.plot(n, conunif, label='Mononomial Basis')
ax.legend(loc='upper left')
ax.set(title="Log10 Interpolation Matrix Condition Number",
       xlabel='Degree of Approximating Polynomial',
       ylabel='Log10 Condition Number')
figs.append(fig4)
# ### Save all figures to disc
# In[11]:
#demo.savefig(figs, name='demapp04')
| [
"warnings.simplefilter",
"numpy.polyval",
"numpy.zeros",
"numpy.linalg.cond",
"compecon.BasisChebyshev",
"numpy.arange",
"numpy.linalg.norm",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((910, 941), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (931, 941), False, 'import warnings\n'), ((1140, 1164), 'numpy.linspace', 'np.linspace', (['a', 'b', 'nplot'], {}), '(a, b, nplot)\n', (1151, 1164), True, 'import numpy as np\n'), ((1251, 1270), 'numpy.arange', 'np.arange', (['(3)', '(33)', '(2)'], {}), '(3, 33, 2)\n', (1260, 1270), True, 'import numpy as np\n'), ((2298, 2312), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2310, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2511), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2509, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2878, 2892), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2890, 2892), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3183), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3181, 3183), True, 'import matplotlib.pyplot as plt\n'), ((1303, 1324), 'numpy.zeros', 'np.zeros', (['[nn, nplot]'], {}), '([nn, nplot])\n', (1311, 1324), True, 'import numpy as np\n'), ((1382, 1394), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (1390, 1394), True, 'import numpy as np\n'), ((1597, 1620), 'numpy.linspace', 'np.linspace', (['a', 'b', 'n[i]'], {}), '(a, b, n[i])\n', (1608, 1620), True, 'import numpy as np\n'), ((1680, 1696), 'numpy.polyval', 'np.polyval', (['c', 'x'], {}), '(c, x)\n', (1690, 1696), True, 'import numpy as np\n'), ((1929, 1964), 'compecon.BasisChebyshev', 'BasisChebyshev', (['n[i]', 'a', 'b'], {'f': 'runge'}), '(n[i], a, b, f=runge)\n', (1943, 1964), False, 'from compecon import BasisChebyshev, demo\n'), ((1732, 1747), 'numpy.arange', 'np.arange', (['n[i]'], {}), '(n[i])\n', (1741, 1747), True, 'import numpy as np\n'), ((1801, 1823), 'numpy.linalg.norm', 'norm', (['(yfit - y)', 'np.inf'], {}), '(yfit - y, np.inf)\n', (1805, 1823), False, 'from numpy.linalg import norm, cond\n'), ((1851, 1863), 'numpy.linalg.cond', 'cond', (['phi', 
'(2)'], {}), '(phi, 2)\n', (1855, 1863), False, 'from numpy.linalg import norm, cond\n'), ((2132, 2154), 'numpy.linalg.norm', 'norm', (['(yfit - y)', 'np.inf'], {}), '(yfit - y, np.inf)\n', (2136, 2154), False, 'from numpy.linalg import norm, cond\n'), ((2182, 2194), 'numpy.linalg.cond', 'cond', (['phi', '(2)'], {}), '(phi, 2)\n', (2186, 2194), False, 'from numpy.linalg import norm, cond\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 31 14:50:41 2018
@author: mimbres
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from torch.backends import cudnn
import numpy as np
import glob, os
import argparse
from tqdm import trange, tqdm
from spotify_data_loader import SpotifyDataloader
from utils.eval import evaluate
from blocks.highway_dil_conv import HighwayDCBlock
cudnn.benchmark = True

# ---------------------------------------------------------------------------
# Command-line interface / experiment configuration.
# NOTE(review): argparse's type=bool is a known pitfall -- bool("False") is
# True, so any value passed on the CLI turns the flag on. Kept as-is to
# preserve the existing CLI surface.
parser = argparse.ArgumentParser(description="Sequence Skip Prediction")
parser.add_argument("-c","--config",type=str, default="./config_init_dataset.json")
parser.add_argument("-s","--save_path",type=str, default="./save/exp_seq1H_genlog128/")
parser.add_argument("-l","--load_continue_latest",type=str, default=None)
# FIX: USE_SUPLOG was referenced below but never defined anywhere (NameError
# at import time); expose it as a CLI flag. Default True keeps INPUT_DIM=72,
# matching the model's defaults -- TODO confirm the intended default.
parser.add_argument("-sup","--use_suplog", type=bool, default=True)
parser.add_argument("-glu","--use_glu", type=bool, default=False)
parser.add_argument("-w","--class_num",type=int, default = 2)
parser.add_argument("-e","--epochs",type=int, default= 15)
parser.add_argument("-lr","--learning_rate", type=float, default = 0.001)
parser.add_argument("-b","--train_batch_size", type=int, default = 2048)
parser.add_argument("-tsb","--test_batch_size", type=int, default = 1024)
parser.add_argument("-g","--gpu",type=int, default=0)
args = parser.parse_args()

# Hyper Parameters
USE_SUPLOG = args.use_suplog
USE_GLU = args.use_glu
INPUT_DIM = 72 if USE_SUPLOG else 31  # per-step feature dim with/without support-log features
CLASS_NUM = args.class_num
EPOCHS = args.epochs
LEARNING_RATE = args.learning_rate
TR_BATCH_SZ = args.train_batch_size
TS_BATCH_SZ = args.test_batch_size
GPU = args.gpu

# Model-save directory (created up front so checkpointing cannot fail later)
MODEL_SAVE_PATH = args.save_path
os.makedirs(os.path.dirname(MODEL_SAVE_PATH), exist_ok=True)

# Running history for train/validation loss & accuracy curves.
hist_trloss = list()
hist_tracc = list()
hist_vloss = list()
hist_vacc = list()
hist_trloss_qlog = list()
hist_trloss_skip = list()
hist_vloss_qlog = list()
hist_vloss_skip = list()
np.set_printoptions(precision=3)
class SeqFeatEnc(nn.Module):
def __init__(self, input_dim, e_ch, #d_ch=256,
#h_io_chs=[256, 256, 256, 256, 256, 256, 256],
d_ch,
h_io_chs=[1,1,1,1,1,1,1],
h_k_szs=[2,2,2,2,2,1,1],
h_dils=[1,2,4,8,16,1,1],
# h_dils=[1,2,4,1,2,4,1,2,4,1,1,1,1], #이것도 Receptive Field가 20인데 왜 안되는걸까??????
use_glu=False):
super(SeqFeatEnc, self).__init__()
h_io_chs[:] = [n * d_ch for n in h_io_chs]
# Layers:
self.mlp = nn.Sequential(nn.Conv1d(input_dim,e_ch,1),
nn.ReLU(),
nn.Conv1d(e_ch,d_ch,1))
self.h_block = HighwayDCBlock(h_io_chs, h_k_szs, h_dils, causality=True, use_glu=use_glu)
return None
def forward(self, x):
# Input={{x_sup,x_que};{label_sup,label_que}} BxC*T (Bx(29+1)*20), audio feat dim=29, label dim=1, n_sup+n_que=20
# Input bx30x20
x = self.mlp(x) # bx128*20
x = self.h_block(x) #bx256*20, 여기서 attention 쓰려면 split 128,128
return x#x[:,:128,:]
class SeqClassifier(nn.Module):
def __init__(self, input_ch, e_ch,
h_io_chs=[1,1,1,1,1,1,1],
h_k_szs=[2,2,2,2,2,1,1],
h_dils=[1,2,4,8,16,1,1],
use_glu=False):
super(SeqClassifier, self).__init__()
h_io_chs[:] = [n * e_ch for n in h_io_chs]
self.front_1x1 = nn.Conv1d(input_ch, e_ch,1)
self.h_block = HighwayDCBlock(h_io_chs, h_k_szs, h_dils, causality=True, use_glu=use_glu)
self.last_1x1 = nn.Sequential(nn.Conv1d(e_ch,e_ch,1), nn.ReLU(),
nn.Conv1d(e_ch,e_ch,1), nn.ReLU())
self.classifier = nn.Sequential(nn.Conv1d(e_ch,e_ch,1), nn.ReLU(),
nn.Conv1d(e_ch,e_ch,1))#nn.Conv1d(e_ch,1,1))
def forward(self, x): # Input:bx256*20
x = self.front_1x1(x) # bx128*20
x = self.h_block(x) # bx128*20
x = self.last_1x1(x) # bx64*20
return self.classifier(x).squeeze(1) # bx20
class SeqModel(nn.Module):
def __init__(self, input_dim=INPUT_DIM, e_ch=128, d_ch=128, use_glu=USE_GLU):
super(SeqModel, self).__init__()
self.enc = SeqFeatEnc(input_dim=input_dim, e_ch=e_ch, d_ch=d_ch, use_glu=use_glu)
self.clf = SeqClassifier(input_ch=d_ch, e_ch=e_ch, use_glu=use_glu)
self.qlog_classifier = nn.Sequential(nn.Conv1d(e_ch,e_ch,1), nn.ReLU(),
nn.Conv1d(e_ch,41,1))#nn.Conv1d(e_ch,1,1))
self.skip_classifier = nn.Sequential(nn.Conv1d(e_ch,e_ch,1), nn.ReLU(),
nn.Conv1d(e_ch,1,1))#nn.Conv1d(e_ch,1,1))
def forward(self, x):
x = self.enc(x) # bx128x20
x = self.clf(x) # bx128x20
#x_qlog, x_skip = x[:,:41,:], x[:,41,:]
x_qlog = self.qlog_classifier(x) # bx41*20
x_skip = self.skip_classifier(x).squeeze(1) # bx20
return x_qlog, x_skip | [
"numpy.set_printoptions",
"torch.nn.ReLU",
"argparse.ArgumentParser",
"os.path.dirname",
"torch.nn.Conv1d",
"blocks.highway_dil_conv.HighwayDCBlock"
] | [((539, 602), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sequence Skip Prediction"""'}), "(description='Sequence Skip Prediction')\n", (562, 602), False, 'import argparse\n'), ((1901, 1933), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (1920, 1933), True, 'import numpy as np\n'), ((1661, 1693), 'os.path.dirname', 'os.path.dirname', (['MODEL_SAVE_PATH'], {}), '(MODEL_SAVE_PATH)\n', (1676, 1693), False, 'import glob, os\n'), ((2658, 2732), 'blocks.highway_dil_conv.HighwayDCBlock', 'HighwayDCBlock', (['h_io_chs', 'h_k_szs', 'h_dils'], {'causality': '(True)', 'use_glu': 'use_glu'}), '(h_io_chs, h_k_szs, h_dils, causality=True, use_glu=use_glu)\n', (2672, 2732), False, 'from blocks.highway_dil_conv import HighwayDCBlock\n'), ((3425, 3453), 'torch.nn.Conv1d', 'nn.Conv1d', (['input_ch', 'e_ch', '(1)'], {}), '(input_ch, e_ch, 1)\n', (3434, 3453), True, 'import torch.nn as nn\n'), ((3476, 3550), 'blocks.highway_dil_conv.HighwayDCBlock', 'HighwayDCBlock', (['h_io_chs', 'h_k_szs', 'h_dils'], {'causality': '(True)', 'use_glu': 'use_glu'}), '(h_io_chs, h_k_szs, h_dils, causality=True, use_glu=use_glu)\n', (3490, 3550), False, 'from blocks.highway_dil_conv import HighwayDCBlock\n'), ((2505, 2534), 'torch.nn.Conv1d', 'nn.Conv1d', (['input_dim', 'e_ch', '(1)'], {}), '(input_dim, e_ch, 1)\n', (2514, 2534), True, 'import torch.nn as nn\n'), ((2567, 2576), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2574, 2576), True, 'import torch.nn as nn\n'), ((2611, 2635), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', 'd_ch', '(1)'], {}), '(e_ch, d_ch, 1)\n', (2620, 2635), True, 'import torch.nn as nn\n'), ((3590, 3614), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', 'e_ch', '(1)'], {}), '(e_ch, e_ch, 1)\n', (3599, 3614), True, 'import torch.nn as nn\n'), ((3614, 3623), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3621, 3623), True, 'import torch.nn as nn\n'), ((3664, 3688), 'torch.nn.Conv1d', 'nn.Conv1d', 
(['e_ch', 'e_ch', '(1)'], {}), '(e_ch, e_ch, 1)\n', (3673, 3688), True, 'import torch.nn as nn\n'), ((3688, 3697), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3695, 3697), True, 'import torch.nn as nn\n'), ((3743, 3767), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', 'e_ch', '(1)'], {}), '(e_ch, e_ch, 1)\n', (3752, 3767), True, 'import torch.nn as nn\n'), ((3767, 3776), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3774, 3776), True, 'import torch.nn as nn\n'), ((3818, 3842), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', 'e_ch', '(1)'], {}), '(e_ch, e_ch, 1)\n', (3827, 3842), True, 'import torch.nn as nn\n'), ((4449, 4473), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', 'e_ch', '(1)'], {}), '(e_ch, e_ch, 1)\n', (4458, 4473), True, 'import torch.nn as nn\n'), ((4473, 4482), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4480, 4482), True, 'import torch.nn as nn\n'), ((4524, 4546), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', '(41)', '(1)'], {}), '(e_ch, 41, 1)\n', (4533, 4546), True, 'import torch.nn as nn\n'), ((4612, 4636), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', 'e_ch', '(1)'], {}), '(e_ch, e_ch, 1)\n', (4621, 4636), True, 'import torch.nn as nn\n'), ((4636, 4645), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4643, 4645), True, 'import torch.nn as nn\n'), ((4687, 4708), 'torch.nn.Conv1d', 'nn.Conv1d', (['e_ch', '(1)', '(1)'], {}), '(e_ch, 1, 1)\n', (4696, 4708), True, 'import torch.nn as nn\n')] |
import json
import argparse
import torch
import os
import random
import numpy as np
import requests
import logging
import math
import copy
import string
from tqdm import tqdm
from time import time
from flask import Flask, request, jsonify, render_template, redirect
from flask_cors import CORS
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from requests_futures.sessions import FuturesSession
from eval_phrase_retrieval import evaluate_results, evaluate_results_kilt
from densephrases.utils.open_utils import load_query_encoder, load_phrase_index, load_qa_pairs, get_query2vec
from densephrases.utils.squad_utils import get_cq_dataloader, TrueCaser
from densephrases.utils.embed_utils import get_cq_results
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class DensePhrasesInterface(object):
def __init__(self, args):
self.args = args
self.base_ip = args.base_ip
self.query_port = args.query_port
self.index_port = args.index_port
self.truecase = TrueCaser(os.path.join(os.environ['DATA_DIR'], args.truecase_path))
def serve_query_encoder(self, query_port, args, inmemory=False, batch_size=64, query_encoder=None, tokenizer=None):
device = 'cuda' if args.cuda else 'cpu'
if query_encoder is None:
query_encoder, tokenizer = load_query_encoder(device, args)
query2vec = get_query2vec(
query_encoder=query_encoder, tokenizer=tokenizer, args=args, batch_size=batch_size
)
# Serve query encoder
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
CORS(app)
@app.route('/query2vec_api', methods=['POST'])
def query2vec_api():
batch_query = json.loads(request.form['query'])
start_time = time()
outs = query2vec(batch_query)
# logger.info(f'query2vec {time()-start_time:.3f} for {len(batch_query)} queries: {batch_query[0]}')
return jsonify(outs)
logger.info(f'Starting QueryEncoder server at {self.get_address(query_port)}')
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(query_port)
IOLoop.instance().start()
def serve_phrase_index(self, index_port, args):
args.examples_path = os.path.join('densephrases/demo/static', args.examples_path)
# Load mips
mips = load_phrase_index(args)
app = Flask(__name__, static_url_path='/static')
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
CORS(app)
def batch_search(batch_query, max_answer_length=20, top_k=10,
nprobe=64, return_idxs=False):
t0 = time()
outs, _ = self.embed_query(batch_query)()
start = np.concatenate([out[0] for out in outs], 0)
end = np.concatenate([out[1] for out in outs], 0)
query_vec = np.concatenate([start, end], 1)
rets = mips.search(
query_vec, q_texts=batch_query, nprobe=nprobe,
top_k=top_k, max_answer_length=max_answer_length,
return_idxs=return_idxs,
)
for ret_idx, ret in enumerate(rets):
for rr in ret:
rr['query_tokens'] = outs[ret_idx][2]
t1 = time()
out = {'ret': rets, 'time': int(1000 * (t1 - t0))}
return out
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/files/<path:path>')
def static_files(path):
return app.send_static_file('files/' + path)
# This one uses a default hyperparameters (for Demo)
@app.route('/api', methods=['GET'])
def api():
query = request.args['query']
query = query[:-1] if query.endswith('?') else query
if args.truecase:
if query[1:].lower() == query[1:]:
query = self.truecase.get_true_case(query)
out = batch_search(
[query],
max_answer_length=args.max_answer_length,
top_k=args.top_k,
nprobe=args.nprobe,
)
out['ret'] = out['ret'][0]
return jsonify(out)
@app.route('/batch_api', methods=['POST'])
def batch_api():
batch_query = json.loads(request.form['query'])
max_answer_length = int(request.form['max_answer_length'])
top_k = int(request.form['top_k'])
nprobe = int(request.form['nprobe'])
out = batch_search(
batch_query,
max_answer_length=max_answer_length,
top_k=top_k,
nprobe=nprobe,
)
return jsonify(out)
@app.route('/get_examples', methods=['GET'])
def get_examples():
with open(args.examples_path, 'r') as fp:
examples = [line.strip() for line in fp.readlines()]
return jsonify(examples)
if self.query_port is None:
logger.info('You must set self.query_port for querying. You can use self.update_query_port() later on.')
logger.info(f'Starting Index server at {self.get_address(index_port)}')
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(index_port)
IOLoop.instance().start()
def serve_bert_encoder(self, bert_port, args):
device = 'cuda' if args.cuda else 'cpu'
bert_encoder, tokenizer = load_query_encoder(device, args) # will be just a bert as query_encoder
import binascii
def float_to_hex(vals):
strs = []
# offset = -40.
# scale = 5.
minv = min(vals)
maxv = max(vals)
for val in vals:
strs.append('{0:0{1}X}'.format(int(min((val - minv) / (maxv-minv) * 255, 255)), 2))
return strs
# Define query to vector function
def context_query_to_logit(context, query):
bert_encoder.eval()
# Phrase encoding style
dataloader, examples, features = get_cq_dataloader(
[context], [query], tokenizer, args.max_query_length, batch_size=64
)
cq_results = get_cq_results(
examples, features, dataloader, device, bert_encoder, batch_size=64
)
outs = []
for cq_idx, cq_result in enumerate(cq_results):
# import pdb; pdb.set_trace()
all_logits = (
np.expand_dims(np.array(cq_result.start_logits), axis=1) +
np.expand_dims(np.array(cq_result.end_logits), axis=0)
).max(1).tolist()
out = {
'context': ' '.join(features[cq_idx].tokens[0:]),
'title': 'dummy',
'start_logits': float_to_hex(all_logits[0:len(features[cq_idx].tokens)]),
'end_logits': float_to_hex(cq_result.end_logits[0:len(features[cq_idx].tokens)]),
}
outs.append(out)
return outs
# Serve query encoder
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
CORS(app)
@app.route('/')
def index():
return app.send_static_file('index_single.html')
@app.route('/files/<path:path>')
def static_files(path):
return app.send_static_file('files/' + path)
args.examples_path = os.path.join('static', 'examples_context.txt')
@app.route('/get_examples', methods=['GET'])
def get_examples():
with open(args.examples_path, 'r') as fp:
examples = [line.strip() for line in fp.readlines()]
return jsonify(examples)
@app.route('/single_api', methods=['GET'])
def single_api():
t0 = time()
single_context = request.args['context']
single_query = request.args['query']
# start_time = time()
outs = context_query_to_logit(single_context, single_query)
# logger.info(f'single to logit {time()-start_time}')
t1 = time()
out = {'ret': outs, 'time': int(1000 * (t1 - t0))}
return jsonify(out)
logger.info(f'Starting BertEncoder server at {self.get_address(bert_port)}')
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(bert_port)
IOLoop.instance().start()
def get_address(self, port):
assert self.base_ip is not None and len(port) > 0
return self.base_ip + ':' + port
def embed_query(self, batch_query):
emb_session = FuturesSession()
r = emb_session.post(self.get_address(self.query_port) + '/query2vec_api',
data={'query': json.dumps(batch_query)})
def map_():
result = r.result()
emb = result.json()
return emb, result.elapsed.total_seconds() * 1000
return map_
def query(self, query):
params = {'query': query}
res = requests.get(self.get_address(self.index_port) + '/api', params=params)
if res.status_code != 200:
logger.info('Wrong behavior %d' % res.status_code)
try:
outs = json.loads(res.text)
except Exception as e:
logger.info(f'no response or error for q {query}')
logger.info(res.text)
return outs
def batch_query(self, batch_query, max_answer_length=20, top_k=10, nprobe=64):
post_data = {
'query': json.dumps(batch_query),
'max_answer_length': max_answer_length,
'top_k': top_k,
'nprobe': nprobe,
}
res = requests.post(self.get_address(self.index_port) + '/batch_api', data=post_data)
if res.status_code != 200:
logger.info('Wrong behavior %d' % res.status_code)
try:
outs = json.loads(res.text)
except Exception as e:
logger.info(f'no response or error for q {batch_query}')
logger.info(res.text)
return outs
def eval_request(self, args):
# Load dataset
qids, questions, answers, _ = load_qa_pairs(args.test_path, args)
# Run batch_query and evaluate
step = args.eval_batch_size
predictions = []
evidences = []
titles = []
scores = []
all_tokens = []
start_time = None
num_q = 0
for q_idx in tqdm(range(0, len(questions), step)):
if q_idx >= 5*step: # exclude warmup
if start_time is None:
start_time = time()
num_q += len(questions[q_idx:q_idx+step])
result = self.batch_query(
questions[q_idx:q_idx+step],
max_answer_length=args.max_answer_length,
top_k=args.top_k,
nprobe=args.nprobe,
)
prediction = [[ret['answer'] for ret in out] if len(out) > 0 else [''] for out in result['ret']]
evidence = [[ret['context'] for ret in out] if len(out) > 0 else [''] for out in result['ret']]
title = [[ret['title'] for ret in out] if len(out) > 0 else [''] for out in result['ret']]
score = [[ret['score'] for ret in out] if len(out) > 0 else [-1e10] for out in result['ret']]
q_tokens = [out[0]['query_tokens'] if len(out) > 0 else '' for out in result['ret']]
predictions += prediction
evidences += evidence
titles += title
scores += score
latency = time()-start_time
logger.info(f'{time()-start_time:.3f} sec for {num_q} questions => {num_q/(time()-start_time):.1f} Q/Sec')
eval_fn = evaluate_results if not args.is_kilt else evaluate_results_kilt
eval_fn(
predictions, qids, questions, answers, args, evidences=evidences, scores=scores, titles=titles,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# QueryEncoder
parser.add_argument('--model_type', default='bert', type=str)
parser.add_argument("--pretrained_name_or_path", default='SpanBERT/spanbert-base-cased', type=str)
parser.add_argument("--config_name", default="", type=str)
parser.add_argument("--tokenizer_name", default="", type=str)
parser.add_argument("--do_lower_case", default=False, action='store_true')
parser.add_argument('--max_query_length', default=64, type=int)
parser.add_argument("--cache_dir", default=None, type=str)
parser.add_argument("--query_encoder_path", default='', type=str)
parser.add_argument("--query_port", default='-1', type=str)
# PhraseIndex
parser.add_argument('--dump_dir', default='dump')
parser.add_argument('--phrase_dir', default='phrase')
parser.add_argument('--index_dir', default='256_flat_SQ4')
parser.add_argument('--index_name', default='index.faiss')
parser.add_argument('--idx2id_name', default='idx2id.hdf5')
parser.add_argument('--index_port', default='-1', type=str)
# These can be dynamically changed.
parser.add_argument('--max_answer_length', default=10, type=int)
parser.add_argument('--top_k', default=10, type=int)
parser.add_argument('--nprobe', default=256, type=int)
parser.add_argument('--truecase', default=False, action='store_true')
parser.add_argument("--truecase_path", default='truecase/english_with_questions.dist', type=str)
# KILT
parser.add_argument('--is_kilt', default=False, action='store_true')
parser.add_argument('--kilt_gold_path', default='kilt/trex/trex-dev-kilt.jsonl')
parser.add_argument('--title2wikiid_path', default='wikidump/title2wikiid.json')
# Serving options
parser.add_argument('--examples_path', default='examples.txt')
# Evaluation
parser.add_argument('--dev_path', default='open-qa/nq-open/dev_preprocessed.json')
parser.add_argument('--test_path', default='open-qa/nq-open/test_preprocessed.json')
parser.add_argument('--candidate_path', default=None)
parser.add_argument('--regex', default=False, action='store_true')
parser.add_argument('--eval_batch_size', default=10, type=int)
# Run mode
parser.add_argument('--base_ip', default='http://127.0.0.1')
parser.add_argument('--run_mode', default='batch_query')
parser.add_argument('--cuda', default=False, action='store_true')
parser.add_argument('--draft', default=False, action='store_true')
parser.add_argument('--debug', default=False, action='store_true')
parser.add_argument('--save_pred', default=False, action='store_true')
parser.add_argument('--seed', default=1992, type=int)
args = parser.parse_args()
# Seed for reproducibility
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
server = DensePhrasesInterface(args)
if args.run_mode == 'q_serve':
logger.info(f'Query address: {server.get_address(server.query_port)}')
server.serve_query_encoder(args.query_port, args)
elif args.run_mode == 'p_serve':
logger.info(f'Index address: {server.get_address(server.index_port)}')
server.serve_phrase_index(args.index_port, args)
elif args.run_mode == 'single_serve':
server.serve_bert_encoder(args.query_port, args)
elif args.run_mode == 'query':
query = 'Name three famous writers'
result = server.query(query)
logger.info(f'Answers to a question: {query}')
logger.info(f'{[r["answer"] for r in result["ret"]]}')
elif args.run_mode == 'batch_query':
queries= [
'What was <NAME>\'s original last name on full house',
'when did medicare begin in the united states',
'who sings don\'t stand so close to me',
'Name three famous writers',
'Who was defeated by computer in chess game?'
]
result = server.batch_query(
queries,
max_answer_length=args.max_answer_length,
top_k=args.top_k,
nprobe=args.nprobe,
)
for query, result in zip(queries, result['ret']):
logger.info(f'Answers to a question: {query}')
logger.info(f'{[r["answer"] for r in result]}')
elif args.run_mode == 'eval_request':
server.eval_request(args)
else:
raise NotImplementedError
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"tornado.ioloop.IOLoop.instance",
"flask_cors.CORS",
"json.dumps",
"flask.jsonify",
"os.path.join",
"tornado.wsgi.WSGIContainer",
"json.loads",
"requests_futures.sessions.FuturesSession",
"random.seed",
"densephrases.utils.open_utils.load_phrase_... | [((780, 923), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (799, 923), False, 'import logging\n'), ((943, 970), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (960, 970), False, 'import logging\n'), ((12293, 12318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12316, 12318), False, 'import argparse\n'), ((15055, 15077), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (15066, 15077), False, 'import random\n'), ((15082, 15107), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15096, 15107), True, 'import numpy as np\n'), ((15112, 15140), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15129, 15140), False, 'import torch\n'), ((15148, 15173), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15171, 15173), False, 'import torch\n'), ((1572, 1673), 'densephrases.utils.open_utils.get_query2vec', 'get_query2vec', ([], {'query_encoder': 'query_encoder', 'tokenizer': 'tokenizer', 'args': 'args', 'batch_size': 'batch_size'}), '(query_encoder=query_encoder, tokenizer=tokenizer, args=args,\n batch_size=batch_size)\n', (1585, 1673), False, 'from densephrases.utils.open_utils import load_query_encoder, load_phrase_index, load_qa_pairs, get_query2vec\n'), ((1737, 1752), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1742, 1752), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((1819, 1828), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1823, 1828), False, 'from flask_cors import CORS\n'), ((2490, 2550), 'os.path.join', 'os.path.join', 
(['"""densephrases/demo/static"""', 'args.examples_path'], {}), "('densephrases/demo/static', args.examples_path)\n", (2502, 2550), False, 'import os\n'), ((2587, 2610), 'densephrases.utils.open_utils.load_phrase_index', 'load_phrase_index', (['args'], {}), '(args)\n', (2604, 2610), False, 'from densephrases.utils.open_utils import load_query_encoder, load_phrase_index, load_qa_pairs, get_query2vec\n'), ((2625, 2667), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""'}), "(__name__, static_url_path='/static')\n", (2630, 2667), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((2734, 2743), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (2738, 2743), False, 'from flask_cors import CORS\n'), ((5733, 5765), 'densephrases.utils.open_utils.load_query_encoder', 'load_query_encoder', (['device', 'args'], {}), '(device, args)\n', (5751, 5765), False, 'from densephrases.utils.open_utils import load_query_encoder, load_phrase_index, load_qa_pairs, get_query2vec\n'), ((7409, 7424), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (7414, 7424), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((7491, 7500), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (7495, 7500), False, 'from flask_cors import CORS\n'), ((7769, 7815), 'os.path.join', 'os.path.join', (['"""static"""', '"""examples_context.txt"""'], {}), "('static', 'examples_context.txt')\n", (7781, 7815), False, 'import os\n'), ((8959, 8975), 'requests_futures.sessions.FuturesSession', 'FuturesSession', ([], {}), '()\n', (8973, 8975), False, 'from requests_futures.sessions import FuturesSession\n'), ((10493, 10528), 'densephrases.utils.open_utils.load_qa_pairs', 'load_qa_pairs', (['args.test_path', 'args'], {}), '(args.test_path, args)\n', (10506, 10528), False, 'from densephrases.utils.open_utils import load_query_encoder, load_phrase_index, load_qa_pairs, get_query2vec\n'), ((15183, 15220), 
'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (15209, 15220), False, 'import torch\n'), ((1219, 1275), 'os.path.join', 'os.path.join', (["os.environ['DATA_DIR']", 'args.truecase_path'], {}), "(os.environ['DATA_DIR'], args.truecase_path)\n", (1231, 1275), False, 'import os\n'), ((1519, 1551), 'densephrases.utils.open_utils.load_query_encoder', 'load_query_encoder', (['device', 'args'], {}), '(device, args)\n', (1537, 1551), False, 'from densephrases.utils.open_utils import load_query_encoder, load_phrase_index, load_qa_pairs, get_query2vec\n'), ((1940, 1973), 'json.loads', 'json.loads', (["request.form['query']"], {}), "(request.form['query'])\n", (1950, 1973), False, 'import json\n'), ((1999, 2005), 'time.time', 'time', ([], {}), '()\n', (2003, 2005), False, 'from time import time\n'), ((2180, 2193), 'flask.jsonify', 'jsonify', (['outs'], {}), '(outs)\n', (2187, 2193), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((2315, 2333), 'tornado.wsgi.WSGIContainer', 'WSGIContainer', (['app'], {}), '(app)\n', (2328, 2333), False, 'from tornado.wsgi import WSGIContainer\n'), ((2888, 2894), 'time.time', 'time', ([], {}), '()\n', (2892, 2894), False, 'from time import time\n'), ((2969, 3012), 'numpy.concatenate', 'np.concatenate', (['[out[0] for out in outs]', '(0)'], {}), '([out[0] for out in outs], 0)\n', (2983, 3012), True, 'import numpy as np\n'), ((3031, 3074), 'numpy.concatenate', 'np.concatenate', (['[out[1] for out in outs]', '(0)'], {}), '([out[1] for out in outs], 0)\n', (3045, 3074), True, 'import numpy as np\n'), ((3099, 3130), 'numpy.concatenate', 'np.concatenate', (['[start, end]', '(1)'], {}), '([start, end], 1)\n', (3113, 3130), True, 'import numpy as np\n'), ((3503, 3509), 'time.time', 'time', ([], {}), '()\n', (3507, 3509), False, 'from time import time\n'), ((4460, 4472), 'flask.jsonify', 'jsonify', (['out'], {}), '(out)\n', (4467, 4472), False, 'from flask import Flask, 
request, jsonify, render_template, redirect\n'), ((4576, 4609), 'json.loads', 'json.loads', (["request.form['query']"], {}), "(request.form['query'])\n", (4586, 4609), False, 'import json\n'), ((4984, 4996), 'flask.jsonify', 'jsonify', (['out'], {}), '(out)\n', (4991, 4996), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((5221, 5238), 'flask.jsonify', 'jsonify', (['examples'], {}), '(examples)\n', (5228, 5238), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((5506, 5524), 'tornado.wsgi.WSGIContainer', 'WSGIContainer', (['app'], {}), '(app)\n', (5519, 5524), False, 'from tornado.wsgi import WSGIContainer\n'), ((6356, 6446), 'densephrases.utils.squad_utils.get_cq_dataloader', 'get_cq_dataloader', (['[context]', '[query]', 'tokenizer', 'args.max_query_length'], {'batch_size': '(64)'}), '([context], [query], tokenizer, args.max_query_length,\n batch_size=64)\n', (6373, 6446), False, 'from densephrases.utils.squad_utils import get_cq_dataloader, TrueCaser\n'), ((6498, 6585), 'densephrases.utils.embed_utils.get_cq_results', 'get_cq_results', (['examples', 'features', 'dataloader', 'device', 'bert_encoder'], {'batch_size': '(64)'}), '(examples, features, dataloader, device, bert_encoder,\n batch_size=64)\n', (6512, 6585), False, 'from densephrases.utils.embed_utils import get_cq_results\n'), ((8039, 8056), 'flask.jsonify', 'jsonify', (['examples'], {}), '(examples)\n', (8046, 8056), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((8152, 8158), 'time.time', 'time', ([], {}), '()\n', (8156, 8158), False, 'from time import time\n'), ((8450, 8456), 'time.time', 'time', ([], {}), '()\n', (8454, 8456), False, 'from time import time\n'), ((8539, 8551), 'flask.jsonify', 'jsonify', (['out'], {}), '(out)\n', (8546, 8551), False, 'from flask import Flask, request, jsonify, render_template, redirect\n'), ((8671, 8689), 'tornado.wsgi.WSGIContainer', 'WSGIContainer', (['app'], {}), 
'(app)\n', (8684, 8689), False, 'from tornado.wsgi import WSGIContainer\n'), ((9557, 9577), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (9567, 9577), False, 'import json\n'), ((9853, 9876), 'json.dumps', 'json.dumps', (['batch_query'], {}), '(batch_query)\n', (9863, 9876), False, 'import json\n'), ((10222, 10242), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (10232, 10242), False, 'import json\n'), ((11901, 11907), 'time.time', 'time', ([], {}), '()\n', (11905, 11907), False, 'from time import time\n'), ((2382, 2399), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (2397, 2399), False, 'from tornado.ioloop import IOLoop\n'), ((5573, 5590), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (5588, 5590), False, 'from tornado.ioloop import IOLoop\n'), ((8737, 8754), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (8752, 8754), False, 'from tornado.ioloop import IOLoop\n'), ((9086, 9109), 'json.dumps', 'json.dumps', (['batch_query'], {}), '(batch_query)\n', (9096, 9109), False, 'import json\n'), ((10941, 10947), 'time.time', 'time', ([], {}), '()\n', (10945, 10947), False, 'from time import time\n'), ((11942, 11948), 'time.time', 'time', ([], {}), '()\n', (11946, 11948), False, 'from time import time\n'), ((12002, 12008), 'time.time', 'time', ([], {}), '()\n', (12006, 12008), False, 'from time import time\n'), ((6807, 6839), 'numpy.array', 'np.array', (['cq_result.start_logits'], {}), '(cq_result.start_logits)\n', (6815, 6839), True, 'import numpy as np\n'), ((6886, 6916), 'numpy.array', 'np.array', (['cq_result.end_logits'], {}), '(cq_result.end_logits)\n', (6894, 6916), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import contextlib
import multiprocessing
import numpy as np
import scipy.spatial.distance as ssd
import aqml.cheminfo.core as cc
import aqml.cheminfo.molecule.nbody as MB
import aqml.cheminfo.molecule.core as cmc
import matplotlib.pylab as plt
T, F = True, False
class RawM(object):
"""
molecule object with only `zs & `coords
"""
def __init__(self, mol):
self.zs = mol.zs
self.coords = mol.coords
self.mol = mol
def generate_coulomb_matrix(self,inorm=False,wz=False,rpower=1.0):
""" Coulomb matrix
You may consider using `cml1 instead of `cm """
na = len(self.zs)
mat = np.zeros((na,na))
ds = ssd.squareform( ssd.pdist(self.coords) )
np.fill_diagonal(ds, 1.0)
if wz:
X, Y = np.meshgrid(self.zs, self.zs)
diag = -1. * np.array(self.zs)**2.4
else:
X, Y = [1., 1.]
diag = np.zeros(na)
mat = X*Y/ds**rpower
np.fill_diagonal(mat, diag)
L1s = np.linalg.norm(mat, ord=1, axis=0)
ias = np.argsort(L1s)
cm = L1s[ias] if inorm else mat[ias,:][:,ias].ravel()
return cm
def get_lbob(self, plcut=2, iconn=T, wz=F, rpower=1.):
"""
get local bob, i.e., a bob vector per atom
"""
mc = cmc.RawMol(self.mol)
o1 = MB.NBody(mc, g=mc.g, pls=mc.pls, iconn=iconn, plcut=plcut, bob=T)
x = []
for i in range(mc.na):
bs = o1.get_bonds([i])
for k in bs:
v = bs[k]
const = np.product( np.array(k.split('-'),dtype=float) ) if wz else 1.0
v2 = list( const/np.array(v)**rpower )
v2.sort()
bs[k] = v2
x.append( bs )
return x
class RawMs(object):
    """A set of molecules with a per-atom representation (local BoB or CML1).

    Computes pairwise molecule distances by optimally matching atoms of the
    same element between two molecules and taking a max-min (Hausdorff-like)
    aggregate of per-atom bag distances.
    """
    #def __init__(self, mols, repr='cml1', param={'inorm':T, 'wz':F, 'rpower':1.0}):
    def __init__(self, mols, repr='bob', param={'iconn':T, 'plcut':2, 'wz':F, 'rpower':1.}, debug=F):
        # NOTE(review): `param` is a shared mutable default dict; it is only
        # read here, but callers should not mutate it.
        self.mols = mols
        self.debug = debug
        self.nm = len(mols.nas)   # number of molecules
        self.repr = repr          # representation name: 'bob' or 'cml1'
        self.param = param
    @property
    def x(self):
        """Lazily computed list of per-molecule representations."""
        if not hasattr(self, '_x'):
            self._x = self.get_x()
        return self._x
    def get_x(self):
        """Build the representation for every molecule in the set."""
        xs = []
        wz = self.param['wz']
        rp = self.param['rpower']
        if self.repr in ['cml1']:
            inorm = self.param['inorm']
            for i in range(self.nm):
                mi = self.mols[i]
                rmol = RawM(mi)
                xi = rmol.generate_coulomb_matrix(inorm=inorm,wz=wz,rpower=rp)
                xs.append(xi)
        elif self.repr in ['bob']:
            iconn = self.param['iconn']
            plcut = self.param['plcut']
            for i in range(self.nm):
                mi = self.mols[i]
                rmol = RawM(mi)
                xi = rmol.get_lbob(plcut=plcut, wz=wz, rpower=rp)
                xs.append(xi)
        return xs
    @property
    def ds(self):
        """Lazily computed full pairwise distance matrix (nm x nm)."""
        if not hasattr(self, '_ds'):
            ims = np.arange(self.nm)
            self._ds = self.cdist(ims,ims)
        return self._ds
    def cdist_lbob(self, xi, xj):
        """ calculate distance between two local BoB's of atoms i and j"""
        d = 0.
        # union of bag keys; duplicates are harmless (distance 0 on repeat
        # visits is NOT the case here — repeated keys are summed twice).
        # NOTE(review): keys present in both dicts are visited twice, doubling
        # their contribution — confirm this is intended.
        ks = list(xi.keys()) + list(xj.keys())
        for k in ks:
            vi = xi[k] if k in xi else []
            ni = len(vi)
            vj = xj[k] if k in xj else []
            nj = len(vj)
            n = max(len(vi), len(vj))
            # left-pad the shorter bag with zeros so both have length n
            vi2 = np.array( [0.]*(n-ni)+vi )
            vj2 = np.array( [0.]*(n-nj)+vj )
            d += np.sum( np.abs( vi2-vj2 ))
        return d
    def cdist(self, ims, jms, ncpu=None):
        """ pair-wise distance """
        ni = len(ims)
        nj = len(jms)
        iap = F # include all atom pairs
        if ni==nj:
            # symmetric case: only compute the upper triangle
            if np.all(ims==jms): iap = T
        pairs = []; apairs = []
        for i0 in range(ni):
            for j0 in range(nj):
                i = ims[i0]
                j = jms[j0]
                if iap:
                    if j0>i0:
                        pairs.append([i,j])
                        apairs.append([i0,j0])
                else:
                    pairs.append([i,j])
                    apairs.append([i0,j0])
        print('pairs=', pairs)
        ds = np.zeros((ni,nj))
        if ncpu is None:
            ncpu = multiprocessing.cpu_count()
        # NOTE(review): the Pool is never closed/joined; consider a `with`
        # block. Left unchanged here.
        pool = multiprocessing.Pool(processes=ncpu)
        dsr = pool.map(self.get_dij, pairs)
        for ip, pair in enumerate(apairs):
            i,j = pair
            if iap:
                # mirror into both triangles of the symmetric matrix
                ds[i,j] = ds[j,i] = dsr[ip]
            else:
                ds[i,j] = dsr[ip]
        return ds
    def get_dij(self, ij):
        """Distance between molecules i and j via element-wise atom matching."""
        i,j = ij
        xi = self.x[i]
        xj = self.x[j]
        # NOTE(review): `z==zsi` below assumes .zs is a numpy array so the
        # comparison broadcasts to a boolean mask — a plain list would
        # silently yield a scalar False. TODO confirm against mols type.
        zsi = self.mols[i].zs; ias = np.arange(len(zsi))
        zsj = self.mols[j].zs; jas = np.arange(len(zsj))
        zsu = list(set(zsi))
        if set(zsi)!=set(zsj):
            print(' ** elements differ!')
        dij = 0.
        for z in zsu:
            iasz = ias[z==zsi]
            jasz = jas[z==zsj]
            dsz = []
            nazi = len(iasz)
            nazj = len(jasz)
            for ii,ia in enumerate(iasz):
                dsi = []
                for jj,ja in enumerate(jasz):
                    daij = self.cdist_lbob( xi[ia], xj[ja] )
                    dsi.append(daij)
                # best match in molecule j for atom ia of molecule i
                di = np.min(dsi)
                dsz.append(di)
                jac = jasz[dsi.index(di)]
                if self.debug: print('z=',z, 'im,ia=',i,ia, 'jm,ja=',j,jac, 'd=',di)
            # worst best-match over atoms of element z
            dz = np.max(dsz)
            dij = max(dij,dz)
        return dij
    def remove_redundant(self, thresh=0.03):
        """Greedy filter: keep a molecule only if its distance to every
        already-kept molecule exceeds `thresh`. Returns kept indices."""
        idx = [0]
        for i in range(1,self.nm):
            if np.all(self.ds[i,idx] > thresh):
                idx.append(i)
        return idx
    def vb(self, i,ia, j,ja, keys=None, sigma=0.1, xlim=None, ylim=None):
        """ visualize bob """
        # broaden each bag entry with a gaussian of width `sigma` over a
        # fixed distance grid, and overlay atom (i,ia) vs atom (j,ja)
        rs = np.linspace(0, 4.8, 1000)
        wz = self.param['wz']
        rp = self.param['rpower']
        ys = []
        ys0 = np.zeros(len(rs))
        xi = self.x[i][ia]
        xj = self.x[j][ja]
        if keys is None:
            keys = list( set(list(xi.keys())+list(xj.keys())) )
        colors = ['k', 'b', 'r', 'g']
        legends = []
        for ik, key in enumerate(keys):
            ysi = ys0.copy()
            const = np.product( np.array(key.split('-'),dtype=float) ) if wz else 1.0
            # invert the stored values back to distances (see get_lbob)
            rsi = const/np.array(xi[key]) if rp==1. else (const/np.array(xi[key]))**(1./rp)
            for ir,ri in enumerate(rsi):
                ysi += np.exp( - 0.5 * (rs-ri)**2/sigma**2 ) * xi[key][ir]
            ysj = ys0.copy()
            rsj = const/np.array(xj[key]) if rp==1. else (const/np.array(xj[key]))**(1./rp)
            for jr,rj in enumerate(rsj):
                ysj += np.exp( - 0.5 * (rs-rj)**2/sigma**2 ) * xj[key][jr]
            #ys.append([ ysi,ysj ])
            plt.plot(rs, ysi, '-'+colors[ik], rs, ysj,'--'+colors[ik])
            legends += keys[ik:ik+1] + ['']
        plt.legend( legends )
        if xlim:
            plt.xlim(xlim[0],xlim[1])
        if ylim:
            plt.ylim(ylim[0],ylim[1])
        return plt
@contextlib.contextmanager
def printoptions(*args, **kwargs):
    """Temporarily override numpy print options within a `with` block.

    The previous options are restored on exit, even if an exception is raised.
    """
    saved = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        np.set_printoptions(**saved)
def printa(a, precision=2, suppress=T, *args):
    """Print array `a` with the given numpy print options.

    Bug fix: `suppress` was previously ignored — the body hard-coded
    `suppress=T` instead of passing the parameter through.
    """
    with printoptions(precision=precision, suppress=suppress):
        print(a)
if __name__ == "__main__":
    import os, sys, io2
    import argparse as ap
    args = sys.argv[1:]
    ps = ap.ArgumentParser()
    ps.add_argument('-plcut', nargs='?', default=3, type=int, help='path length cutoff')
    ps.add_argument('-rcut','--rcut', nargs='?', default=2.7, type=float, help='SLATM cutoff radius, default is 4.8 Ang')
    ps.add_argument('-rp', '-rpower', nargs='?', type=int, default=1, help='r^rpower in BoB / SLATM')
    # bug fix: this previously passed `float=0.03`, which argparse rejects
    # with a TypeError; the intended keyword is `default=0.03`.
    ps.add_argument('-thresh', nargs='?', type=float, default=0.03, help='threshold distance between mols')
    ps.add_argument('-debug', action='store_true')
    ps.add_argument('-iconn', nargs='?', type=str, default='T')
    ps.add_argument('-i', dest='idxs', type=int, nargs='*')
    ag = ps.parse_args(args)
    ag.iconn = {'T':True, 'F':False}[ag.iconn]
    debug = ag.debug
    thresh = ag.thresh #0.03
    so = ''
    for i in ag.idxs:
        fsi = io2.cmdout('ls frag_%s*z'%i)
        # bug fix: `rp` was an undefined name here (NameError); the parsed
        # value lives in `ag.rp` (dest derived from the '-rp' option).
        ms1 = RawMs( cc.molecules(fsi), repr='bob', param={'iconn':ag.iconn, 'plcut':ag.plcut, 'wz':F, 'rpower':ag.rp}, debug=debug )
        idx = ms1.remove_redundant(thresh)
        # keep one representative filename (with wildcard suffix) per cluster
        so += ' '.join([ fsi[j][:-4]+'*' for j in idx ])
        so += ' '
    print( so )
| [
"numpy.abs",
"argparse.ArgumentParser",
"numpy.argsort",
"numpy.linalg.norm",
"scipy.spatial.distance.pdist",
"numpy.arange",
"numpy.exp",
"multiprocessing.cpu_count",
"numpy.set_printoptions",
"numpy.meshgrid",
"matplotlib.pylab.legend",
"numpy.max",
"numpy.linspace",
"aqml.cheminfo.core.... | [((7386, 7407), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (7405, 7407), True, 'import numpy as np\n'), ((7412, 7448), 'numpy.set_printoptions', 'np.set_printoptions', (['*args'], {}), '(*args, **kwargs)\n', (7431, 7448), True, 'import numpy as np\n'), ((7761, 7780), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {}), '()\n', (7778, 7780), True, 'import argparse as ap\n'), ((673, 691), 'numpy.zeros', 'np.zeros', (['(na, na)'], {}), '((na, na))\n', (681, 691), True, 'import numpy as np\n'), ((753, 778), 'numpy.fill_diagonal', 'np.fill_diagonal', (['ds', '(1.0)'], {}), '(ds, 1.0)\n', (769, 778), True, 'import numpy as np\n'), ((1002, 1029), 'numpy.fill_diagonal', 'np.fill_diagonal', (['mat', 'diag'], {}), '(mat, diag)\n', (1018, 1029), True, 'import numpy as np\n'), ((1044, 1078), 'numpy.linalg.norm', 'np.linalg.norm', (['mat'], {'ord': '(1)', 'axis': '(0)'}), '(mat, ord=1, axis=0)\n', (1058, 1078), True, 'import numpy as np\n'), ((1093, 1108), 'numpy.argsort', 'np.argsort', (['L1s'], {}), '(L1s)\n', (1103, 1108), True, 'import numpy as np\n'), ((1338, 1358), 'aqml.cheminfo.molecule.core.RawMol', 'cmc.RawMol', (['self.mol'], {}), '(self.mol)\n', (1348, 1358), True, 'import aqml.cheminfo.molecule.core as cmc\n'), ((1372, 1437), 'aqml.cheminfo.molecule.nbody.NBody', 'MB.NBody', (['mc'], {'g': 'mc.g', 'pls': 'mc.pls', 'iconn': 'iconn', 'plcut': 'plcut', 'bob': 'T'}), '(mc, g=mc.g, pls=mc.pls, iconn=iconn, plcut=plcut, bob=T)\n', (1380, 1437), True, 'import aqml.cheminfo.molecule.nbody as MB\n'), ((4372, 4390), 'numpy.zeros', 'np.zeros', (['(ni, nj)'], {}), '((ni, nj))\n', (4380, 4390), True, 'import numpy as np\n'), ((4477, 4513), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'ncpu'}), '(processes=ncpu)\n', (4497, 4513), False, 'import multiprocessing\n'), ((6039, 6064), 'numpy.linspace', 'np.linspace', (['(0)', '(4.8)', '(1000)'], {}), '(0, 4.8, 1000)\n', (6050, 6064), True, 'import numpy as 
np\n'), ((7156, 7175), 'matplotlib.pylab.legend', 'plt.legend', (['legends'], {}), '(legends)\n', (7166, 7175), True, 'import matplotlib.pylab as plt\n'), ((7493, 7524), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**original)\n', (7512, 7524), True, 'import numpy as np\n'), ((8553, 8583), 'io2.cmdout', 'io2.cmdout', (["('ls frag_%s*z' % i)"], {}), "('ls frag_%s*z' % i)\n", (8563, 8583), False, 'import os, sys, io2\n'), ((720, 742), 'scipy.spatial.distance.pdist', 'ssd.pdist', (['self.coords'], {}), '(self.coords)\n', (729, 742), True, 'import scipy.spatial.distance as ssd\n'), ((813, 842), 'numpy.meshgrid', 'np.meshgrid', (['self.zs', 'self.zs'], {}), '(self.zs, self.zs)\n', (824, 842), True, 'import numpy as np\n'), ((952, 964), 'numpy.zeros', 'np.zeros', (['na'], {}), '(na)\n', (960, 964), True, 'import numpy as np\n'), ((3091, 3109), 'numpy.arange', 'np.arange', (['self.nm'], {}), '(self.nm)\n', (3100, 3109), True, 'import numpy as np\n'), ((3563, 3594), 'numpy.array', 'np.array', (['([0.0] * (n - ni) + vi)'], {}), '([0.0] * (n - ni) + vi)\n', (3571, 3594), True, 'import numpy as np\n'), ((3608, 3639), 'numpy.array', 'np.array', (['([0.0] * (n - nj) + vj)'], {}), '([0.0] * (n - nj) + vj)\n', (3616, 3639), True, 'import numpy as np\n'), ((3901, 3919), 'numpy.all', 'np.all', (['(ims == jms)'], {}), '(ims == jms)\n', (3907, 3919), True, 'import numpy as np\n'), ((4434, 4461), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4459, 4461), False, 'import multiprocessing\n'), ((5664, 5675), 'numpy.max', 'np.max', (['dsz'], {}), '(dsz)\n', (5670, 5675), True, 'import numpy as np\n'), ((5839, 5871), 'numpy.all', 'np.all', (['(self.ds[i, idx] > thresh)'], {}), '(self.ds[i, idx] > thresh)\n', (5845, 5871), True, 'import numpy as np\n'), ((7045, 7108), 'matplotlib.pylab.plot', 'plt.plot', (['rs', 'ysi', "('-' + colors[ik])", 'rs', 'ysj', "('--' + colors[ik])"], {}), "(rs, ysi, '-' + colors[ik], rs, ysj, '--' + colors[ik])\n", 
(7053, 7108), True, 'import matplotlib.pylab as plt\n'), ((7207, 7233), 'matplotlib.pylab.xlim', 'plt.xlim', (['xlim[0]', 'xlim[1]'], {}), '(xlim[0], xlim[1])\n', (7215, 7233), True, 'import matplotlib.pylab as plt\n'), ((7262, 7288), 'matplotlib.pylab.ylim', 'plt.ylim', (['ylim[0]', 'ylim[1]'], {}), '(ylim[0], ylim[1])\n', (7270, 7288), True, 'import matplotlib.pylab as plt\n'), ((8603, 8620), 'aqml.cheminfo.core.molecules', 'cc.molecules', (['fsi'], {}), '(fsi)\n', (8615, 8620), True, 'import aqml.cheminfo.core as cc\n'), ((3660, 3677), 'numpy.abs', 'np.abs', (['(vi2 - vj2)'], {}), '(vi2 - vj2)\n', (3666, 3677), True, 'import numpy as np\n'), ((5477, 5488), 'numpy.min', 'np.min', (['dsi'], {}), '(dsi)\n', (5483, 5488), True, 'import numpy as np\n'), ((868, 885), 'numpy.array', 'np.array', (['self.zs'], {}), '(self.zs)\n', (876, 885), True, 'import numpy as np\n'), ((6567, 6584), 'numpy.array', 'np.array', (['xi[key]'], {}), '(xi[key])\n', (6575, 6584), True, 'import numpy as np\n'), ((6699, 6741), 'numpy.exp', 'np.exp', (['(-0.5 * (rs - ri) ** 2 / sigma ** 2)'], {}), '(-0.5 * (rs - ri) ** 2 / sigma ** 2)\n', (6705, 6741), True, 'import numpy as np\n'), ((6804, 6821), 'numpy.array', 'np.array', (['xj[key]'], {}), '(xj[key])\n', (6812, 6821), True, 'import numpy as np\n'), ((6936, 6978), 'numpy.exp', 'np.exp', (['(-0.5 * (rs - rj) ** 2 / sigma ** 2)'], {}), '(-0.5 * (rs - rj) ** 2 / sigma ** 2)\n', (6942, 6978), True, 'import numpy as np\n'), ((6607, 6624), 'numpy.array', 'np.array', (['xi[key]'], {}), '(xi[key])\n', (6615, 6624), True, 'import numpy as np\n'), ((6844, 6861), 'numpy.array', 'np.array', (['xj[key]'], {}), '(xj[key])\n', (6852, 6861), True, 'import numpy as np\n'), ((1691, 1702), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1699, 1702), True, 'import numpy as np\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
from crossbeam.model.op_arg import LSTMArgSelector
from crossbeam.model.op_init import OpPoolingState
from crossbeam.model.great import Great
from crossbeam.model.encoder import DummyWeightEncoder, ValueWeightEncoder
class LogicModel(nn.Module):
    """Model for logic tasks where values are relations over entities.

    A value (or specification) is a boolean relation over at most
    `max_entities` entities: a 1-index (unary) or 2-index (binary) array.
    Relations are embedded either with a GREAT transformer over entity /
    edge-type embeddings (args.great_transformer) or with per-role MLPs
    on a serialized form of the relation.
    """
    def __init__(self, args, operations, max_entities=10):
        super(LogicModel, self).__init__()
        self.max_entities = max_entities
        # step-wise argument scorer over candidate value sequences
        self.arg = LSTMArgSelector(hidden_size=args.embed_dim,
                            mlp_sizes=[256, 1],
                            step_score_func=args.step_score_func,
                            step_score_normalize=args.score_normed)
        # pooled initial search state, one per operation
        self.init = OpPoolingState(ops=tuple(operations), state_dim=args.embed_dim, pool_method='mean')
        if args.encode_weight:
            self.encode_weight = ValueWeightEncoder(hidden_size=args.embed_dim)
            print('encode weight')
        else:
            self.encode_weight = DummyWeightEncoder()
        if args.great_transformer:
            print('use great transformer')
            self.entity_project = nn.Embedding(max_entities, args.embed_dim)
            # 16 edge types: 8 for input relations + 8 for the specification.
            # relations:
            #   unary both no
            #   unary first yes
            #   unary second yes
            #   unary both yes
            #   binary no
            #   binary ->
            #   binary <-
            #   binary <->
            # also applies for spec (2x)
            self.relation_project = nn.Embedding(8+8, args.embed_dim)
            self.great = Great(d_model=args.embed_dim,
                    dim_feedforward=args.embed_dim*4,
                    layers=4,
                    batch_first=True)
            # flatten per-entity outputs into a single embedding
            self.final_projection = nn.Linear(max_entities*args.embed_dim,args.embed_dim)
        else:
            print('use mlp')
            self.great = None
            # three separate MLPs over the serialized relation, one per role
            # (specification / value / input); input width is the serialized
            # length: 1 tag + max_entities**2 entries.
            # NOTE(review): embed_value is created but not used in this class
            # — presumably consumed elsewhere; verify.
            self.embed_spec,self.embed_value,self.embed_input = \
                [ nn.Sequential(nn.Linear(max_entities*max_entities + 1, args.embed_dim),
                                nn.ReLU(),
                                nn.Linear(args.embed_dim, args.embed_dim),
                                nn.ReLU(),
                                nn.Linear(args.embed_dim, args.embed_dim),
                                nn.ReLU(),
                                nn.Linear(args.embed_dim, args.embed_dim))
                  for _ in range(3) ]
    def batch_init(self, io_embed, io_scatter, val_embed, value_indices, operation, sample_indices=None, io_gather=None):
        """Delegate batched search-state initialization to the op-pooling module."""
        return self.init.batch_forward(io_embed, io_scatter, val_embed, value_indices, operation, sample_indices, io_gather)
    @staticmethod
    def serialize_relation(r):
        """Flatten a unary/binary relation into a fixed-length numeric list.

        The leading element tags the arity (0 = unary, 1 = binary); unary
        relations are zero-padded to the binary length max_entities**2.
        """
        if len(r.shape) == 1:
            return [0] + list(1*r) + [0]*(r.shape[0]*r.shape[0] - r.shape[0])
        elif len(r.shape) == 2:
            return [1] + list(1*np.reshape(r,-1))
        else:
            assert False, "only handle relations with 1/2 indices"
    def features_of_relation(self, relations, is_specification, device=None):
        """Embed a list of relations; `is_specification` selects the role.

        Returns a tensor of shape [len(relations), embed_dim].
        """
        if self.great is None:
            # MLP path: embed the serialized relation directly
            x = torch.tensor([LogicModel.serialize_relation(v) for v in relations]).float()
            if device: x = x.to(device)
            if is_specification:
                return self.embed_spec(x)
            else:
                return self.embed_input(x)
        # GREAT path: entity embeddings + a pairwise edge-type matrix
        x = self.entity_project(torch.arange(self.max_entities,device=device).long())
        x = x.unsqueeze(0).repeat(len(relations),1,1)
        # map (forward membership, backward membership) to an offset 0..3
        d = {(False,False): 0,
             (False,True): 1,
             (True,False): 2,
             (True,True): 3}
        def o(a):
            # base offset: unary=0, binary=4; +8 when embedding a specification
            nonlocal is_specification
            h = {1:0,2:4}[len(a.shape)]
            if is_specification:
                return h+8
            return h
        def I(matrix,a,b):
            # uniform indexing for unary (ignores b) and binary relations
            if len(matrix.shape) == 2:
                return matrix[a,b]
            if len(matrix.shape) == 1:
                return matrix[a]
            assert False
        # per-relation matrix of edge-type ids for every ordered entity pair
        r = [ [ [ o(m) + d[I(m,i,j), I(m,j,i)]
                  for j in range(self.max_entities)]
                for i in range(self.max_entities)]
              for m in relations ]
        r = self.relation_project(torch.tensor(r).long().to(x.device))
        output = self.great(x,r).view(len(relations),-1)
        output = self.final_projection(output)
        return output
    def io(self, list_input_dictionary, list_outputs, device, needs_scatter_idx=False):
        """input_dictionary/outputs: list of length batch_size, batching over task
        Each element of outputs is the output for a particular I/O example
        Here we only ever have one I/O example
        """
        list_feat = []
        #TODO: make this vectorized, instead of using the manual loop
        for input_dictionary, outputs in zip(list_input_dictionary, list_outputs):
            assert len(outputs) == 1
            outputs = outputs[0]
            specification = self.features_of_relation([outputs], True, device)
            values = self.val(list(input_dictionary.values()),
                              device)
            # max-pool over the task's input values, keep a batch dim of 1
            values = values.max(0).values.unsqueeze(0)
            # task feature = [spec embedding ; pooled input embedding]
            feat = torch.cat((specification,values),-1)
            list_feat.append(feat)
        feats = torch.cat(list_feat, dim=0)
        if needs_scatter_idx:
            idx = torch.arange(len(list_input_dictionary)).to(device)
            return feats, idx
        else:
            return feats
    def val(self, all_values, device, output_values=None):
        """all_values: list of values. each value is represented by its instantiation on each I/O example
        so if you have three I/O examples and ten values then you will have 10x3 matrix as input
        returns as output [number_of_values,embedding_size]"""
        # only one I/O example is ever used, so take instantiation 0
        all_values = [v[0] for v in all_values]
        return self.features_of_relation(all_values, False, device)
| [
"crossbeam.model.encoder.ValueWeightEncoder",
"torch.nn.ReLU",
"torch.nn.Embedding",
"crossbeam.model.great.Great",
"torch.cat",
"crossbeam.model.op_arg.LSTMArgSelector",
"torch.arange",
"numpy.reshape",
"torch.nn.Linear",
"crossbeam.model.encoder.DummyWeightEncoder",
"torch.tensor"
] | [((1029, 1179), 'crossbeam.model.op_arg.LSTMArgSelector', 'LSTMArgSelector', ([], {'hidden_size': 'args.embed_dim', 'mlp_sizes': '[256, 1]', 'step_score_func': 'args.step_score_func', 'step_score_normalize': 'args.score_normed'}), '(hidden_size=args.embed_dim, mlp_sizes=[256, 1],\n step_score_func=args.step_score_func, step_score_normalize=args.\n score_normed)\n', (1044, 1179), False, 'from crossbeam.model.op_arg import LSTMArgSelector\n'), ((5488, 5515), 'torch.cat', 'torch.cat', (['list_feat'], {'dim': '(0)'}), '(list_feat, dim=0)\n', (5497, 5515), False, 'import torch\n'), ((1418, 1464), 'crossbeam.model.encoder.ValueWeightEncoder', 'ValueWeightEncoder', ([], {'hidden_size': 'args.embed_dim'}), '(hidden_size=args.embed_dim)\n', (1436, 1464), False, 'from crossbeam.model.encoder import DummyWeightEncoder, ValueWeightEncoder\n'), ((1531, 1551), 'crossbeam.model.encoder.DummyWeightEncoder', 'DummyWeightEncoder', ([], {}), '()\n', (1549, 1551), False, 'from crossbeam.model.encoder import DummyWeightEncoder, ValueWeightEncoder\n'), ((1648, 1690), 'torch.nn.Embedding', 'nn.Embedding', (['max_entities', 'args.embed_dim'], {}), '(max_entities, args.embed_dim)\n', (1660, 1690), True, 'import torch.nn as nn\n'), ((1951, 1986), 'torch.nn.Embedding', 'nn.Embedding', (['(8 + 8)', 'args.embed_dim'], {}), '(8 + 8, args.embed_dim)\n', (1963, 1986), True, 'import torch.nn as nn\n'), ((2005, 2102), 'crossbeam.model.great.Great', 'Great', ([], {'d_model': 'args.embed_dim', 'dim_feedforward': '(args.embed_dim * 4)', 'layers': '(4)', 'batch_first': '(True)'}), '(d_model=args.embed_dim, dim_feedforward=args.embed_dim * 4, layers=4,\n batch_first=True)\n', (2010, 2102), False, 'from crossbeam.model.great import Great\n'), ((2203, 2259), 'torch.nn.Linear', 'nn.Linear', (['(max_entities * args.embed_dim)', 'args.embed_dim'], {}), '(max_entities * args.embed_dim, args.embed_dim)\n', (2212, 2259), True, 'import torch.nn as nn\n'), ((5410, 5448), 'torch.cat', 'torch.cat', 
(['(specification, values)', '(-1)'], {}), '((specification, values), -1)\n', (5419, 5448), False, 'import torch\n'), ((2412, 2470), 'torch.nn.Linear', 'nn.Linear', (['(max_entities * max_entities + 1)', 'args.embed_dim'], {}), '(max_entities * max_entities + 1, args.embed_dim)\n', (2421, 2470), True, 'import torch.nn as nn\n'), ((2507, 2516), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2514, 2516), True, 'import torch.nn as nn\n'), ((2555, 2596), 'torch.nn.Linear', 'nn.Linear', (['args.embed_dim', 'args.embed_dim'], {}), '(args.embed_dim, args.embed_dim)\n', (2564, 2596), True, 'import torch.nn as nn\n'), ((2635, 2644), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2642, 2644), True, 'import torch.nn as nn\n'), ((2683, 2724), 'torch.nn.Linear', 'nn.Linear', (['args.embed_dim', 'args.embed_dim'], {}), '(args.embed_dim, args.embed_dim)\n', (2692, 2724), True, 'import torch.nn as nn\n'), ((2763, 2772), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2770, 2772), True, 'import torch.nn as nn\n'), ((2811, 2852), 'torch.nn.Linear', 'nn.Linear', (['args.embed_dim', 'args.embed_dim'], {}), '(args.embed_dim, args.embed_dim)\n', (2820, 2852), True, 'import torch.nn as nn\n'), ((3793, 3839), 'torch.arange', 'torch.arange', (['self.max_entities'], {'device': 'device'}), '(self.max_entities, device=device)\n', (3805, 3839), False, 'import torch\n'), ((3338, 3355), 'numpy.reshape', 'np.reshape', (['r', '(-1)'], {}), '(r, -1)\n', (3348, 3355), True, 'import numpy as np\n'), ((4511, 4526), 'torch.tensor', 'torch.tensor', (['r'], {}), '(r)\n', (4523, 4526), False, 'import torch\n')] |
import glob
import os
import numpy as np
import skimage.io
import skimage.transform
import multiprocessing as mp
import utils
# Class names are the names of the train subdirectories, sorted so the
# label -> name mapping is stable across runs.
directories = glob.glob("data/train/*")
class_names = [os.path.basename(d) for d in directories]
class_names.sort()
num_classes = len(class_names)

# Sorted file lists; order must match the precomputed label/image arrays.
paths_train = glob.glob("data/train/*/*")
paths_train.sort()
paths_test = glob.glob("data/test/*")
paths_test.sort()

paths = {
    'train': paths_train,
    'test': paths_test,
}

# Original (slow) label derivation, kept for reference:
# labels_train = np.zeros(len(paths['train']), dtype='int32')
# for k, path in enumerate(paths['train']):
#     class_name = os.path.basename(os.path.dirname(path))
#     labels_train[k] = class_names.index(class_name)

# Precomputed integer labels (indices into class_names), one per train path.
labels_train = utils.load_gz("data/labels_train.npy.gz")
# Default augmentation ranges used during training; consumed by
# random_perturbation_transform via **kwargs.
default_augmentation_params = {
    'zoom_range': (1 / 1.1, 1.1),
    'rotation_range': (0, 360),
    'shear_range': (0, 0),
    'translation_range': (-4, 4),
    'do_flip': True,
    'allow_stretch': False,
}

# Parameter set that makes random_perturbation_transform a no-op.
no_augmentation_params = {
    'zoom_range': (1.0, 1.0),
    'rotation_range': (0, 0),
    'shear_range': (0, 0),
    'translation_range': (0, 0),
    'do_flip': False,
    'allow_stretch': False,
}

# No-op parameters for the gaussian-parameterized augmentation variant
# (std-based keys; presumably consumed elsewhere — not used in this file).
no_augmentation_params_gaussian = {
    'zoom_std': 0.0,
    'rotation_range': (0, 0),
    'shear_std': 0.0,
    'translation_std': 0.0,
    'do_flip': False,
    'stretch_std': 0.0,
}

# Identity transform, used as the default "augmentation" at test time.
tform_identity = skimage.transform.AffineTransform()
# def load(subset='train'):
# """
# Load all images into memory for faster processing
# """
# images = np.empty(len(paths[subset]), dtype='object')
# for k, path in enumerate(paths[subset]):
# img = skimage.io.imread(path, as_grey=True)
# images[k] = img
# return images
def load(subset='train'):
    """Load the precomputed image array for `subset` ('train' or 'test')
    into memory for faster processing."""
    return utils.load_gz("data/images_{}.npy.gz".format(subset))
def uint_to_float(img):
    """Map uint8 pixel values [0, 255] to floats in [0, 1], inverting
    intensity (255 -> 0.0, 0 -> 1.0)."""
    scaled = img / np.float32(255.0)
    return 1 - scaled
def extract_image_patch(chunk_dst, img):
    """
    Copy a centered crop of `img` into `chunk_dst` (assumed pre-zeroed),
    converting pixels with uint_to_float. If the image is smaller than the
    patch along an axis, the image is instead centered inside the patch.
    """
    def _axis_slices(dst_len, src_len):
        # (dst_slice, src_slice) centering src over/inside dst on one axis
        off = (src_len - dst_len) // 2
        if off < 0:
            return slice(-off, -off + src_len), slice(0, src_len)
        return slice(0, dst_len), slice(off, off + dst_len)

    cx, ix = _axis_slices(chunk_dst.shape[0], img.shape[0])
    cy, iy = _axis_slices(chunk_dst.shape[1], img.shape[1])
    chunk_dst[cx, cy] = uint_to_float(img[ix, iy])
def patches_gen(images, labels, patch_size=(50, 50), chunk_size=4096, num_chunks=100, rng=np.random):
    """Yield `num_chunks` chunks of patches sampled with replacement,
    each a (patches, labels) pair of float32 arrays."""
    height, width = patch_size
    for _ in range(num_chunks):
        picks = rng.randint(0, len(images), chunk_size)
        chunk_x = np.zeros((chunk_size, height, width), dtype='float32')
        chunk_y = np.zeros((chunk_size,), dtype='float32')
        for slot, idx in enumerate(picks):
            extract_image_patch(chunk_x[slot], images[idx])
            chunk_y[slot] = labels[idx]
        yield chunk_x, chunk_y
def patches_gen_ordered(images, patch_size=(50, 50), chunk_size=4096):
    """Yield (chunk, valid_length) pairs covering all images in order;
    the final chunk is zero-padded past `valid_length`."""
    height, width = patch_size
    total = len(images)
    pos = 0
    while pos < total:
        chunk_x = np.zeros((chunk_size, height, width), dtype='float32')
        length = min(chunk_size, total - pos)
        for slot in range(length):
            extract_image_patch(chunk_x[slot], images[pos])
            pos += 1
        yield chunk_x, length
## augmentation
def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):
    """
    Warp `img` with the affine transform `tf` using skimage's cython kernel
    directly; faster than going through skimage.transform.warp.
    """
    return skimage.transform._warps_cy._warp_fast(
        img, tf.params, output_shape=output_shape, mode=mode, order=order)
def build_centering_transform(image_shape, target_shape=(50, 50)):
    """Translation that centers a `target_shape` window on an image of
    `image_shape` (both given as (rows, cols))."""
    rows, cols = image_shape
    target_rows, target_cols = target_shape
    # translation is in (x, y) order, i.e. (cols, rows)
    shift = ((cols - target_cols) / 2.0, (rows - target_rows) / 2.0)
    return skimage.transform.SimilarityTransform(translation=shift)
def build_rescale_transform_slow(downscale_factor, image_shape, target_shape):
    """
    Rescaling transform estimated from corner correspondences, mimicking
    skimage.transform.resize exactly. The resulting image is centered.
    """
    rows, cols = image_shape
    trows, tcols = target_shape
    # three image corners (0-indexed)
    src = np.array([[1, 1], [1, rows], [cols, rows]]) - 1
    dst = np.zeros(src.shape, dtype=np.double)
    # the 0th pixel sits at position (0.5, 0.5), hence the half-pixel shifts
    dst[:, 0] = downscale_factor * (src[:, 0] + 0.5) - 0.5
    dst[:, 1] = downscale_factor * (src[:, 1] + 0.5) - 0.5
    tform_ds = skimage.transform.AffineTransform()
    tform_ds.estimate(src, dst)
    # center the target window inside the downscaled image
    shift_x = cols / (2.0 * downscale_factor) - tcols / 2.0
    shift_y = rows / (2.0 * downscale_factor) - trows / 2.0
    tform_shift = skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
    return tform_shift + tform_ds
def build_rescale_transform_fast(downscale_factor, image_shape, target_shape):
    """
    Fast variant of build_rescale_transform_slow: build the scaling
    transform directly from `downscale_factor` instead of estimating it.
    Probably not 100% identical, but close enough in practice.
    """
    rows, cols = image_shape
    trows, tcols = target_shape
    scaling = skimage.transform.AffineTransform(scale=(downscale_factor, downscale_factor))
    # center the target window inside the downscaled image
    centering = skimage.transform.SimilarityTransform(translation=(
        cols / (2.0 * downscale_factor) - tcols / 2.0,
        rows / (2.0 * downscale_factor) - trows / 2.0))
    return centering + scaling

build_rescale_transform = build_rescale_transform_fast
def build_center_uncenter_transforms(image_shape):
    """
    Transforms shifting the image center to/from the origin, so zoom and
    rotation can be applied around the center of the image. Returns
    (recenter, uncenter).
    """
    # rows and cols must be swapped: transforms work in (x, y) order
    center = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5
    uncenter = skimage.transform.SimilarityTransform(translation=-center)
    recenter = skimage.transform.SimilarityTransform(translation=center)
    return recenter, uncenter
def build_augmentation_transform(zoom=(1.0, 1.0), rotation=0, shear=0, translation=(0, 0), flip=False):
    """Compose zoom, rotation (degrees), shear (degrees), translation and an
    optional flip into a single affine transform."""
    if flip:
        # shearing by 180 degrees equals rotating 180 degrees plus a flip,
        # so adding 180 to both shear and rotation leaves just the flip
        shear += 180
        rotation += 180
    return skimage.transform.AffineTransform(
        scale=(1 / zoom[0], 1 / zoom[1]),
        rotation=np.deg2rad(rotation),
        shear=np.deg2rad(shear),
        translation=translation)
def random_perturbation_transform(zoom_range, rotation_range, shear_range, translation_range, do_flip=True, allow_stretch=False, rng=np.random):
    """Sample a random augmentation transform from the given ranges.

    NOTE(review): the ORDER of the rng draws below is load-bearing for
    reproducibility with a seeded rng — do not reorder.
    """
    shift_x = rng.uniform(*translation_range)
    shift_y = rng.uniform(*translation_range)
    translation = (shift_x, shift_y)
    rotation = rng.uniform(*rotation_range)
    shear = rng.uniform(*shear_range)
    if do_flip:
        flip = (rng.randint(2) > 0) # flip half of the time
    else:
        flip = False
    # random zoom, sampled log-uniformly so the range is multiplicatively
    # symmetric around 1
    log_zoom_range = [np.log(z) for z in zoom_range]
    if isinstance(allow_stretch, float):
        # one shared zoom plus an independent stretch of up to allow_stretch
        log_stretch_range = [-np.log(allow_stretch), np.log(allow_stretch)]
        zoom = np.exp(rng.uniform(*log_zoom_range))
        stretch = np.exp(rng.uniform(*log_stretch_range))
        zoom_x = zoom * stretch
        zoom_y = zoom / stretch
    elif allow_stretch is True: # avoid bugs, f.e. when it is an integer
        # fully independent zoom per axis
        zoom_x = np.exp(rng.uniform(*log_zoom_range))
        zoom_y = np.exp(rng.uniform(*log_zoom_range))
    else:
        zoom_x = zoom_y = np.exp(rng.uniform(*log_zoom_range))
    # the range should be multiplicatively symmetric, so [1/1.1, 1.1] instead of [0.9, 1.1] makes more sense.
    return build_augmentation_transform((zoom_x, zoom_y), rotation, shear, translation, flip)
def perturb(img, augmentation_params, target_shape=(50, 50), rng=np.random):
    """Apply a random augmentation (sampled from `augmentation_params`) to
    `img` and return a centered float32 patch of `target_shape`."""
    centering = build_centering_transform(img.shape, target_shape)
    center, uncenter = build_center_uncenter_transforms(img.shape)
    augment = random_perturbation_transform(rng=rng, **augmentation_params)
    # shift to the image center, augment, shift back (for rotation/shearing)
    augment = uncenter + augment + center
    return fast_warp(img, centering + augment, output_shape=target_shape, mode='constant').astype('float32')
def patches_gen_augmented(images, labels, patch_size=(50, 50), chunk_size=4096,
        num_chunks=100, rng=np.random, rng_aug=np.random, augmentation_params=default_augmentation_params):
    """Yield chunks of randomly sampled, randomly augmented patches along
    with their labels. `augmentation_params=None` disables augmentation."""
    if augmentation_params is None:
        augmentation_params = no_augmentation_params
    height, width = patch_size
    for _ in range(num_chunks):
        picks = rng.randint(0, len(images), chunk_size)
        chunk_x = np.zeros((chunk_size, height, width), dtype='float32')
        chunk_y = labels[picks].astype('float32')
        for slot, idx in enumerate(picks):
            chunk_x[slot] = perturb(uint_to_float(images[idx]), augmentation_params,
                                    target_shape=patch_size, rng=rng_aug)
        yield chunk_x, chunk_y
## RESCALING
def perturb_rescaled(img, scale, augmentation_params, target_shape=(50, 50), rng=np.random):
    """
    Randomly augment `img` after rescaling it by `scale` (a DOWNSCALING
    factor); returns a float32 patch of `target_shape`.
    """
    rescale = build_rescale_transform(scale, img.shape, target_shape)  # also does centering
    center, uncenter = build_center_uncenter_transforms(img.shape)
    augment = random_perturbation_transform(rng=rng, **augmentation_params)
    # shift to the image center, augment, shift back (for rotation/shearing)
    augment = uncenter + augment + center
    return fast_warp(img, rescale + augment, output_shape=target_shape, mode='constant').astype('float32')
def rescaled_patches_gen_augmented(images, labels, estimate_scale_func, patch_size=(50, 50),
        chunk_size=4096, num_chunks=100, rng=np.random, rng_aug=np.random, augmentation_params=default_augmentation_params):
    """Yield (patches, labels, original_shapes) chunks of per-image-rescaled,
    randomly augmented patches. `estimate_scale_func(img)` supplies the
    downscaling factor for each image."""
    if augmentation_params is None:
        augmentation_params = no_augmentation_params
    height, width = patch_size
    for _ in range(num_chunks):
        picks = rng.randint(0, len(images), chunk_size)
        chunk_x = np.zeros((chunk_size, height, width), dtype='float32')
        chunk_y = labels[picks].astype('float32')
        chunk_shape = np.zeros((chunk_size, 2), dtype='float32')
        for slot, idx in enumerate(picks):
            img = uint_to_float(images[idx])
            scale = estimate_scale_func(img)
            chunk_x[slot] = perturb_rescaled(img, scale, augmentation_params, target_shape=patch_size, rng=rng_aug)
            chunk_shape[slot] = img.shape
        yield chunk_x, chunk_y, chunk_shape
def rescaled_patches_gen_ordered(images, estimate_scale_func, patch_size=(50, 50), chunk_size=4096,
        augmentation_params=no_augmentation_params, rng=np.random, rng_aug=np.random):
    """Yield (patches, original_shapes, valid_length) chunks covering all
    images in order; the final chunk is zero-padded past `valid_length`."""
    height, width = patch_size
    total = len(images)
    num_chunks = int(np.ceil(total / float(chunk_size)))
    pos = 0
    for _ in range(num_chunks):
        chunk_x = np.zeros((chunk_size, height, width), dtype='float32')
        chunk_shape = np.zeros((chunk_size, 2), dtype='float32')
        length = min(chunk_size, total - pos)
        for slot in range(length):
            img = uint_to_float(images[pos])
            scale = estimate_scale_func(img)
            chunk_x[slot] = perturb_rescaled(img, scale, augmentation_params, target_shape=patch_size, rng=rng_aug)
            chunk_shape[slot] = img.shape
            pos += 1
        yield chunk_x, chunk_shape, length
# for test-time augmentation
def perturb_rescaled_fixed(img, scale, tform_augment, target_shape=(50, 50)):
    """
    Deterministic variant of perturb_rescaled for test-time augmentation:
    apply a FIXED augmentation transform after rescaling `img` by `scale`
    (a DOWNSCALING factor).
    """
    rescale = build_rescale_transform(scale, img.shape, target_shape)  # also does centering
    center, uncenter = build_center_uncenter_transforms(img.shape)
    # wrap the fixed transform so it acts around the image center
    tform = uncenter + tform_augment + center
    return fast_warp(img, rescale + tform, output_shape=target_shape, mode='constant').astype('float32')
def rescaled_patches_gen_fixed(images, estimate_scale_func, patch_size=(50, 50), chunk_size=4096,
        augmentation_transforms=None, rng=np.random):
    """
    Yield (patches, original_shapes, valid_length) chunks enumerating every
    (image, augmentation transform) combination in order — intended for
    test-time augmentation. Defaults to the identity transform only.
    """
    if augmentation_transforms is None:
        augmentation_transforms = [tform_identity]
    height, width = patch_size
    num_tfs = len(augmentation_transforms)
    num_patches = len(images) * num_tfs
    num_chunks = int(np.ceil(num_patches / float(chunk_size)))
    pos = 0
    for _ in range(num_chunks):
        chunk_x = np.zeros((chunk_size, height, width), dtype='float32')
        chunk_shape = np.zeros((chunk_size, 2), dtype='float32')
        length = min(chunk_size, num_patches - pos)
        for slot in range(length):
            img = uint_to_float(images[pos // num_tfs])
            tf = augmentation_transforms[pos % num_tfs]
            scale = estimate_scale_func(img)  # could technically be cached per image
            chunk_x[slot] = perturb_rescaled_fixed(img, scale, tf, target_shape=patch_size)
            chunk_shape[slot] = img.shape
            pos += 1
        yield chunk_x, chunk_shape, length
### MULTISCALE GENERATORS
def perturb_multiscale(img, scale_factors, augmentation_params, target_shapes, rng=np.random):
    """
    Draw ONE random perturbation for img and warp it to several scales/shapes
    at once. Each scale is a DOWNSCALING factor, or alternatively an already
    built projective transform. Returns a list of float32 patches.
    """
    tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
    # make the random augmentation act around the image center
    augment = random_perturbation_transform(rng=rng, **augmentation_params)
    augment = tform_uncenter + augment + tform_center
    patches = []
    for scale, shape in zip(scale_factors, target_shapes):
        if isinstance(scale, skimage.transform.ProjectiveTransform):
            rescale = scale  # caller supplied a ready-made transform
        else:
            rescale = build_rescale_transform(scale, img.shape, shape)  # also centers
        patch = fast_warp(img, rescale + augment, output_shape=shape, mode='constant')
        patches.append(patch.astype('float32'))
    return patches
def multiscale_patches_gen_augmented(images, labels, scale_factors=[1.0], patch_sizes=[(50, 50)],
        chunk_size=4096, num_chunks=100, rng=np.random, rng_aug=np.random, augmentation_params=default_augmentation_params):
    """Generate randomly augmented chunks of patches at several scales at once.

    Yields (chunks_x, chunk_y, chunk_shape), where chunks_x is a list with one
    (chunk_size, p_x, p_y) float32 array per entry in patch_sizes.
    NOTE(review): the mutable default arguments are never mutated here, so
    sharing them across calls is harmless.
    """
    assert len(patch_sizes) == len(scale_factors)
    if augmentation_params is None:
        augmentation_params = no_augmentation_params
    for n in range(num_chunks):
        # sample images with replacement
        indices = rng.randint(0, len(images), chunk_size)
        chunks_x = [np.zeros((chunk_size, p_x, p_y), dtype='float32') for p_x, p_y in patch_sizes]
        chunk_y = labels[indices].astype('float32')
        chunk_shape = np.zeros((chunk_size, 2), dtype='float32')
        for k, idx in enumerate(indices):
            img = images[idx]
            img = uint_to_float(img)
            sfs = [(sf(img) if callable(sf) else sf) for sf in scale_factors] # support both fixed scale factors and variable scale factors with callables
            patches = perturb_multiscale(img, sfs, augmentation_params, target_shapes=patch_sizes, rng=rng_aug)
            for chunk_x, patch in zip(chunks_x, patches):
                chunk_x[k] = patch
            chunk_shape[k] = img.shape
        yield chunks_x, chunk_y, chunk_shape
# for test-time augmentation
def perturb_multiscale_fixed(img, scale_factors, tform_augment, target_shapes):
    """
    Apply ONE FIXED augmentation transform to img and warp it to several
    scales/shapes (for test-time augmentation). Each scale is a DOWNSCALING
    factor, or alternatively an already built projective transform.
    """
    tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
    # make the fixed augmentation act around the image center
    augment = tform_uncenter + tform_augment + tform_center
    results = []
    for scale, shape in zip(scale_factors, target_shapes):
        if isinstance(scale, skimage.transform.ProjectiveTransform):
            rescale = scale  # caller supplied a ready-made transform
        else:
            rescale = build_rescale_transform(scale, img.shape, shape)  # also centers
        warped = fast_warp(img, rescale + augment, output_shape=shape, mode='constant')
        results.append(warped.astype('float32'))
    return results
def multiscale_patches_gen_fixed(images, scale_factors=[1.0], patch_sizes=[(50, 50)], chunk_size=4096,
        augmentation_transforms=None, rng=np.random):
    """Generate chunks of patches at several scales, applying every FIXED
    augmentation transform to every image (for test-time augmentation).

    Yields (chunks_x, chunk_shape, chunk_length), where chunks_x is a list
    with one array per entry in patch_sizes and chunk_length is the number of
    valid entries in the (possibly partially filled) last chunk.
    NOTE(review): the rng parameter is unused here — the transforms are fixed.
    """
    if augmentation_transforms is None:
        augmentation_transforms = [tform_identity]
    assert len(patch_sizes) == len(scale_factors)
    num_images = len(images)
    num_tfs = len(augmentation_transforms)
    num_patches = num_images * num_tfs  # one patch per (image, transform) pair
    num_chunks = int(np.ceil(num_patches / float(chunk_size)))
    idx = 0  # global patch index, persists across chunks
    for n in range(num_chunks):
        chunks_x = [np.zeros((chunk_size, p_x, p_y), dtype='float32') for p_x, p_y in patch_sizes]
        chunk_shape = np.zeros((chunk_size, 2), dtype='float32')
        chunk_length = chunk_size
        for k in range(chunk_size):
            if idx >= num_patches:
                chunk_length = k  # last chunk is only partially filled
                break
            img = images[idx // num_tfs]
            img = uint_to_float(img)
            tf = augmentation_transforms[idx % num_tfs]
            sfs = [(sf(img) if callable(sf) else sf) for sf in scale_factors] # support both fixed scale factors and variable scale factors with callables
            patches = perturb_multiscale_fixed(img, sfs, tf, target_shapes=patch_sizes)
            for chunk_x, patch in zip(chunks_x, patches):
                chunk_x[k] = patch
            chunk_shape[k] = img.shape
            idx += 1
        yield chunks_x, chunk_shape, chunk_length
def intensity_jitter(chunk, std=0.1, rng=np.random):
    """Multiply every image in chunk by a random lognormal brightness factor.

    One factor is drawn per image (axis 0) and broadcast over the two
    remaining axes, so chunk is assumed to be 3-dimensional.
    """
    per_image = np.exp(rng.normal(0.0, std, size=chunk.shape[0]))
    return chunk * per_image.astype(chunk.dtype)[:, None, None]
### GAUSSIAN AUGMENTATION PARAMETER DISTRIBUTIONS
def random_perturbation_transform_gaussian(zoom_std, rotation_range, shear_std, translation_std, do_flip=True, stretch_std=0.0, rng=np.random):
    """Sample an augmentation transform with Gaussian-distributed parameters.

    Translation, shear, log-zoom and log-stretch are drawn from zero-mean
    normals; rotation is uniform over rotation_range; when do_flip is set, a
    flip is applied half of the time.
    """
    # NOTE: the rng is consumed in the same order as before, so results are
    # reproducible for a given rng state.
    translation = (rng.normal(0.0, translation_std), rng.normal(0.0, translation_std))
    rotation = rng.uniform(*rotation_range)
    shear = rng.normal(0.0, shear_std)
    flip = (rng.randint(2) > 0) if do_flip else False
    # zoom/stretch are lognormal, so up- and down-scaling are symmetric
    zoom = np.exp(rng.normal(0.0, zoom_std))
    stretch = np.exp(rng.normal(0.0, stretch_std))
    return build_augmentation_transform((zoom * stretch, zoom / stretch), rotation, shear, translation, flip)
def perturb_rescaled_gaussian(img, scale, augmentation_params, target_shape=(50, 50), rng=np.random):
    """
    Randomly perturb img using Gaussian-distributed augmentation parameters
    and rescale it to target_shape. scale is a DOWNSCALING factor.
    """
    tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
    augment = random_perturbation_transform_gaussian(rng=rng, **augmentation_params)
    # rotate/shear about the image center
    augment = tform_uncenter + augment + tform_center
    rescale = build_rescale_transform(scale, img.shape, target_shape)  # also centers
    out = fast_warp(img, rescale + augment, output_shape=target_shape, mode='constant')
    return out.astype('float32')
def rescaled_patches_gen_augmented_gaussian(images, labels, estimate_scale_func, patch_size=(50, 50),
        chunk_size=4096, num_chunks=100, rng=np.random, rng_aug=np.random, augmentation_params=None):
    """Generate randomly augmented chunks of rescaled patches, drawing the
    augmentation parameters from the Gaussian distributions.

    Yields (chunk_x, chunk_y, chunk_shape) tuples.
    """
    p_x, p_y = patch_size
    if augmentation_params is None:
        augmentation_params = no_augmentation_params_gaussian
    for n in range(num_chunks):
        # sample images with replacement
        indices = rng.randint(0, len(images), chunk_size)
        chunk_x = np.zeros((chunk_size, p_x, p_y), dtype='float32')
        chunk_y = labels[indices].astype('float32')
        chunk_shape = np.zeros((chunk_size, 2), dtype='float32')
        for k, idx in enumerate(indices):
            img = images[idx]
            img = uint_to_float(img)
            scale = estimate_scale_func(img)
            chunk_x[k] = perturb_rescaled_gaussian(img, scale, augmentation_params, target_shape=patch_size, rng=rng_aug)
            chunk_shape[k] = img.shape
        yield chunk_x, chunk_y, chunk_shape
| [
"utils.load_gz",
"numpy.log",
"os.path.basename",
"numpy.deg2rad",
"numpy.float32",
"numpy.zeros",
"numpy.array",
"glob.glob"
] | [((155, 180), 'glob.glob', 'glob.glob', (['"""data/train/*"""'], {}), "('data/train/*')\n", (164, 180), False, 'import glob\n'), ((308, 335), 'glob.glob', 'glob.glob', (['"""data/train/*/*"""'], {}), "('data/train/*/*')\n", (317, 335), False, 'import glob\n'), ((372, 396), 'glob.glob', 'glob.glob', (['"""data/test/*"""'], {}), "('data/test/*')\n", (381, 396), False, 'import glob\n'), ((727, 768), 'utils.load_gz', 'utils.load_gz', (['"""data/labels_train.npy.gz"""'], {}), "('data/labels_train.npy.gz')\n", (740, 768), False, 'import utils\n'), ((197, 216), 'os.path.basename', 'os.path.basename', (['d'], {}), '(d)\n', (213, 216), False, 'import os\n'), ((1893, 1940), 'utils.load_gz', 'utils.load_gz', (["('data/images_%s.npy.gz' % subset)"], {}), "('data/images_%s.npy.gz' % subset)\n", (1906, 1940), False, 'import utils\n'), ((5131, 5175), 'numpy.zeros', 'np.zeros', (['src_corners.shape'], {'dtype': 'np.double'}), '(src_corners.shape, dtype=np.double)\n', (5139, 5175), True, 'import numpy as np\n'), ((3169, 3218), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (3177, 3218), True, 'import numpy as np\n'), ((3238, 3278), 'numpy.zeros', 'np.zeros', (['(chunk_size,)'], {'dtype': '"""float32"""'}), "((chunk_size,), dtype='float32')\n", (3246, 3278), True, 'import numpy as np\n'), ((3766, 3815), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (3774, 3815), True, 'import numpy as np\n'), ((5064, 5107), 'numpy.array', 'np.array', (['[[1, 1], [1, rows], [cols, rows]]'], {}), '([[1, 1], [1, rows], [cols, rows]])\n', (5072, 5107), True, 'import numpy as np\n'), ((8208, 8217), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (8214, 8217), True, 'import numpy as np\n'), ((10162, 10211), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", 
(10170, 10211), True, 'import numpy as np\n'), ((11664, 11713), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (11672, 11713), True, 'import numpy as np\n'), ((11790, 11832), 'numpy.zeros', 'np.zeros', (['(chunk_size, 2)'], {'dtype': '"""float32"""'}), "((chunk_size, 2), dtype='float32')\n", (11798, 11832), True, 'import numpy as np\n'), ((12586, 12635), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (12594, 12635), True, 'import numpy as np\n'), ((12659, 12701), 'numpy.zeros', 'np.zeros', (['(chunk_size, 2)'], {'dtype': '"""float32"""'}), "((chunk_size, 2), dtype='float32')\n", (12667, 12701), True, 'import numpy as np\n'), ((14342, 14391), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (14350, 14391), True, 'import numpy as np\n'), ((14415, 14457), 'numpy.zeros', 'np.zeros', (['(chunk_size, 2)'], {'dtype': '"""float32"""'}), "((chunk_size, 2), dtype='float32')\n", (14423, 14457), True, 'import numpy as np\n'), ((16643, 16685), 'numpy.zeros', 'np.zeros', (['(chunk_size, 2)'], {'dtype': '"""float32"""'}), "((chunk_size, 2), dtype='float32')\n", (16651, 16685), True, 'import numpy as np\n'), ((18773, 18815), 'numpy.zeros', 'np.zeros', (['(chunk_size, 2)'], {'dtype': '"""float32"""'}), "((chunk_size, 2), dtype='float32')\n", (18781, 18815), True, 'import numpy as np\n'), ((21701, 21750), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (21709, 21750), True, 'import numpy as np\n'), ((21827, 21869), 'numpy.zeros', 'np.zeros', (['(chunk_size, 2)'], {'dtype': '"""float32"""'}), "((chunk_size, 2), dtype='float32')\n", (21835, 21869), True, 'import numpy as np\n'), ((1995, 2012), 'numpy.float32', 'np.float32', (['(255.0)'], {}), 
'(255.0)\n', (2005, 2012), True, 'import numpy as np\n'), ((6837, 6879), 'numpy.array', 'np.array', (['[image_shape[1], image_shape[0]]'], {}), '([image_shape[1], image_shape[0]])\n', (6845, 6879), True, 'import numpy as np\n'), ((7587, 7607), 'numpy.deg2rad', 'np.deg2rad', (['rotation'], {}), '(rotation)\n', (7597, 7607), True, 'import numpy as np\n'), ((7615, 7632), 'numpy.deg2rad', 'np.deg2rad', (['shear'], {}), '(shear)\n', (7625, 7632), True, 'import numpy as np\n'), ((8335, 8356), 'numpy.log', 'np.log', (['allow_stretch'], {}), '(allow_stretch)\n', (8341, 8356), True, 'import numpy as np\n'), ((16488, 16537), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (16496, 16537), True, 'import numpy as np\n'), ((18671, 18720), 'numpy.zeros', 'np.zeros', (['(chunk_size, p_x, p_y)'], {'dtype': '"""float32"""'}), "((chunk_size, p_x, p_y), dtype='float32')\n", (18679, 18720), True, 'import numpy as np\n'), ((8312, 8333), 'numpy.log', 'np.log', (['allow_stretch'], {}), '(allow_stretch)\n', (8318, 8333), True, 'import numpy as np\n')] |
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\file Test.py
\brief Code to train a denoiser network.
\copyright Copyright (c) 2019 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
\author <NAME> (<EMAIL>)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import sys
import math
import time
import argparse
import importlib
import os
from os import listdir
from os.path import isdir, isfile, join
import numpy as np
import pickle
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
# Make the local tf_ops package and the MCCNN submodule importable.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'tf_ops'))
MCCNN_DIR = os.path.join(BASE_DIR, 'MCCNN')
sys.path.append(os.path.join(MCCNN_DIR, 'utils'))
sys.path.append(os.path.join(MCCNN_DIR, 'tf_ops'))
from PyUtils import visualize_progress, save_model
from NoisyDataSet import NoisyDataSet
from tf_ops_module import find_knn, point_to_mesh_distance
def tensors_in_checkpoint_file(fileName):
    """Return the sorted list of tensor names stored in a checkpoint file."""
    reader = pywrap_tensorflow.NewCheckpointReader(fileName)
    # the keys of the variable->shape map are the stored tensor names
    return sorted(reader.get_variable_to_shape_map())
def build_tensors_in_checkpoint_file(loaded_tensors):
    """Map tensor names to the matching tensors of the default graph.

    Names that cannot be resolved in the current graph are silently skipped,
    so a checkpoint can be partially restored into a graph that only defines
    a subset of the saved variables.

    Args:
        loaded_tensors: iterable of tensor names (without the ":0" suffix).
    Returns:
        dict mapping each resolvable name to its tensor in the default graph.
    """
    full_var_list = dict()
    graph = tf.get_default_graph()  # hoisted out of the loop
    for tensor_name in loaded_tensors:
        try:
            full_var_list[tensor_name] = graph.get_tensor_by_name(tensor_name + ":0")
        except (KeyError, ValueError):
            # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
            # only "name not found / invalid name" errors should be ignored.
            pass
    return full_var_list
def float_to_color_scale(values, scale = 1.0, color1=np.array([255, 255, 0]), color2=np.array([50, 50, 255])):
    """Map scalar values to RGB colors, interpolating from color2 (value 0)
    to color1 (value >= scale). Returns an (N, 3) integer array."""
    def _blend(value):
        t = min(value / scale, 1.0)  # clip the interpolation weight at 1
        mixed = color1 * t + color2 * (1.0 - t)
        return [int(mixed[0]), int(mixed[1]), int(mixed[2])]
    return np.array([_blend(v) for v in values])
current_milli_time = lambda: time.time() * 1000.0
if __name__ == '__main__':
    # Command-line interface for evaluating a trained denoiser model.
    parser = argparse.ArgumentParser(description='Script to evaluate PtNoise2PtNoise model.')
    parser.add_argument('--modelsFolder', default='dnTestModels', help='Output folder where to save the denoised point clouds. (default: dnTestModels)')
    parser.add_argument('--inTrainedModel', default='log/model.ckpt', help='Input trained model (default: log/model.ckpt)')
    parser.add_argument('--model', default='MCModel', help='model (default: MCModel)')
    parser.add_argument('--grow', default=64, type=int, help='Grow rate (default: 64)')
    parser.add_argument('--numIters', default=10, type=int, help='Number of iterations (default: 10)')
    parser.add_argument('--numExecs', default=1, type=int, help='Number executions (default: 1)')
    parser.add_argument('--gaussFilter', action='store_true', help='Use gauss filter (default: False)')
    parser.add_argument('--clusterError', action='store_true', help='Use the clustering metric (default: False)')
    parser.add_argument('--saveModels', action='store_true', help='Save models (default: False)')
    parser.add_argument('--noCompError', action='store_true', help='No computation of the error (default: False)')
    parser.add_argument('--histogram', action='store_true', help='Create an histogram of the distances (default: False)')
    parser.add_argument('--dataset', default=0, type=int, help='Dataset (0:Gaussian, 1:ColoredGaussian, 2:Blensor, 3:RueMadame) (default: 0)')
    parser.add_argument('--gpu', default='0', help='GPU (default: 0)')
    parser.add_argument('--gpuMem', default=0.5, type=float, help='GPU memory used (default: 0.5)')
    args = parser.parse_args()
    # Create the output folder only when models will actually be written.
    if args.saveModels:
        if not os.path.exists(args.modelsFolder): os.mkdir(args.modelsFolder)
    print("Models Folder: "+str(args.modelsFolder))
    print("Input trained model: "+str(args.inTrainedModel))
    print("Model: "+args.model)
    print("Grow: "+str(args.grow))
    print("Dataset: "+str(args.dataset))
    #Load the model
    model = importlib.import_module(args.model)
    #Create session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpuMem, visible_device_list=args.gpu)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    #Create variable and place holders
    inPts = tf.placeholder(tf.float32, [None, 3])
    inPtsShape = tf.shape(inPts)
    inBatchIds = tf.zeros([inPtsShape[0], 1], dtype=tf.int32)  # single batch: all ids are 0
    inFeatures = tf.ones([inPtsShape[0], 1], dtype=tf.float32)  # constant per-point feature
    if args.clusterError:
        # second (clean) point cloud input, only needed for the clustering metric
        inPDPts = tf.placeholder(tf.float32, [None, 3])
        inPtsShape2 = tf.shape(inPDPts)
        inPDBatchIds = tf.zeros([inPtsShape2[0], 1], dtype=tf.int32)
        inPDFeatures = tf.ones([inPtsShape2[0], 1], dtype=tf.float32)
    isTraining = tf.placeholder(tf.bool, shape=())
    # ground-truth mesh (vertices/faces) plus its voxel acceleration structure
    inVertexs = tf.placeholder(tf.float32, [None, 3])
    inFaces = tf.placeholder(tf.int32, [None, 3])
    inFaceIndexs = tf.placeholder(tf.int32, [None])
    inVoxelIndexs = tf.placeholder(tf.int32, [None, None, None, 2])
    inAABBMin = tf.placeholder(tf.float32, [3])
    inCellSizes = tf.placeholder(tf.float32, [1])
    #Create the network.
    mPointHierarchyIn = model.create_point_hierarchy_input(inPts, inBatchIds, inFeatures, 1, relRad=False)
    mConvBuilder = model.create_convolution_builder(relRad=False)
    with tf.variable_scope('Denoiser_scope'):
        # the network predicts a per-point displacement
        predDisp = model.create_network_parts(
            pointHierarchyIn=mPointHierarchyIn,
            convBuilder=mConvBuilder,
            features=inFeatures,
            numInputFeatures=1,
            k=args.grow,
            isTraining=isTraining,
            dropVal=1.0)
        if args.gaussFilter:
            # subtract the low-frequency component of the displacement field
            lowFreqDisp = model.create_gaussian_conv(
                pointHierarchyIn=mPointHierarchyIn,
                featuresIn = predDisp,
                radius=0.035)
            predDisp = predDisp-lowFreqDisp
        predPts = inPts+predDisp
        # point-to-surface distances after and before denoising
        distancesGraph, _, _ = point_to_mesh_distance(predPts,
            inVertexs, inFaces, inFaceIndexs, inVoxelIndexs, inAABBMin, inCellSizes)
        initDistancesGraph, _, _ = point_to_mesh_distance(inPts,
            inVertexs, inFaces, inFaceIndexs, inVoxelIndexs, inAABBMin, inCellSizes)
        if args.clusterError:
            # nearest-neighbor search between denoised and clean points
            mPointHierarchyClean = model.create_point_hierarchy_output(inPDPts, inPDBatchIds, inPDFeatures, 1, relRad=False)
            patchRadius = 0.05
            neighPts, _, startIndexs, packedNeighs = model.create_neighborhood(mPointHierarchyIn, mPointHierarchyClean, patchRadius)
            knnIndexs = find_knn(neighPts, inPDPts, startIndexs, packedNeighs, 1)
#Create the saver
varsModelNames = tensors_in_checkpoint_file(args.inTrainedModel)
varsModel = build_tensors_in_checkpoint_file(varsModelNames)
print("Loading model: "+args.inTrainedModel)
saver1 = tf.train.Saver(var_list=varsModel)
saver1.restore(sess, args.inTrainedModel)
#Init variables
step = 0
epochStep = 0
np.random.seed(0)#int(time.time()))
#Look for the test files.
mTestNoisyDataSet = NoisyDataSet(args.dataset, False, seed=0)
print("Noisy: "+str(mTestNoisyDataSet.modelList_))
#Process the test files
totalHistogram = np.zeros((20))
modelsError = {}
modelsErrorDist = {}
modelsErrorCluster = {}
mTestNoisyDataSet.begin_epoch()
modelIter = 0
    # Main evaluation loop: denoise every test model (capped at 200 instances).
    while not(mTestNoisyDataSet.end_epoch()) and modelIter < 200:
        initPoints, modelName, modelInstance = mTestNoisyDataSet.get_current_model()
        batchIds = [[0] for currPt in initPoints]
        features = [[1.0] for currPt in initPoints]
        if not(args.noCompError):
            # Load the voxelized ground-truth mesh used for distance queries.
            initPoints = initPoints[:,0:3]
            voxelization = pickle.load(open("NoisyDataSets/TestMeshes/"+modelName+".vox", "rb"))
            indexSet = np.array(list(set(voxelization[1].flatten())))
            auxPt = voxelization[0][indexSet]
            aabbMinVal = np.amin(auxPt, axis=0)
        if args.saveModels and not(args.noCompError):
            # Save the initial (noisy) cloud colored by distance to the mesh.
            distancesRes = \
                sess.run(initDistancesGraph,
                {inPts:initPoints,
                inBatchIds:batchIds,
                inFeatures:features,
                inVertexs: voxelization[0],
                inFaces: voxelization[1],
                inFaceIndexs: voxelization[2],
                inVoxelIndexs: voxelization[3],
                inAABBMin: aabbMinVal,
                inCellSizes: [voxelization[5]],
                isTraining: False})
            distColors = float_to_color_scale(distancesRes, 0.02)
            save_model(args.modelsFolder+"/"+modelName+"_"+modelInstance+"_c",
                initPoints, distColors)
        elif args.noCompError:
            distColors = [[255, 255, 255] for pt in initPoints]
            save_model(args.modelsFolder+"/"+modelName+"_"+modelInstance+"_c",
                initPoints, distColors)
        accumErrors = []
        accumErrorsDist = []
        accumErrorsCluster = []
        lastDistances = None
        for execIter in range(args.numExecs):
            minError = 10.0
            minErrorDist = 0.0
            minErrorCluster = 0.0
            newPoints = initPoints
            # Apply the denoiser iteratively, keeping the best iteration.
            for refIter in range(args.numIters):
                if not(args.noCompError):
                    newPoints, distancesRes, predDispRes = \
                        sess.run([predPts, distancesGraph, predDisp],
                        {inPts:newPoints,
                        inBatchIds:batchIds,
                        inFeatures:features,
                        inVertexs: voxelization[0],
                        inFaces: voxelization[1],
                        inFaceIndexs: voxelization[2],
                        inVoxelIndexs: voxelization[3],
                        inAABBMin: aabbMinVal,
                        inCellSizes: [voxelization[5]],
                        isTraining: False})
                    if args.saveModels:
                        distColors = float_to_color_scale(distancesRes, 0.02)
                        save_model(args.modelsFolder+"/"+modelName+"_"+modelInstance+"_"+str(refIter),
                            newPoints, distColors)
                    distLoss = np.mean(distancesRes)
                    # Diagnostic: report NaNs produced by the network.
                    numNansPts = np.sum(np.isnan(newPoints))
                    numNansDisp = np.sum(np.isnan(predDispRes))
                    if numNansPts > 0 or numNansDisp > 0:
                        print(numNansPts)
                        print(numNansDisp)
                    clusterLoss = 0.0
                    if args.clusterError:
                        # Clustering metric: mean distance from each clean point
                        # to its nearest denoised point (patchRadius if none).
                        cleanPoints, _, _ = mTestNoisyDataSet.get_current_model(clean=True)
                        if args.dataset == 4:
                            cleanPoints = cleanPoints[:,0:3]
                        knnIndexsRes, neighPtsRes = \
                            sess.run([knnIndexs, neighPts],
                            {inPts:newPoints,
                            inPDPts:cleanPoints,
                            isTraining: False})
                        clusterDistList = []
                        for ptIter, cleanPt in enumerate(cleanPoints):
                            if knnIndexsRes[ptIter]>=0:
                                currClusterLoss = np.linalg.norm(neighPtsRes[knnIndexsRes[ptIter]]-cleanPt)
                            else:
                                currClusterLoss = patchRadius
                            clusterDistList.append(currClusterLoss)
                            clusterLoss += currClusterLoss
                        clusterLoss = clusterLoss/float(len(cleanPoints))
                        if args.saveModels:
                            distColors = float_to_color_scale(clusterDistList, 0.02, color1=np.array([255, 50, 50]), color2=np.array([50, 255, 50]))
                            save_model(args.modelsFolder+"/"+modelName+"_"+modelInstance+"_"+str(refIter)+"_cluster",
                                cleanPoints, distColors)
                    errorValue = clusterLoss + distLoss
                    print(errorValue)
                    if errorValue < minError:
                        minError = errorValue
                        minErrorDist = distLoss
                        minErrorCluster = clusterLoss
                    elif not(args.saveModels):
                        # Error stopped improving; stop unless all iterations
                        # have to be saved anyway.
                        break
                    lastDistances = distancesRes
                else:
                    # No error computation: just run the denoiser forward.
                    predDispRes, newPoints = \
                        sess.run([predDisp, predPts],
                        {inPts:newPoints,
                        inBatchIds:batchIds,
                        inFeatures:features,
                        isTraining: False})
                    if args.saveModels:
                        distColors = [[255, 255, 255] for pt in newPoints]
                        save_model(args.modelsFolder+"/"+modelName+"_"+modelInstance+"_"+str(refIter), newPoints, distColors)
            if args.histogram:
                currHistogram = np.histogram(lastDistances.flatten(), bins=20)
                totalHistogram = totalHistogram+currHistogram[0]
            visualize_progress(modelIter, mTestNoisyDataSet.get_num_instances()*args.numExecs,
                modelName+"_"+modelInstance+" Error: "+str(minError))
            modelIter += 1
            accumErrors.append(minError)
            accumErrorsDist.append(minErrorDist)
            accumErrorsCluster.append(minErrorCluster)
        # Accumulate the mean error over executions, grouped by instance name.
        if not(modelInstance in modelsError):
            modelsError[modelInstance] = [np.mean(np.array(accumErrors))]
            modelsErrorDist[modelInstance] = [np.mean(np.array(accumErrorsDist))]
            modelsErrorCluster[modelInstance] = [np.mean(np.array(accumErrorsCluster))]
        else:
            modelsError[modelInstance].append(np.mean(np.array(accumErrors)))
            modelsErrorDist[modelInstance].append(np.mean(np.array(accumErrorsDist)))
            modelsErrorCluster[modelInstance].append(np.mean(np.array(accumErrorsCluster)))
        mTestNoisyDataSet.next()
    # Final report: per-instance means and overall averages.
    totalError = 0.0
    totalErrorDist = 0.0
    totalErrorCluster = 0.0
    print("")
    for key, value in modelsError.items():
        currError = np.mean(np.array(value))
        currErrorDist = np.mean(np.array(modelsErrorDist[key]))
        currErrorCluster = np.mean(np.array(modelsErrorCluster[key]))
        totalError += currError
        totalErrorDist += currErrorDist
        totalErrorCluster += currErrorCluster
        print("Dist: ("+str(key)+"): "+str(currErrorDist))
        print("Cluster: ("+str(key)+"): "+str(currErrorCluster))
        print("Error ("+str(key)+"): "+str(currError))
    print("")
    print("Error Dist: "+str(totalErrorDist/float(len(modelsError.keys()))))
    print("Error Cluster: "+str(totalErrorCluster/float(len(modelsError.keys()))))
    print("Error: "+str(totalError/float(len(modelsError.keys()))))
    print("")
    print(totalHistogram)
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.amin",
"tf_ops_module.point_to_mesh_distance",
"numpy.isnan",
"tensorflow.ConfigProto",
"numpy.mean",
"numpy.linalg.norm",
"tensorflow.get_default_graph",
"tensorflow.GPUOptions",
"tf_ops_module.find_knn",
"os.path.join",
"... | [((805, 836), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""MCCNN"""'], {}), "(BASE_DIR, 'MCCNN')\n", (817, 836), False, 'import os\n'), ((716, 741), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (731, 741), False, 'import os\n'), ((759, 791), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""tf_ops"""'], {}), "(BASE_DIR, 'tf_ops')\n", (771, 791), False, 'import os\n'), ((853, 885), 'os.path.join', 'os.path.join', (['MCCNN_DIR', '"""utils"""'], {}), "(MCCNN_DIR, 'utils')\n", (865, 885), False, 'import os\n'), ((903, 936), 'os.path.join', 'os.path.join', (['MCCNN_DIR', '"""tf_ops"""'], {}), "(MCCNN_DIR, 'tf_ops')\n", (915, 936), False, 'import os\n'), ((1158, 1205), 'tensorflow.python.pywrap_tensorflow.NewCheckpointReader', 'pywrap_tensorflow.NewCheckpointReader', (['fileName'], {}), '(fileName)\n', (1195, 1205), False, 'from tensorflow.python import pywrap_tensorflow\n'), ((1749, 1772), 'numpy.array', 'np.array', (['[255, 255, 0]'], {}), '([255, 255, 0])\n', (1757, 1772), True, 'import numpy as np\n'), ((1781, 1804), 'numpy.array', 'np.array', (['[50, 50, 255]'], {}), '([50, 50, 255])\n', (1789, 1804), True, 'import numpy as np\n'), ((2038, 2060), 'numpy.array', 'np.array', (['valuesColors'], {}), '(valuesColors)\n', (2046, 2060), True, 'import numpy as np\n'), ((2155, 2240), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to evaluate PtNoise2PtNoise model."""'}), "(description='Script to evaluate PtNoise2PtNoise model.'\n )\n", (2178, 2240), False, 'import argparse\n'), ((4145, 4180), 'importlib.import_module', 'importlib.import_module', (['args.model'], {}), '(args.model)\n', (4168, 4180), False, 'import importlib\n'), ((4220, 4312), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'args.gpuMem', 'visible_device_list': 'args.gpu'}), '(per_process_gpu_memory_fraction=args.gpuMem,\n visible_device_list=args.gpu)\n', (4233, 4312), True, 'import tensorflow 
as tf\n'), ((4431, 4468), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {}), '(tf.float32, [None, 3])\n', (4445, 4468), True, 'import tensorflow as tf\n'), ((4486, 4501), 'tensorflow.shape', 'tf.shape', (['inPts'], {}), '(inPts)\n', (4494, 4501), True, 'import tensorflow as tf\n'), ((4519, 4563), 'tensorflow.zeros', 'tf.zeros', (['[inPtsShape[0], 1]'], {'dtype': 'tf.int32'}), '([inPtsShape[0], 1], dtype=tf.int32)\n', (4527, 4563), True, 'import tensorflow as tf\n'), ((4581, 4626), 'tensorflow.ones', 'tf.ones', (['[inPtsShape[0], 1]'], {'dtype': 'tf.float32'}), '([inPtsShape[0], 1], dtype=tf.float32)\n', (4588, 4626), True, 'import tensorflow as tf\n'), ((4907, 4940), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (4921, 4940), True, 'import tensorflow as tf\n'), ((4957, 4994), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {}), '(tf.float32, [None, 3])\n', (4971, 4994), True, 'import tensorflow as tf\n'), ((5009, 5044), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, 3]'], {}), '(tf.int32, [None, 3])\n', (5023, 5044), True, 'import tensorflow as tf\n'), ((5065, 5097), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (5079, 5097), True, 'import tensorflow as tf\n'), ((5118, 5165), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None, None, 2]'], {}), '(tf.int32, [None, None, None, 2])\n', (5132, 5165), True, 'import tensorflow as tf\n'), ((5182, 5213), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[3]'], {}), '(tf.float32, [3])\n', (5196, 5213), True, 'import tensorflow as tf\n'), ((5232, 5263), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1]'], {}), '(tf.float32, [1])\n', (5246, 5263), True, 'import tensorflow as tf\n'), ((6099, 6207), 'tf_ops_module.point_to_mesh_distance', 'point_to_mesh_distance', (['predPts', 'inVertexs', 'inFaces', 
'inFaceIndexs', 'inVoxelIndexs', 'inAABBMin', 'inCellSizes'], {}), '(predPts, inVertexs, inFaces, inFaceIndexs,\n inVoxelIndexs, inAABBMin, inCellSizes)\n', (6121, 6207), False, 'from tf_ops_module import find_knn, point_to_mesh_distance\n'), ((6245, 6351), 'tf_ops_module.point_to_mesh_distance', 'point_to_mesh_distance', (['inPts', 'inVertexs', 'inFaces', 'inFaceIndexs', 'inVoxelIndexs', 'inAABBMin', 'inCellSizes'], {}), '(inPts, inVertexs, inFaces, inFaceIndexs,\n inVoxelIndexs, inAABBMin, inCellSizes)\n', (6267, 6351), False, 'from tf_ops_module import find_knn, point_to_mesh_distance\n'), ((6960, 6994), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'varsModel'}), '(var_list=varsModel)\n', (6974, 6994), True, 'import tensorflow as tf\n'), ((7097, 7114), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7111, 7114), True, 'import numpy as np\n'), ((7188, 7229), 'NoisyDataSet.NoisyDataSet', 'NoisyDataSet', (['args.dataset', '(False)'], {'seed': '(0)'}), '(args.dataset, False, seed=0)\n', (7200, 7229), False, 'from NoisyDataSet import NoisyDataSet\n'), ((7336, 7348), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (7344, 7348), True, 'import numpy as np\n'), ((2091, 2102), 'time.time', 'time.time', ([], {}), '()\n', (2100, 2102), False, 'import time\n'), ((4672, 4709), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {}), '(tf.float32, [None, 3])\n', (4686, 4709), True, 'import tensorflow as tf\n'), ((4732, 4749), 'tensorflow.shape', 'tf.shape', (['inPDPts'], {}), '(inPDPts)\n', (4740, 4749), True, 'import tensorflow as tf\n'), ((4773, 4818), 'tensorflow.zeros', 'tf.zeros', (['[inPtsShape2[0], 1]'], {'dtype': 'tf.int32'}), '([inPtsShape2[0], 1], dtype=tf.int32)\n', (4781, 4818), True, 'import tensorflow as tf\n'), ((4842, 4888), 'tensorflow.ones', 'tf.ones', (['[inPtsShape2[0], 1]'], {'dtype': 'tf.float32'}), '([inPtsShape2[0], 1], dtype=tf.float32)\n', (4849, 4888), True, 'import tensorflow as tf\n'), 
((5472, 5507), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Denoiser_scope"""'], {}), "('Denoiser_scope')\n", (5489, 5507), True, 'import tensorflow as tf\n'), ((6682, 6739), 'tf_ops_module.find_knn', 'find_knn', (['neighPts', 'inPDPts', 'startIndexs', 'packedNeighs', '(1)'], {}), '(neighPts, inPDPts, startIndexs, packedNeighs, 1)\n', (6690, 6739), False, 'from tf_ops_module import find_knn, point_to_mesh_distance\n'), ((3828, 3861), 'os.path.exists', 'os.path.exists', (['args.modelsFolder'], {}), '(args.modelsFolder)\n', (3842, 3861), False, 'import os\n'), ((3863, 3890), 'os.mkdir', 'os.mkdir', (['args.modelsFolder'], {}), '(args.modelsFolder)\n', (3871, 3890), False, 'import os\n'), ((4338, 4377), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (4352, 4377), True, 'import tensorflow as tf\n'), ((8049, 8071), 'numpy.amin', 'np.amin', (['auxPt'], {'axis': '(0)'}), '(auxPt, axis=0)\n', (8056, 8071), True, 'import numpy as np\n'), ((8703, 8807), 'PyUtils.save_model', 'save_model', (["(args.modelsFolder + '/' + modelName + '_' + modelInstance + '_c')", 'initPoints', 'distColors'], {}), "(args.modelsFolder + '/' + modelName + '_' + modelInstance + '_c',\n initPoints, distColors)\n", (8713, 8807), False, 'from PyUtils import visualize_progress, save_model\n'), ((14508, 14523), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (14516, 14523), True, 'import numpy as np\n'), ((14557, 14587), 'numpy.array', 'np.array', (['modelsErrorDist[key]'], {}), '(modelsErrorDist[key])\n', (14565, 14587), True, 'import numpy as np\n'), ((14624, 14657), 'numpy.array', 'np.array', (['modelsErrorCluster[key]'], {}), '(modelsErrorCluster[key])\n', (14632, 14657), True, 'import numpy as np\n'), ((8918, 9022), 'PyUtils.save_model', 'save_model', (["(args.modelsFolder + '/' + modelName + '_' + modelInstance + '_c')", 'initPoints', 'distColors'], {}), "(args.modelsFolder + '/' + modelName + '_' + modelInstance 
+ '_c',\n initPoints, distColors)\n", (8928, 9022), False, 'from PyUtils import visualize_progress, save_model\n'), ((1525, 1547), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1545, 1547), True, 'import tensorflow as tf\n'), ((10380, 10401), 'numpy.mean', 'np.mean', (['distancesRes'], {}), '(distancesRes)\n', (10387, 10401), True, 'import numpy as np\n'), ((13850, 13871), 'numpy.array', 'np.array', (['accumErrors'], {}), '(accumErrors)\n', (13858, 13871), True, 'import numpy as np\n'), ((13928, 13953), 'numpy.array', 'np.array', (['accumErrorsDist'], {}), '(accumErrorsDist)\n', (13936, 13953), True, 'import numpy as np\n'), ((14013, 14041), 'numpy.array', 'np.array', (['accumErrorsCluster'], {}), '(accumErrorsCluster)\n', (14021, 14041), True, 'import numpy as np\n'), ((14112, 14133), 'numpy.array', 'np.array', (['accumErrors'], {}), '(accumErrors)\n', (14120, 14133), True, 'import numpy as np\n'), ((14194, 14219), 'numpy.array', 'np.array', (['accumErrorsDist'], {}), '(accumErrorsDist)\n', (14202, 14219), True, 'import numpy as np\n'), ((14283, 14311), 'numpy.array', 'np.array', (['accumErrorsCluster'], {}), '(accumErrorsCluster)\n', (14291, 14311), True, 'import numpy as np\n'), ((10443, 10462), 'numpy.isnan', 'np.isnan', (['newPoints'], {}), '(newPoints)\n', (10451, 10462), True, 'import numpy as np\n'), ((10506, 10527), 'numpy.isnan', 'np.isnan', (['predDispRes'], {}), '(predDispRes)\n', (10514, 10527), True, 'import numpy as np\n'), ((11434, 11493), 'numpy.linalg.norm', 'np.linalg.norm', (['(neighPtsRes[knnIndexsRes[ptIter]] - cleanPt)'], {}), '(neighPtsRes[knnIndexsRes[ptIter]] - cleanPt)\n', (11448, 11493), True, 'import numpy as np\n'), ((11950, 11973), 'numpy.array', 'np.array', (['[255, 50, 50]'], {}), '([255, 50, 50])\n', (11958, 11973), True, 'import numpy as np\n'), ((11982, 12005), 'numpy.array', 'np.array', (['[50, 255, 50]'], {}), '([50, 255, 50])\n', (11990, 12005), True, 'import numpy as np\n')] |
# Injector parameter calculation script (like-on-like doublet impingement injector)
# Authors: <NAME>, <NAME>, <NAME>, <NAME>,
# Project Caelus, 04 March 2021
"""
INPUTS:
- mdot = Mass flow rate, kg/sec
- of_ratio = Oxidizer to fuel ratio, dimensionless
- rho_f = Fuel density, kg/m^3
- rho_o = Oxidizer density, kg/m^3
- P_0, Chamber pressure, Pa
- delta_p = Pressure drop across injector, % (of chamber pressure)
- d_o = Starting diameter of oxidizer orifice, mm (1.58)
- d_f = Starting diameter of fuel orifice, mm (1.00)
- Cd_o = Discharge coefficient of oxidizer orifice, dimensionless (0.9)
- Cd_f = Discharge coefficient of fuel orifice, dimensionless (0.88)
- imp_angle = Impingement angle, degrees (60)
OUTPUTS:
- n_o = Number of oxidizer orifices
- d_o = Diameter of oxidizer orifice, mm
- a_o = Area of oxidizer orifice, mm
- L_jet_o = Oxidizer jet length, mm
    - L_poi_o = Oxidizer point of impingement distance, mm
- d_com_o = Oxidizer orifice distance (combustor), mm
- d_man_o = Oxidizer orifice distance (manifold), mm
- n_f = Number of fuel orifices
- d_f = Diameter of fuel orifice, mm
- a_f = Area of fuel orifice, mm
- L_jet_f = Fuel jet length, mm
    - L_poi_f = Fuel point of impingement distance, mm
    - d_com_f = Fuel orifice distance (combustor), mm
    - d_man_f = Fuel orifice distance (manifold), mm
- L_inj = Injector plate thickness
"""
import numpy as np
import os
import sys
from helpers.misc import print_header
def injector_main(data: dict) -> dict:
    """Calculate doublet-impingement injector geometry and store it back into `data`.

    Sizes the oxidizer/fuel orifice count and diameter from target mass flow
    rates (or from user-supplied injector areas), then derives jet lengths,
    impingement distances and plate thickness. Mutates and returns `data`.
    May call sys.exit(0) when the orifice constraints cannot be met.
    """
    # Process CEA parameters
    mdot = data["x_mdot"] # [kg/s] Target total mass flow rate (NOTE(review): read but never used below)
    mdot_o = data["x_mdot_o"] # [kg/s] Target oxidizer mass flow rate
    mdot_f = data["x_mdot_f"] # [kg/s] Target fuel mass flow rate
    of_ratio = data["of_ratio"]  # NOTE(review): unused below
    rho_f = data["rho_f"]
    rho_o = data["rho_o"]
    P_0 = data["P_0"]
    delta_p = data["delta_p"]  # injector pressure drop, % of chamber pressure
    d_o = data["min_d_o"]  # starting (minimum) oxidizer orifice diameter [mm]
    d_f = data["min_d_f"]  # starting (minimum) fuel orifice diameter [mm]
    Cd_o = data["ox"]["Cd_injector"]
    Cd_f = data["fuel"]["Cd_injector"]
    imp_angle = data["imp_angle"]  # full impingement angle [deg]
    M = data["M_coeff"]  # NOTE(review): unused below
    jet_LD = data["jet_LD"]  # jet length-to-diameter ratio
    orifice_LD = data["orifice_LD"]  # orifice length-to-diameter ratio
    """
    First, find the desired total injector area using an estimated Cd and target mass
    flow rate (x_mdot), assuming constant density for nitrous oxide. Note that real nitrous
    behaves through two-phase flow (both gaseous and liquid), so increasing the injector area
    will increase the actual oxidizer mass flow rate.
    """
    if data["ox"]["injector_area"] is None or data["fuel"]["injector_area"] is None:
        # Total injector area from incompressible orifice equation: mdot = Cd*A*sqrt(2*rho*dP)
        A_inj_total_o = mdot_o/(Cd_o * np.sqrt(2*rho_o*P_0*(delta_p/100)))
        A_inj_total_f = mdot_f/(Cd_f * np.sqrt(2*rho_f*P_0*(delta_p/100)))
    else: # Both injector areas are driving parameters
        A_inj_total_o = data["ox"]["injector_area"]
        A_inj_total_f = data["fuel"]["injector_area"]
    # Area of a single orifice [m^2] (0.001 converts mm -> m)
    a_o = (np.pi * ((d_o/2) * 0.001)**2) # Attempt to use the minimum diameter orifice
    a_f = (np.pi * ((d_f/2) * 0.001)**2)
    # Number of orifices
    n_o = A_inj_total_o / a_o
    n_f = A_inj_total_f / a_f
    # Want to round down (np.floor()) so that the minimum diameter isn't exceeded;
    # also force an even count (doublet elements come in pairs)
    n_o = np.floor(n_o) if np.floor(n_o) % 2 == 0 else np.floor(n_o) - 1
    n_f = np.floor(n_f) if np.floor(n_f) % 2 == 0 else np.floor(n_f) - 1
    if n_f <= 0:
        print_header("The given fuel injector area is too small (minimum orifice diameter exceeded).")
        sys.exit(0)
    if n_o <= 0:
        print_header("The given oxidizer injector area is too small (minimum orifice diameter exceeded).")
        sys.exit(0)
    # Check to see if maximum number of orifices is exceeded
    if n_o > data["max_n_o"]:
        n_o = data["max_n_o"]
    if n_f > data["max_n_f"]:
        n_f = data["max_n_f"]
    # Diameter of a single orifice (this is different from d_o after a set n_o is chosen) [mm]
    d_o = 2 * np.sqrt((A_inj_total_o/n_o)/np.pi) * 1e03
    d_f = 2 * np.sqrt((A_inj_total_f/n_f)/np.pi) * 1e03
    # Catch invalid inputs (if it is physically impossible to meet both orifice constraints)
    if d_o < data["min_d_o"]:
        print_header("The oxidizer minimum orifice diameter/maximum number of orifices is overconstrained.")
        sys.exit(0)
    if d_f < data["min_d_f"]:
        print_header("The fuel minimum orifice diameter/maximum number of orifices is overconstrained.")
        sys.exit(0)
    # Length of fluid jets [mm]
    L_jet_o = jet_LD * d_o
    L_jet_f = jet_LD * d_f
    # Point of impingement (axial projection of the jet) [mm]
    L_poi_o = L_jet_o * np.cos(np.deg2rad(imp_angle/2))
    L_poi_f = L_jet_f * np.cos(np.deg2rad(imp_angle/2))
    # Length (thickness) of injector [mm]
    L_inj = orifice_LD * max(d_o, d_f) * np.cos(np.deg2rad(imp_angle / 2))
    # Distance between orifice (in an element pair) on combustion chamber side [mm]
    d_com_f = 2 * L_jet_f * np.sin(np.deg2rad(imp_angle / 2))
    d_com_o = 2 * L_jet_o * np.sin(np.deg2rad(imp_angle / 2))
    # Distance between orifice (in a pair) on injector side [mm]
    # NOTE(review): d_man_f is built from oxidizer quantities (d_com_o, L_poi_o)
    # and d_man_o from fuel quantities — looks swapped; confirm intent.
    d_man_f = (d_com_o/L_poi_o) * (L_inj + L_poi_o)
    d_man_o = (d_com_f/L_poi_f) * (L_inj + L_poi_f)
    A_inj_total_o, A_inj_total_f = get_eff_A_inj(data)
    data["ox"]["A_inj_o_only"] = data["ox"]["injector_area"] # Only the oxidizer injector area
    data["ox"]["A_inj_f_only"] = data["fuel"]["injector_area"] # Only the fuel injector area
    data["ox"]["injector_area"] = A_inj_total_o # Effective oxidizer injector area (including Cv)
    data["fuel"]["injector_area"] = A_inj_total_f # Effective fuel injector area (including Cv)
    data["thrust"] = data["x_thrust"] # For now, assume target thrust is the actual thrust
    data["n_o"] = n_o
    data["n_f"] = n_f
    data["d_o"] = d_o
    data["d_f"] = d_f
    data["L_jet_o"] = L_jet_o
    data["L_jet_f"] = L_jet_f
    data["L_poi_o"] = L_poi_o
    data["L_poi_f"] = L_poi_f
    data["L_inj"] = L_inj
    data["d_com_f"] = d_com_f
    data["d_com_o"] = d_com_o
    data["d_man_f"] = d_man_f
    data["d_man_o"] = d_man_o
    # Fill remaining parameters
    # NOTE(review): the values stored below are mdot * burn_time, i.e. mass [kg],
    # but the field is named V_l (liquid volume) — confirm intended units.
    if data["ox"]["V_l"] is None: # Is this the same as using data["prop_mass"]?
        data["ox"]["V_l"] = mdot_o*data["x_burn_time"]
    if data["fuel"]["V_l"] is None:
        data["fuel"]["V_l"] = mdot_f*data["x_burn_time"]
    return data
def get_eff_A_inj(data: dict) -> tuple:
    """
    Finds effective injector area. Combines injector area with flow coefficient (Cv) values.
    Finding the effective CdA throughout the system involves the following steps:
    - Find total flow coefficient (Cv)
    - Convert Cv into CdA; CdA = Cv/sqrt(2/rho_H2O)
    - Cv is in [gallons/min]/sqrt([psi]), need to convert metric
    - i.e. convert GPM to m^3/s and psi^-0.5 to Pa^-0.5
    - Conversion factor of equation's right-hand-side: 7.59805e-07
    - (Try: 1 gpm/(1 psi)^0.5*sqrt(1 kg/m^3) in WolframAlpha)
    - So, 7.59805e-07*Cv/sqrt(2/rho_H2O) = CdA [m^2]
    - Combine valve's CdA with injector CdA to find effective CdA
    Returns (A_inj_ox, A_inj_fuel) in [m^2].
    """
    # Effective total Cv of oxidizer valves
    cv_eff_ox = total_cv(data["ox"]["valve_cvs"], data["ox"]["cv_type"])
    # Effective total Cv of fuel valves
    cv_eff_fuel = total_cv(data["fuel"]["valve_cvs"], data["fuel"]["cv_type"])
    if cv_eff_ox == 0: # User input indicated no propellant valves; simply use A_inj
        A_inj_ox = data["ox"]["injector_area"]
    else:
        A_mpv_ox = cv_eff_ox*7.59805e-07/np.sqrt(2/1000) # % 1000 kg/m^3, density of water
        # Series combination of valve CdA and injector CdA (inverse-square sum)
        A_inj_ox = 1/np.sqrt((1/(A_mpv_ox**2))+(1/(data["ox"]["injector_area"]**2))) # in [m^2]
    if cv_eff_fuel == 0: # User input indicated no propellant valves; simply use A_inj
        A_inj_fuel = data["fuel"]["injector_area"]
    else:
        A_mpv_fuel = cv_eff_fuel*7.59805e-07/np.sqrt(2/1000) # % 1000 kg/m^3, density of water
        A_inj_fuel = 1/np.sqrt((1/(A_mpv_fuel**2))+(1/(data["fuel"]["injector_area"]**2))) # in [m^2]
    return A_inj_ox, A_inj_fuel
def total_cv(valve_cvs, arg: int) -> float:
    """Combine valve flow coefficients (Cv) into one effective Cv.

    A positive scalar is returned unchanged; a non-positive scalar yields 0.
    For a non-empty list/ndarray, arg == 0 combines the valves in series
    (1/Cv_total^2 = sum of 1/Cv_i^2); any other arg treats them as parallel
    (Cv values simply add). Anything else yields 0.
    """
    scalar_types = {float, np.float64, np.float32, int}
    if type(valve_cvs) in scalar_types:
        # Single valve: its Cv is the total (non-positive means "no valve").
        return valve_cvs if valve_cvs > 0 else 0
    if type(valve_cvs) not in {list, np.ndarray} or len(valve_cvs) == 0:
        return 0
    if arg == 0:
        # Series arrangement: sum the inverse squares, then invert back.
        inv_sq_sum = sum(1 / (cv ** 2) for cv in valve_cvs)
        return np.sqrt(inv_sq_sum ** -1)
    # Parallel arrangement: coefficients add directly.
    return np.sum(valve_cvs)
| [
"numpy.sum",
"numpy.deg2rad",
"numpy.floor",
"helpers.misc.print_header",
"sys.exit",
"numpy.sqrt"
] | [((3363, 3376), 'numpy.floor', 'np.floor', (['n_o'], {}), '(n_o)\n', (3371, 3376), True, 'import numpy as np\n'), ((3436, 3449), 'numpy.floor', 'np.floor', (['n_f'], {}), '(n_f)\n', (3444, 3449), True, 'import numpy as np\n'), ((3524, 3628), 'helpers.misc.print_header', 'print_header', (['"""The given fuel injector area is too small (minimum orifice diameter exceeded)."""'], {}), "(\n 'The given fuel injector area is too small (minimum orifice diameter exceeded).'\n )\n", (3536, 3628), False, 'from helpers.misc import print_header\n'), ((3627, 3638), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3635, 3638), False, 'import sys\n'), ((3664, 3772), 'helpers.misc.print_header', 'print_header', (['"""The given oxidizer injector area is too small (minimum orifice diameter exceeded)."""'], {}), "(\n 'The given oxidizer injector area is too small (minimum orifice diameter exceeded).'\n )\n", (3676, 3772), False, 'from helpers.misc import print_header\n'), ((3771, 3782), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3779, 3782), False, 'import sys\n'), ((4302, 4412), 'helpers.misc.print_header', 'print_header', (['"""The oxidizer minimum orifice diameter/maximum number of orifices is overconstrained."""'], {}), "(\n 'The oxidizer minimum orifice diameter/maximum number of orifices is overconstrained.'\n )\n", (4314, 4412), False, 'from helpers.misc import print_header\n'), ((4411, 4422), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4419, 4422), False, 'import sys\n'), ((4461, 4567), 'helpers.misc.print_header', 'print_header', (['"""The fuel minimum orifice diameter/maximum number of orifices is overconstrained."""'], {}), "(\n 'The fuel minimum orifice diameter/maximum number of orifices is overconstrained.'\n )\n", (4473, 4567), False, 'from helpers.misc import print_header\n'), ((4566, 4577), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4574, 4577), False, 'import sys\n'), ((3408, 3421), 'numpy.floor', 'np.floor', (['n_o'], {}), '(n_o)\n', (3416, 
3421), True, 'import numpy as np\n'), ((3481, 3494), 'numpy.floor', 'np.floor', (['n_f'], {}), '(n_f)\n', (3489, 3494), True, 'import numpy as np\n'), ((4073, 4109), 'numpy.sqrt', 'np.sqrt', (['(A_inj_total_o / n_o / np.pi)'], {}), '(A_inj_total_o / n_o / np.pi)\n', (4080, 4109), True, 'import numpy as np\n'), ((4129, 4165), 'numpy.sqrt', 'np.sqrt', (['(A_inj_total_f / n_f / np.pi)'], {}), '(A_inj_total_f / n_f / np.pi)\n', (4136, 4165), True, 'import numpy as np\n'), ((4726, 4751), 'numpy.deg2rad', 'np.deg2rad', (['(imp_angle / 2)'], {}), '(imp_angle / 2)\n', (4736, 4751), True, 'import numpy as np\n'), ((4782, 4807), 'numpy.deg2rad', 'np.deg2rad', (['(imp_angle / 2)'], {}), '(imp_angle / 2)\n', (4792, 4807), True, 'import numpy as np\n'), ((4897, 4922), 'numpy.deg2rad', 'np.deg2rad', (['(imp_angle / 2)'], {}), '(imp_angle / 2)\n', (4907, 4922), True, 'import numpy as np\n'), ((5043, 5068), 'numpy.deg2rad', 'np.deg2rad', (['(imp_angle / 2)'], {}), '(imp_angle / 2)\n', (5053, 5068), True, 'import numpy as np\n'), ((5106, 5131), 'numpy.deg2rad', 'np.deg2rad', (['(imp_angle / 2)'], {}), '(imp_angle / 2)\n', (5116, 5131), True, 'import numpy as np\n'), ((7636, 7653), 'numpy.sqrt', 'np.sqrt', (['(2 / 1000)'], {}), '(2 / 1000)\n', (7643, 7653), True, 'import numpy as np\n'), ((7707, 7772), 'numpy.sqrt', 'np.sqrt', (["(1 / A_mpv_ox ** 2 + 1 / data['ox']['injector_area'] ** 2)"], {}), "(1 / A_mpv_ox ** 2 + 1 / data['ox']['injector_area'] ** 2)\n", (7714, 7772), True, 'import numpy as np\n'), ((7975, 7992), 'numpy.sqrt', 'np.sqrt', (['(2 / 1000)'], {}), '(2 / 1000)\n', (7982, 7992), True, 'import numpy as np\n'), ((8048, 8117), 'numpy.sqrt', 'np.sqrt', (["(1 / A_mpv_fuel ** 2 + 1 / data['fuel']['injector_area'] ** 2)"], {}), "(1 / A_mpv_fuel ** 2 + 1 / data['fuel']['injector_area'] ** 2)\n", (8055, 8117), True, 'import numpy as np\n'), ((8703, 8726), 'numpy.sqrt', 'np.sqrt', (['(temp_sum ** -1)'], {}), '(temp_sum ** -1)\n', (8710, 8726), True, 'import numpy as np\n'), 
((8900, 8917), 'numpy.sum', 'np.sum', (['valve_cvs'], {}), '(valve_cvs)\n', (8906, 8917), True, 'import numpy as np\n'), ((2746, 2788), 'numpy.sqrt', 'np.sqrt', (['(2 * rho_o * P_0 * (delta_p / 100))'], {}), '(2 * rho_o * P_0 * (delta_p / 100))\n', (2753, 2788), True, 'import numpy as np\n'), ((2822, 2864), 'numpy.sqrt', 'np.sqrt', (['(2 * rho_f * P_0 * (delta_p / 100))'], {}), '(2 * rho_f * P_0 * (delta_p / 100))\n', (2829, 2864), True, 'import numpy as np\n'), ((3380, 3393), 'numpy.floor', 'np.floor', (['n_o'], {}), '(n_o)\n', (3388, 3393), True, 'import numpy as np\n'), ((3453, 3466), 'numpy.floor', 'np.floor', (['n_f'], {}), '(n_f)\n', (3461, 3466), True, 'import numpy as np\n')] |
import numpy as np
# # Performs interpolation along a Bezier curve
# # p0, p1, p2, and p3 are 2D coordinates
# # t is a time value from 0 to 1
# def bezier_interp(p0, p1, p2, p3, t):
# l01 = p1 - p0
# l12 = p2 - p1
# l23 = p3 - p2
# p01 = l01 * t + p0
# p12 = l12 * t + p1
# p23 = l23 * t + p2
# l02 = p12 - p01
# l13 = p23 - p12
# p02 = l02 * t + p01
# p13 = l13 * t + p12
# l03 = p13 - p02
# p = l03 * t + p02
# return p
def linear_interp(p0, p1, t):
    """Linearly interpolate between p0 and p1 at parameter t in [0, 1].

    Works elementwise for numpy arrays as well as scalars.
    (Leftover debug print() calls removed.)
    """
    return (1 - t) * p0 + t * p1
def quadratic_interp(p0, p1, p2, t):
    """Evaluate a quadratic Bezier curve at parameter t in [0, 1].

    B(t) = (1-t)^2 p0 + 2(1-t)t p1 + t^2 p2, so B(0) == p0 and B(1) == p2.
    BUGFIX: the middle Bernstein term was missing its factor of t
    (it read 2*(1-t)*p1), so the curve did not start at p0.
    """
    term1 = (1 - t) ** 2 * p0
    term2 = 2 * (1 - t) * t * p1
    term3 = t ** 2 * p2
    return term1 + term2 + term3
def cubic_interp(p0, p1, p2, p3, t):
    """Evaluate a cubic Bezier curve with control points p0..p3 at t in [0, 1]."""
    u = 1 - t
    return (u ** 3 * p0
            + 3 * u ** 2 * t * p1
            + 3 * u * t ** 2 * p2
            + t ** 3 * p3)
def interp_frames(p0, p1, numFrames):
    """Return a (numFrames, 4) array of rects linearly interpolated from p0 toward p1.

    Row i uses t = i / numFrames, so row 0 equals p0 and the last row stops one
    step short of p1 (p1 is expected to start the next segment).
    The lerp is inlined here (instead of calling linear_interp) so that no
    leftover debug print() output is produced; the debug shape print is removed.
    """
    frames = np.zeros((numFrames, 4))
    for i in range(numFrames):
        t = i / numFrames
        frames[i] = (1 - t) * p0 + t * p1
    return frames
def interp_entire_video(knownPts, numFrames):
    """Interpolate bounding rectangles for every frame of a video.

    Args:
        knownPts: sequence of (rect, frame_index) keyframes in increasing frame
            order, where rect is a length-4 array-like.
        numFrames: total number of frames to fill.
    Returns:
        (numFrames, 4) array with rects linearly interpolated between
        consecutive keyframes.
    BUGFIX: leading frames before the first keyframe were filled with
    np.repeat(curRect, curFrame), which flattens to shape (4*curFrame,) and
    cannot broadcast into the (curFrame, 4) slice; np.tile(curRect, (curFrame, 1))
    repeats the rect row-wise as intended. Debug prints removed.
    """
    iPts = np.zeros((numFrames, 4))
    for i in range(len(knownPts) - 1):
        curRect, curFrame = knownPts[i][0], knownPts[i][1]
        nextRect, nextFrame = knownPts[i + 1][0], knownPts[i + 1][1]
        if i == 0 and curFrame > 0:
            # Hold the first keyframe's rect for any leading frames.
            iPts[0:curFrame] = np.tile(curRect, (curFrame, 1))
        if curFrame == nextFrame - 1:
            iPts[curFrame] = curRect
        else:
            iPts[curFrame:nextFrame] = interp_frames(curRect, nextRect, nextFrame - curFrame)
    return iPts
"numpy.zeros",
"numpy.repeat"
] | [((1086, 1110), 'numpy.zeros', 'np.zeros', (['(numFrames, 4)'], {}), '((numFrames, 4))\n', (1094, 1110), True, 'import numpy as np\n'), ((1294, 1318), 'numpy.zeros', 'np.zeros', (['(numFrames, 4)'], {}), '((numFrames, 4))\n', (1302, 1318), True, 'import numpy as np\n'), ((1636, 1664), 'numpy.repeat', 'np.repeat', (['curRect', 'curFrame'], {}), '(curRect, curFrame)\n', (1645, 1664), True, 'import numpy as np\n')] |
"""Tests ellipse_fit with satellites. Compatible with pytest."""
import pytest
import numpy as np
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from util.new_tle_kep_state import tle_to_state
from util.rkf5 import rkf5
from kep_determination.ellipse_fit import determine_kep
def test_ellipse_fit():
    """Tests ellipse fit with 8 satellites:
    * NOAA-1
    * GPS-23
    * Cryosat-2
    * NOAA-15
    * NOAA-18
    * NOAA-19
    * MOLNIYA 2-10
    * ISS
    To add your own test, append a tuple to `cases`: a name, the 2nd row of the
    satellite's TLE, a final time and time step such that 700±200 points are
    generated, and the expected orbital parameters
    (sma, ecc, inc, argp, raan, true_anom).
    Args:
        NIL
    Returns:
        NIL
    """
    # (name, TLE 2nd-row elements, final time [s], step [s],
    #  expected (sma, ecc, inc, argp, raan, true_anom))
    cases = [
        ("noaa-1",
         [101.7540, 195.7370, 0.0031531, 352.8640, 117.2610, 12.53984625169364],
         7200, 10,
         (7826.006538, 0.0031531, 101.7540, 352.8640, 195.7370, 117.2610)),
        ("gps-23",
         [54.4058, 84.8417, 0.0142955, 74.4543, 193.5934, 2.00565117179872],
         43080, 50,
         (26560.21419, 0.0142955, 54.4058, 74.4543, 84.8417, 193.5934)),
        ("cryosat-2",
         [92.0287, 282.8216, 0.0005088, 298.0188, 62.0505, 14.52172969429489],
         5950, 10,
         (7096.69719, 0.0005088, 92.0287, 298.0188, 282.8216, 62.0505)),
        ("noaa-15",
         [98.7705, 158.2195, 0.0009478, 307.8085, 52.2235, 14.25852803],
         6120, 10,
         (7183.76381, 0.0009478, 98.7705, 307.8085, 158.2195, 52.2235)),
        ("noaa-18",
         [99.1472, 176.6654, 0.0014092, 197.4778, 162.5909, 14.12376102669957],
         6120, 10,
         (7229.38911, 0.0014092, 99.1472, 197.4778, 176.6654, 162.5909)),
        ("noaa-19",
         [99.1401, 119.3629, 0.0014753, 44.0001, 316.2341, 14.12279464478196],
         6120, 10,
         (7229.71889, 0.0014753, 99.1401, 44.0001, 119.3629, 316.2341)),
        ("molniya 2-10",
         [63.2749, 254.2968, 0.7151443, 294.4926, 9.2905, 2.01190064320534],
         43000, 50,
         (26505.1836, 0.7151443, 63.2749, 294.4926, 254.2968, 65.56742)),
        ("ISS",
         [51.6402, 150.4026, 0.0004084, 108.2140, 238.0528, 15.54082454114406],
         5560, 10,
         (6782.95812, 0.0004084, 51.6402, 108.2140, 150.4026, 238.0528)),
    ]
    for _name, tle_elements, t_final, step, expected in cases:
        _check_satellite(tle_elements, t_final, step, expected)
def _check_satellite(tle_elements, t_final, step, expected):
    """Propagate one satellite from its TLE and assert the fitted Keplerian elements.

    Args:
        tle_elements: 2nd-row TLE values passed to tle_to_state
        t_final: integration end time [s] for rkf5
        step: integration time step [s] for rkf5
        expected: (sma, ecc, inc, argp, raan, true_anom) to assert against
    Returns:
        NIL
    """
    tle = np.array(tle_elements)
    r = tle_to_state(tle)
    _, vecs = rkf5(0, t_final, step, r)
    r = np.reshape(r, (1, 6))
    vecs = np.insert(vecs, 0, r, axis=0)
    vecs = vecs[:, 0:3]  # keep only the position columns
    kep, _ = determine_kep(vecs)
    sma, ecc, inc, argp, raan, true_anom = expected
    # Tolerances are the relative tolerances used by the original per-satellite checks.
    assert kep[0] == pytest.approx(sma, 0.1)        # sma
    assert kep[1] == pytest.approx(ecc, 0.01)       # ecc
    assert kep[2] == pytest.approx(inc, 0.1)        # inc
    assert kep[3] == pytest.approx(argp, 1.0)       # argp
    assert kep[4] == pytest.approx(raan, 0.1)       # raan
    assert kep[5] == pytest.approx(true_anom, 0.5)  # true_anom
"util.rkf5.rkf5",
"kep_determination.ellipse_fit.determine_kep",
"numpy.insert",
"util.new_tle_kep_state.tle_to_state",
"numpy.array",
"numpy.reshape",
"pytest.approx"
] | [((923, 999), 'numpy.array', 'np.array', (['[101.754, 195.737, 0.0031531, 352.864, 117.261, 12.53984625169364]'], {}), '([101.754, 195.737, 0.0031531, 352.864, 117.261, 12.53984625169364])\n', (931, 999), True, 'import numpy as np\n'), ((1012, 1029), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (1024, 1029), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((1043, 1063), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(7200)', '(10)', 'r'], {}), '(0, 7200, 10, r)\n', (1047, 1063), False, 'from util.rkf5 import rkf5\n'), ((1069, 1090), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (1079, 1090), True, 'import numpy as np\n'), ((1100, 1129), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (1109, 1129), True, 'import numpy as np\n'), ((1163, 1182), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (1176, 1182), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((1586, 1662), 'numpy.array', 'np.array', (['[54.4058, 84.8417, 0.0142955, 74.4543, 193.5934, 2.00565117179872]'], {}), '([54.4058, 84.8417, 0.0142955, 74.4543, 193.5934, 2.00565117179872])\n', (1594, 1662), True, 'import numpy as np\n'), ((1671, 1688), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (1683, 1688), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((1702, 1723), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(43080)', '(50)', 'r'], {}), '(0, 43080, 50, r)\n', (1706, 1723), False, 'from util.rkf5 import rkf5\n'), ((1729, 1750), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (1739, 1750), True, 'import numpy as np\n'), ((1760, 1789), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (1769, 1789), True, 'import numpy as np\n'), ((1823, 1842), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), 
'(vecs)\n', (1836, 1842), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((2249, 2327), 'numpy.array', 'np.array', (['[92.0287, 282.8216, 0.0005088, 298.0188, 62.0505, 14.52172969429489]'], {}), '([92.0287, 282.8216, 0.0005088, 298.0188, 62.0505, 14.52172969429489])\n', (2257, 2327), True, 'import numpy as np\n'), ((2336, 2353), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (2348, 2353), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((2367, 2387), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(5950)', '(10)', 'r'], {}), '(0, 5950, 10, r)\n', (2371, 2387), False, 'from util.rkf5 import rkf5\n'), ((2393, 2414), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (2403, 2414), True, 'import numpy as np\n'), ((2424, 2453), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (2433, 2453), True, 'import numpy as np\n'), ((2487, 2506), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (2500, 2506), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((2911, 2983), 'numpy.array', 'np.array', (['[98.7705, 158.2195, 0.0009478, 307.8085, 52.2235, 14.25852803]'], {}), '([98.7705, 158.2195, 0.0009478, 307.8085, 52.2235, 14.25852803])\n', (2919, 2983), True, 'import numpy as np\n'), ((2992, 3009), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (3004, 3009), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((3023, 3043), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(6120)', '(10)', 'r'], {}), '(0, 6120, 10, r)\n', (3027, 3043), False, 'from util.rkf5 import rkf5\n'), ((3049, 3070), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (3059, 3070), True, 'import numpy as np\n'), ((3080, 3109), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (3089, 3109), True, 'import numpy as np\n'), ((3143, 3162), 
'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (3156, 3162), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((3567, 3646), 'numpy.array', 'np.array', (['[99.1472, 176.6654, 0.0014092, 197.4778, 162.5909, 14.12376102669957]'], {}), '([99.1472, 176.6654, 0.0014092, 197.4778, 162.5909, 14.12376102669957])\n', (3575, 3646), True, 'import numpy as np\n'), ((3655, 3672), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (3667, 3672), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((3686, 3706), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(6120)', '(10)', 'r'], {}), '(0, 6120, 10, r)\n', (3690, 3706), False, 'from util.rkf5 import rkf5\n'), ((3712, 3733), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (3722, 3733), True, 'import numpy as np\n'), ((3743, 3772), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (3752, 3772), True, 'import numpy as np\n'), ((3806, 3825), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (3819, 3825), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((4230, 4308), 'numpy.array', 'np.array', (['[99.1401, 119.3629, 0.0014753, 44.0001, 316.2341, 14.12279464478196]'], {}), '([99.1401, 119.3629, 0.0014753, 44.0001, 316.2341, 14.12279464478196])\n', (4238, 4308), True, 'import numpy as np\n'), ((4317, 4334), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (4329, 4334), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((4348, 4368), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(6120)', '(10)', 'r'], {}), '(0, 6120, 10, r)\n', (4352, 4368), False, 'from util.rkf5 import rkf5\n'), ((4374, 4395), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (4384, 4395), True, 'import numpy as np\n'), ((4405, 4434), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': 
'(0)'}), '(vecs, 0, r, axis=0)\n', (4414, 4434), True, 'import numpy as np\n'), ((4468, 4487), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (4481, 4487), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((4891, 4967), 'numpy.array', 'np.array', (['[63.2749, 254.2968, 0.7151443, 294.4926, 9.2905, 2.01190064320534]'], {}), '([63.2749, 254.2968, 0.7151443, 294.4926, 9.2905, 2.01190064320534])\n', (4899, 4967), True, 'import numpy as np\n'), ((4976, 4993), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (4988, 4993), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((5007, 5028), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(43000)', '(50)', 'r'], {}), '(0, 43000, 50, r)\n', (5011, 5028), False, 'from util.rkf5 import rkf5\n'), ((5034, 5055), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (5044, 5055), True, 'import numpy as np\n'), ((5065, 5094), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (5074, 5094), True, 'import numpy as np\n'), ((5128, 5147), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (5141, 5147), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((5548, 5626), 'numpy.array', 'np.array', (['[51.6402, 150.4026, 0.0004084, 108.214, 238.0528, 15.54082454114406]'], {}), '([51.6402, 150.4026, 0.0004084, 108.214, 238.0528, 15.54082454114406])\n', (5556, 5626), True, 'import numpy as np\n'), ((5636, 5653), 'util.new_tle_kep_state.tle_to_state', 'tle_to_state', (['tle'], {}), '(tle)\n', (5648, 5653), False, 'from util.new_tle_kep_state import tle_to_state\n'), ((5667, 5687), 'util.rkf5.rkf5', 'rkf5', (['(0)', '(5560)', '(10)', 'r'], {}), '(0, 5560, 10, r)\n', (5671, 5687), False, 'from util.rkf5 import rkf5\n'), ((5693, 5714), 'numpy.reshape', 'np.reshape', (['r', '(1, 6)'], {}), '(r, (1, 6))\n', (5703, 5714), True, 'import 
numpy as np\n'), ((5724, 5753), 'numpy.insert', 'np.insert', (['vecs', '(0)', 'r'], {'axis': '(0)'}), '(vecs, 0, r, axis=0)\n', (5733, 5753), True, 'import numpy as np\n'), ((5787, 5806), 'kep_determination.ellipse_fit.determine_kep', 'determine_kep', (['vecs'], {}), '(vecs)\n', (5800, 5806), False, 'from kep_determination.ellipse_fit import determine_kep\n'), ((1204, 1235), 'pytest.approx', 'pytest.approx', (['(7826.006538)', '(0.1)'], {}), '(7826.006538, 0.1)\n', (1217, 1235), False, 'import pytest\n'), ((1266, 1296), 'pytest.approx', 'pytest.approx', (['(0.0031531)', '(0.01)'], {}), '(0.0031531, 0.01)\n', (1279, 1296), False, 'import pytest\n'), ((1328, 1355), 'pytest.approx', 'pytest.approx', (['(101.754)', '(0.1)'], {}), '(101.754, 0.1)\n', (1341, 1355), False, 'import pytest\n'), ((1390, 1417), 'pytest.approx', 'pytest.approx', (['(352.864)', '(1.0)'], {}), '(352.864, 1.0)\n', (1403, 1417), False, 'import pytest\n'), ((1453, 1480), 'pytest.approx', 'pytest.approx', (['(195.737)', '(0.1)'], {}), '(195.737, 0.1)\n', (1466, 1480), False, 'import pytest\n'), ((1516, 1543), 'pytest.approx', 'pytest.approx', (['(117.261)', '(0.5)'], {}), '(117.261, 0.5)\n', (1529, 1543), False, 'import pytest\n'), ((1864, 1895), 'pytest.approx', 'pytest.approx', (['(26560.21419)', '(0.1)'], {}), '(26560.21419, 0.1)\n', (1877, 1895), False, 'import pytest\n'), ((1926, 1956), 'pytest.approx', 'pytest.approx', (['(0.0142955)', '(0.01)'], {}), '(0.0142955, 0.01)\n', (1939, 1956), False, 'import pytest\n'), ((1988, 2015), 'pytest.approx', 'pytest.approx', (['(54.4058)', '(0.1)'], {}), '(54.4058, 0.1)\n', (2001, 2015), False, 'import pytest\n'), ((2050, 2077), 'pytest.approx', 'pytest.approx', (['(74.4543)', '(1.0)'], {}), '(74.4543, 1.0)\n', (2063, 2077), False, 'import pytest\n'), ((2113, 2140), 'pytest.approx', 'pytest.approx', (['(84.8417)', '(0.1)'], {}), '(84.8417, 0.1)\n', (2126, 2140), False, 'import pytest\n'), ((2176, 2204), 'pytest.approx', 'pytest.approx', (['(193.5934)', 
'(0.5)'], {}), '(193.5934, 0.5)\n', (2189, 2204), False, 'import pytest\n'), ((2528, 2558), 'pytest.approx', 'pytest.approx', (['(7096.69719)', '(0.1)'], {}), '(7096.69719, 0.1)\n', (2541, 2558), False, 'import pytest\n'), ((2590, 2620), 'pytest.approx', 'pytest.approx', (['(0.0005088)', '(0.01)'], {}), '(0.0005088, 0.01)\n', (2603, 2620), False, 'import pytest\n'), ((2652, 2679), 'pytest.approx', 'pytest.approx', (['(92.0287)', '(0.1)'], {}), '(92.0287, 0.1)\n', (2665, 2679), False, 'import pytest\n'), ((2714, 2742), 'pytest.approx', 'pytest.approx', (['(298.0188)', '(1.0)'], {}), '(298.0188, 1.0)\n', (2727, 2742), False, 'import pytest\n'), ((2777, 2805), 'pytest.approx', 'pytest.approx', (['(282.8216)', '(0.1)'], {}), '(282.8216, 0.1)\n', (2790, 2805), False, 'import pytest\n'), ((2840, 2867), 'pytest.approx', 'pytest.approx', (['(62.0505)', '(0.5)'], {}), '(62.0505, 0.5)\n', (2853, 2867), False, 'import pytest\n'), ((3184, 3214), 'pytest.approx', 'pytest.approx', (['(7183.76381)', '(0.1)'], {}), '(7183.76381, 0.1)\n', (3197, 3214), False, 'import pytest\n'), ((3246, 3276), 'pytest.approx', 'pytest.approx', (['(0.0009478)', '(0.01)'], {}), '(0.0009478, 0.01)\n', (3259, 3276), False, 'import pytest\n'), ((3308, 3335), 'pytest.approx', 'pytest.approx', (['(98.7705)', '(0.1)'], {}), '(98.7705, 0.1)\n', (3321, 3335), False, 'import pytest\n'), ((3370, 3398), 'pytest.approx', 'pytest.approx', (['(307.8085)', '(1.0)'], {}), '(307.8085, 1.0)\n', (3383, 3398), False, 'import pytest\n'), ((3433, 3461), 'pytest.approx', 'pytest.approx', (['(158.2195)', '(0.1)'], {}), '(158.2195, 0.1)\n', (3446, 3461), False, 'import pytest\n'), ((3496, 3523), 'pytest.approx', 'pytest.approx', (['(52.2235)', '(0.5)'], {}), '(52.2235, 0.5)\n', (3509, 3523), False, 'import pytest\n'), ((3847, 3877), 'pytest.approx', 'pytest.approx', (['(7229.38911)', '(0.1)'], {}), '(7229.38911, 0.1)\n', (3860, 3877), False, 'import pytest\n'), ((3909, 3939), 'pytest.approx', 'pytest.approx', 
(['(0.0014092)', '(0.01)'], {}), '(0.0014092, 0.01)\n', (3922, 3939), False, 'import pytest\n'), ((3971, 3998), 'pytest.approx', 'pytest.approx', (['(99.1472)', '(0.1)'], {}), '(99.1472, 0.1)\n', (3984, 3998), False, 'import pytest\n'), ((4033, 4061), 'pytest.approx', 'pytest.approx', (['(197.4778)', '(1.0)'], {}), '(197.4778, 1.0)\n', (4046, 4061), False, 'import pytest\n'), ((4096, 4124), 'pytest.approx', 'pytest.approx', (['(176.6654)', '(0.1)'], {}), '(176.6654, 0.1)\n', (4109, 4124), False, 'import pytest\n'), ((4159, 4187), 'pytest.approx', 'pytest.approx', (['(162.5909)', '(0.5)'], {}), '(162.5909, 0.5)\n', (4172, 4187), False, 'import pytest\n'), ((4509, 4539), 'pytest.approx', 'pytest.approx', (['(7229.71889)', '(0.1)'], {}), '(7229.71889, 0.1)\n', (4522, 4539), False, 'import pytest\n'), ((4570, 4600), 'pytest.approx', 'pytest.approx', (['(0.0014753)', '(0.01)'], {}), '(0.0014753, 0.01)\n', (4583, 4600), False, 'import pytest\n'), ((4631, 4658), 'pytest.approx', 'pytest.approx', (['(99.1401)', '(0.1)'], {}), '(99.1401, 0.1)\n', (4644, 4658), False, 'import pytest\n'), ((4692, 4719), 'pytest.approx', 'pytest.approx', (['(44.0001)', '(1.0)'], {}), '(44.0001, 1.0)\n', (4705, 4719), False, 'import pytest\n'), ((4754, 4782), 'pytest.approx', 'pytest.approx', (['(119.3629)', '(0.1)'], {}), '(119.3629, 0.1)\n', (4767, 4782), False, 'import pytest\n'), ((4816, 4844), 'pytest.approx', 'pytest.approx', (['(316.2341)', '(0.5)'], {}), '(316.2341, 0.5)\n', (4829, 4844), False, 'import pytest\n'), ((5169, 5199), 'pytest.approx', 'pytest.approx', (['(26505.1836)', '(0.1)'], {}), '(26505.1836, 0.1)\n', (5182, 5199), False, 'import pytest\n'), ((5231, 5261), 'pytest.approx', 'pytest.approx', (['(0.7151443)', '(0.01)'], {}), '(0.7151443, 0.01)\n', (5244, 5261), False, 'import pytest\n'), ((5293, 5320), 'pytest.approx', 'pytest.approx', (['(63.2749)', '(0.1)'], {}), '(63.2749, 0.1)\n', (5306, 5320), False, 'import pytest\n'), ((5355, 5383), 'pytest.approx', 'pytest.approx', 
(['(294.4926)', '(1.0)'], {}), '(294.4926, 1.0)\n', (5368, 5383), False, 'import pytest\n'), ((5418, 5446), 'pytest.approx', 'pytest.approx', (['(254.2968)', '(0.1)'], {}), '(254.2968, 0.1)\n', (5431, 5446), False, 'import pytest\n'), ((5481, 5509), 'pytest.approx', 'pytest.approx', (['(65.56742)', '(0.5)'], {}), '(65.56742, 0.5)\n', (5494, 5509), False, 'import pytest\n'), ((5828, 5858), 'pytest.approx', 'pytest.approx', (['(6782.95812)', '(0.1)'], {}), '(6782.95812, 0.1)\n', (5841, 5858), False, 'import pytest\n'), ((5890, 5920), 'pytest.approx', 'pytest.approx', (['(0.0004084)', '(0.01)'], {}), '(0.0004084, 0.01)\n', (5903, 5920), False, 'import pytest\n'), ((5952, 5979), 'pytest.approx', 'pytest.approx', (['(51.6402)', '(0.1)'], {}), '(51.6402, 0.1)\n', (5965, 5979), False, 'import pytest\n'), ((6014, 6041), 'pytest.approx', 'pytest.approx', (['(108.214)', '(1.0)'], {}), '(108.214, 1.0)\n', (6027, 6041), False, 'import pytest\n'), ((6077, 6105), 'pytest.approx', 'pytest.approx', (['(150.4026)', '(0.1)'], {}), '(150.4026, 0.1)\n', (6090, 6105), False, 'import pytest\n'), ((6140, 6168), 'pytest.approx', 'pytest.approx', (['(238.0528)', '(0.5)'], {}), '(238.0528, 0.5)\n', (6153, 6168), False, 'import pytest\n')] |
import csv
import numpy as np
def getDataSource(data_path):
    """Load the student dataset from the CSV file at *data_path*.

    Returns a dict with key "x" (marks in percentage) and key "y"
    (days present), each a list of floats in file order.
    """
    with open(data_path) as handle:
        records = list(csv.DictReader(handle))
    marks = [float(record["marksinpercentage"]) for record in records]
    days = [float(record["dayspresent"]) for record in records]
    return {"x": marks, "y": days}
def findCorrelation(datasource):
    """Print and return the Pearson correlation of the two series.

    Parameters
    ----------
    datasource : dict
        Mapping with keys "x" and "y" holding equal-length numeric
        sequences (as produced by getDataSource).

    Returns
    -------
    float
        The Pearson correlation coefficient between "x" and "y".
    """
    correlation = np.corrcoef(datasource["x"], datasource["y"])
    # np.corrcoef returns a 2x2 matrix; the off-diagonal entry is the
    # coefficient between the two series.
    coefficient = correlation[0, 1]
    # Fixed the garbled message ("days and present" -> "days present").
    print("correlation between marks in percentage and days present:- \n--->", coefficient)
    return coefficient
def setup():
    """Entry point: load the student-marks CSV from disk and report the
    correlation between marks and attendance."""
    # Path is relative to the working directory; the file must exist.
    data_path= "./data/Student Marks vs Days Present.csv"
    datasource=getDataSource(data_path)
    findCorrelation(datasource)
# Run the analysis when the script is executed/imported.
setup()
"csv.DictReader",
"numpy.corrcoef"
] | [((455, 500), 'numpy.corrcoef', 'np.corrcoef', (["datasource['x']", "datasource['y']"], {}), "(datasource['x'], datasource['y'])\n", (466, 500), True, 'import numpy as np\n'), ((164, 188), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (178, 188), False, 'import csv\n')] |
import warnings

import numpy as np
from scipy.special import digamma
from scipy.special import gamma
from scipy.special import gammaln
warnings.filterwarnings("error")
from utility import compute_normalized_volumes
class NNSingleFunctionalEstimator:
    """k-nearest-neighbour estimator for a family of entropy functionals.

    From normalized k-NN volumes it estimates the Shannon entropy and,
    for each alpha, the alpha-entropy, logarithmic alpha-entropy and
    exponential (alpha, beta)-entropy.
    """
    def __init__(self, ks=None, alphas=None, beta=0.):
        """
        Parameters
        ----------
        ks: array-like of int, optional
            array of k values (treated as empty when omitted)
        alphas: array-like of float, optional
            array of alpha values (treated as empty when omitted)
        beta: float
            shift used by the exponential (alpha, beta)-entropy
        """
        # Guard the None defaults: np.array(None) yields a 0-d object
        # array and iterating over None raises TypeError, so substitute
        # empty collections instead.
        self.ks = np.array(ks if ks is not None else [])
        self.alphas = alphas if alphas is not None else []
        self.beta = beta
        self.functional_names = \
            [r'Shannon entropy'] + \
            [r'{}-entropy'.format(alpha) for alpha in self.alphas] + \
            [r'Logarithmic ${}$-entropy'.format(alpha) for alpha in self.alphas] + \
            [r'Exponential $({},{})$-entropy'.format(alpha, beta) for alpha in self.alphas]
        self.num_functionals = len(self.functional_names)
    def phi(self, u):
        r"""
        Parameters
        ----------
        u: np.array of size (m, len(self.ks))
            array of normalized volume $U_{km}$ for k in self.ks

        Returns
        -------
        phis: np.array of size (self.num_functionals, m, len(self.ks))
            stack of estimator function values for each functional
        """
        # The estimator functions are undefined for non-positive volumes.
        assert (u > 0).all()
        phis = np.stack([phi_shannon_entropy(u, self.ks)] +
                        [phi_alpha_entropy(u, self.ks, alpha) for alpha in self.alphas] +
                        [phi_logarithmic_alpha_entropy(u, self.ks, alpha) for alpha in self.alphas] +
                        [phi_exponential_alpha_beta_entropy(u, self.ks, alpha, self.beta) for alpha in self.alphas],
                        0)  # (num_functionals, m, len(ks))
        return phis
    def estimate(self, x):
        """
        Arguments
        ---------
        x: np.array
            data points (m, dim)

        Returns
        -------
        np.array of size (self.num_functionals, len(self.ks))
            per-functional estimates, averaged over samples
        """
        # Compute normalized volumes
        u = compute_normalized_volumes(x, ks=self.ks)  # (m, len(ks))
        # Compute estimates for each k in ks by taking the mean over samples
        return self.phi(u).mean(1)  # (num_functionals, len(ks))
def phi_shannon_entropy(u, ks):
    """Pointwise Shannon-entropy estimator values: log(U) - psi(k)."""
    log_volumes = np.log(u)
    return log_volumes - digamma(ks)
def phi_alpha_entropy(u, ks, alpha):
    """Pointwise alpha-entropy estimator values.

    Computes Gamma(k) / Gamma(k' - alpha + 1) * U**(1 - alpha) where
    k' = max(k, ceil(alpha - 1)) keeps the gamma argument positive.
    """
    # Clip k so the gamma-function argument stays positive for alpha > 1.
    shifted_ks = np.maximum(ks, np.ceil(alpha - 1 + 1e-5)) - alpha + 1
    # gammaln avoids the overflow of log(gamma(k)) for large k.
    ratio = np.exp(gammaln(ks) - gammaln(shifted_ks))
    return ratio * u ** (1 - alpha)
def phi_logarithmic_alpha_entropy(u, ks, alpha):
    """Pointwise logarithmic alpha-entropy estimator values.

    Same gamma-ratio weight as phi_alpha_entropy, multiplied by the
    extra factor log(U) - psi(k' - alpha + 1).
    """
    # Shared shifted index k' - alpha + 1 (was computed twice before).
    shifted_ks = np.maximum(ks, np.ceil(alpha - 1 + 1e-5)) - alpha + 1
    # gammaln avoids the overflow of log(gamma(k)) for large k.
    ratio = np.exp(gammaln(ks) - gammaln(shifted_ks))
    return ratio * u ** (1 - alpha) * (np.log(u) - digamma(shifted_ks))
def phi_exponential_alpha_beta_entropy(u, ks, alpha, beta):
    """Pointwise exponential (alpha, beta)-entropy estimator values.

    Same gamma-ratio weight as phi_alpha_entropy, applied to the
    thresholded volume (U - beta)_+ raised to max(k - alpha, 0), scaled
    by U**(k - 1).
    """
    shifted_ks = np.maximum(ks, np.ceil(alpha - 1 + 1e-5)) - alpha + 1
    # gammaln avoids the overflow of log(gamma(k)) for large k.
    ratio = np.exp(gammaln(ks) - gammaln(shifted_ks))
    # (U - beta)_+ : volumes below the beta threshold contribute zero.
    clipped = (u - beta) * (u >= beta).astype(float)
    return ratio * clipped ** np.maximum(ks - alpha, 0) / u ** (ks - 1)
| [
"numpy.maximum",
"numpy.log",
"numpy.ceil",
"warnings.filterwarnings",
"scipy.special.digamma",
"numpy.array",
"utility.compute_normalized_volumes",
"scipy.special.gamma"
] | [((101, 133), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (124, 133), False, 'import warnings\n'), ((485, 497), 'numpy.array', 'np.array', (['ks'], {}), '(ks)\n', (493, 497), True, 'import numpy as np\n'), ((1952, 1993), 'utility.compute_normalized_volumes', 'compute_normalized_volumes', (['x'], {'ks': 'self.ks'}), '(x, ks=self.ks)\n', (1978, 1993), False, 'from utility import compute_normalized_volumes\n'), ((2198, 2207), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (2204, 2207), True, 'import numpy as np\n'), ((2210, 2221), 'scipy.special.digamma', 'digamma', (['ks'], {}), '(ks)\n', (2217, 2221), False, 'from scipy.special import digamma\n'), ((2599, 2608), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (2605, 2608), True, 'import numpy as np\n'), ((2904, 2929), 'numpy.maximum', 'np.maximum', (['(ks - alpha)', '(0)'], {}), '(ks - alpha, 0)\n', (2914, 2929), True, 'import numpy as np\n'), ((2286, 2295), 'scipy.special.gamma', 'gamma', (['ks'], {}), '(ks)\n', (2291, 2295), False, 'from scipy.special import gamma\n'), ((2479, 2488), 'scipy.special.gamma', 'gamma', (['ks'], {}), '(ks)\n', (2484, 2488), False, 'from scipy.special import gamma\n'), ((2762, 2771), 'scipy.special.gamma', 'gamma', (['ks'], {}), '(ks)\n', (2767, 2771), False, 'from scipy.special import gamma\n'), ((2634, 2660), 'numpy.ceil', 'np.ceil', (['(alpha - 1 + 1e-05)'], {}), '(alpha - 1 + 1e-05)\n', (2641, 2660), True, 'import numpy as np\n'), ((2327, 2353), 'numpy.ceil', 'np.ceil', (['(alpha - 1 + 1e-05)'], {}), '(alpha - 1 + 1e-05)\n', (2334, 2353), True, 'import numpy as np\n'), ((2520, 2546), 'numpy.ceil', 'np.ceil', (['(alpha - 1 + 1e-05)'], {}), '(alpha - 1 + 1e-05)\n', (2527, 2546), True, 'import numpy as np\n'), ((2803, 2829), 'numpy.ceil', 'np.ceil', (['(alpha - 1 + 1e-05)'], {}), '(alpha - 1 + 1e-05)\n', (2810, 2829), True, 'import numpy as np\n')] |
# Triples class for (T) corrections, CC3, etc.
import numpy as np
from opt_einsum import contract
class cctriples(object):
    """Triples corrections for coupled cluster theory.

    Implements three equivalent algorithms for the perturbative (T)
    energy correction, together with the connected (t3c) and
    disconnected (t3d) triples amplitudes they require.

    Index convention: i, j, k run over occupied orbitals (slice ``o``)
    and a, b, c over virtual orbitals (slice ``v``).
    """
    def __init__(self, ccwfn):
        # ccwfn supplies the orbital slices (o, v), dimensions (no, nv),
        # integrals (H.F, H.ERI, H.L) and converged amplitudes (t1, t2).
        self.ccwfn = ccwfn
    # Vikings' formulation
    def t_vikings(self):
        """Compute the (T) energy correction, looping over occupied triples."""
        o = self.ccwfn.o
        v = self.ccwfn.v
        no = self.ccwfn.no
        F = self.ccwfn.H.F
        ERI = self.ccwfn.H.ERI
        L = self.ccwfn.H.L
        t1 = self.ccwfn.t1
        t2 = self.ccwfn.t2
        # X1/X2 accumulate contractions of the triples with the
        # integrals; same shapes as t1 and t2 respectively.
        X1 = np.zeros_like(self.ccwfn.t1)
        X2 = np.zeros_like(self.ccwfn.t2)
        for i in range(no):
            for j in range(no):
                for k in range(no):
                    # Connected triples amplitude for this (i, j, k).
                    t3 = self.t3c_ijk(o, v, i, j, k, t2, ERI, F)
                    X1[i] += contract('abc,bc->a',(t3 - t3.swapaxes(0,2)), L[j,k,v,v])
                    X2[i,j] += contract('abc,c->ab',(t3 - t3.swapaxes(0,2)), F[k,v])
                    X2[i,j] += contract('abc,dbc->ad', (2.0*t3 - t3.swapaxes(1,2) - t3.swapaxes(0,2)),ERI[v,k,v,v])
                    X2[i] -= contract('abc,lc->lab', (2.0*t3 - t3.swapaxes(1,2) - t3.swapaxes(0,2)),ERI[j,k,o,v])
        # Assemble the energy from singles and (spin-adapted) doubles.
        ET = 2.0 * contract('ia,ia->', t1, X1)
        ET += contract('ijab,ijab->', (4.0*t2 - 2.0*t2.swapaxes(2,3)), X2)
        return ET
    # Vikings' formulation – inverted algorithm
    def t_vikings_inverted(self):
        """Same (T) correction as t_vikings, looping over virtual triples."""
        o = self.ccwfn.o
        v = self.ccwfn.v
        no = self.ccwfn.no
        nv = self.ccwfn.nv
        F = self.ccwfn.H.F
        ERI = self.ccwfn.H.ERI
        L = self.ccwfn.H.L
        t1 = self.ccwfn.t1
        t2 = self.ccwfn.t2
        # Accumulators are transposed relative to t_vikings because the
        # outer loops run over virtual indices here.
        X1 = np.zeros_like(t1.T)
        X2 = np.zeros_like(t2.T)
        for a in range(nv):
            for b in range(nv):
                for c in range(nv):
                    # Connected triples amplitude for this (a, b, c).
                    t3 = self.t3c_abc(o, v, a, b, c, t2, ERI, F, True)
                    X1[a] += contract('ijk,jk->i',(t3 - t3.swapaxes(0,2)), L[o,o,b+no,c+no])
                    X2[a,b] += contract('ijk,k->ij',(t3 - t3.swapaxes(0,2)), F[o,c+no])
                    X2[a] += contract('ijk,dk->dij', (2.0*t3 - t3.swapaxes(1,2) - t3.swapaxes(0,2)),ERI[v,o,b+no,c+no])
                    X2[a,b] -= contract('ijk,jkl->il', (2.0*t3 - t3.swapaxes(1,2) - t3.swapaxes(0,2)),ERI[o,o,o,c+no])
        # Transpose back before contracting with the amplitudes.
        ET = 2.0 * contract('ia,ia->', t1, X1.T)
        ET += contract('ijab,ijab->', (4.0*t2 - 2.0*t2.swapaxes(2,3)), X2.T)
        return ET
    # Lee and Rendell's formulation
    def t_tjl(self):
        """Compute the (T) correction over unique (i>=j>=k, a>=b>=c) triples.

        Degeneracy factors correct for the restricted index ranges.
        """
        o = self.ccwfn.o
        v = self.ccwfn.v
        no = self.ccwfn.no
        nv = self.ccwfn.nv
        F = self.ccwfn.H.F
        ERI = self.ccwfn.H.ERI
        t1 = self.ccwfn.t1
        t2 = self.ccwfn.t2
        ET = 0.0
        for i in range(no):
            for j in range(i+1):
                for k in range(j+1):
                    # Bare (no denominator) connected and disconnected amplitudes.
                    W3 = self.t3c_ijk(o, v, i, j, k, t2, ERI, F, False)
                    V3 = self.t3d_ijk(o, v, i, j, k, t1, t2, ERI, F, False) + W3
                    # Scale V3 for coincident virtual indices.
                    for a in range(nv):
                        for b in range(nv):
                            for c in range(nv):
                                V3[a,b,c] /= (1.0 + int(a == b) + int(a == c) + int(b == c))
                    # Symmetrized products over the six abc permutations.
                    X3 = W3 * V3  # abc
                    X3 += W3.swapaxes(1,2) * V3.swapaxes(1,2)  # acb
                    X3 += W3.swapaxes(0,1) * V3.swapaxes(0,1)  # bac
                    X3 += W3.swapaxes(0,1).swapaxes(1,2) * V3.swapaxes(0,1).swapaxes(1,2)  # bca
                    X3 += W3.swapaxes(0,1).swapaxes(0,2) * V3.swapaxes(0,1).swapaxes(0,2)  # cab
                    X3 += W3.swapaxes(0,2) * V3.swapaxes(0,2)  # cba
                    Y3 = V3 + V3.swapaxes(0,1).swapaxes(1,2) + V3.swapaxes(0,1).swapaxes(0,2)
                    Z3 = V3.swapaxes(1,2) + V3.swapaxes(0,1) + V3.swapaxes(0,2)
                    # Orbital-energy denominator F_ii+F_jj+F_kk-F_aa-F_bb-F_cc.
                    Fv = np.diag(F)[v]
                    denom = np.zeros_like(W3)
                    denom -= Fv.reshape(-1,1,1) + Fv.reshape(-1,1) + Fv
                    denom += F[i,i] + F[j,j] + F[k,k]
                    # Energy over unique virtual triples with occupied
                    # degeneracy factor (2 - delta terms).
                    for a in range(nv):
                        for b in range(a+1):
                            for c in range(b+1):
                                ET += (
                                    (Y3[a,b,c] - 2.0 * Z3[a,b,c]) * (W3[a,b,c] + W3[b,c,a] + W3[c,a,b])
                                    + (Z3[a,b,c] - 2.0 * Y3[a,b,c]) * (W3[a,c,b] + W3[b,a,c] + W3[c,b,a])
                                    + 3.0 * X3[a,b,c]) * (2.0 - (int(i == j) + int(i == k) + int(j == k)))/denom[a,b,c]
        return ET
    # Various triples formulations; useful for (T) corrections and CC3
    def t3c_ijk(self, o, v, i, j, k, t2, ERI, F, WithDenom=True):
        """Connected T3 amplitude t3[a,b,c] for fixed occupied (i, j, k).

        When ``WithDenom`` is True the result is divided by the
        orbital-energy denominator; otherwise the bare numerator is
        returned (as needed by t_tjl).
        """
        # Particle (ov|vv)-type contractions, all six permutations.
        t3 = contract('eab,ce->abc', ERI[i,v,v,v], t2[k,j])
        t3 += contract('eac,be->abc', ERI[i,v,v,v], t2[j,k])
        t3 += contract('eca,be->abc', ERI[k,v,v,v], t2[j,i])
        t3 += contract('ecb,ae->abc', ERI[k,v,v,v], t2[i,j])
        t3 += contract('ebc,ae->abc', ERI[j,v,v,v], t2[i,k])
        t3 += contract('eba,ce->abc', ERI[j,v,v,v], t2[k,i])
        # Hole (oo|ov)-type contractions, subtracted.
        t3 -= contract('mc,mab->abc', ERI[j,k,o,v], t2[i])
        t3 -= contract('mb,mac->abc', ERI[k,j,o,v], t2[i])
        t3 -= contract('mb,mca->abc', ERI[i,j,o,v], t2[k])
        t3 -= contract('ma,mcb->abc', ERI[j,i,o,v], t2[k])
        t3 -= contract('ma,mbc->abc', ERI[k,i,o,v], t2[j])
        t3 -= contract('mc,mba->abc', ERI[i,k,o,v], t2[j])
        if WithDenom is True:
            # Denominator F_ii+F_jj+F_kk-F_aa-F_bb-F_cc.
            Fv = np.diag(F)[v]
            denom = np.zeros_like(t3)
            denom -= Fv.reshape(-1,1,1) + Fv.reshape(-1,1) + Fv
            denom += F[i,i] + F[j,j] + F[k,k]
            return t3/denom
        else:
            return t3
    def t3c_abc(self, o, v, a, b, c, t2, ERI, F, WithDenom=True):
        """Connected T3 amplitude t3[i,j,k] for fixed virtual (a, b, c).

        Mirror image of t3c_ijk with the roles of occupied and virtual
        indices exchanged; the denominator sign is inverted accordingly.
        """
        no = o.stop
        t3 = contract('ie,kje->ijk', ERI[o,v,a+no,b+no], t2[o,o,c])
        t3 += contract('ie,jke->ijk', ERI[o,v,a+no,c+no], t2[o,o,b])
        t3 += contract('ke,jie->ijk', ERI[o,v,c+no,a+no], t2[o,o,b])
        t3 += contract('ke,ije->ijk', ERI[o,v,c+no,b+no], t2[o,o,a])
        t3 += contract('je,ike->ijk', ERI[o,v,b+no,c+no], t2[o,o,a])
        t3 += contract('je,kie->ijk', ERI[o,v,b+no,a+no], t2[o,o,c])
        t3 -= contract('jkm,im->ijk', ERI[o,o,o,c+no], t2[o,o,a,b])
        t3 -= contract('kjm,im->ijk', ERI[o,o,o,b+no], t2[o,o,a,c])
        t3 -= contract('ijm,km->ijk', ERI[o,o,o,b+no], t2[o,o,c,a])
        t3 -= contract('jim,km->ijk', ERI[o,o,o,a+no], t2[o,o,c,b])
        t3 -= contract('kim,jm->ijk', ERI[o,o,o,a+no], t2[o,o,b,c])
        t3 -= contract('ikm,jm->ijk', ERI[o,o,o,c+no], t2[o,o,b,a])
        if WithDenom is True:
            # Denominator F_ii+F_jj+F_kk-F_aa-F_bb-F_cc (occupied part
            # positive here because the loop indices are virtual).
            Fo = np.diag(F)[o]
            denom = np.zeros_like(t3)
            denom += Fo.reshape(-1,1,1) + Fo.reshape(-1,1) + Fo
            denom -= F[a+no,a+no] + F[b+no,b+no] + F[c+no,c+no]
            return t3/denom
        else:
            return t3
    def t3d_ijk(self, o, v, i, j, k, t1, t2, ERI, F, WithDenom=True):
        """Disconnected T3 amplitude t3[a,b,c] for fixed occupied (i, j, k).

        Built from singles/doubles products with integrals and Fock
        elements; denominator handling matches t3c_ijk.
        """
        t3 = contract('ab,c->abc', ERI[i,j,v,v], t1[k])
        t3 += contract('ac,b->abc', ERI[i,k,v,v], t1[j])
        t3 += contract('bc,a->abc', ERI[j,k,v,v], t1[i])
        t3 += contract('ab,c->abc', t2[i,j], F[k,v])
        t3 += contract('ac,b->abc', t2[i,k], F[j,v])
        t3 += contract('bc,a->abc', t2[j,k], F[i,v])
        if WithDenom is True:
            # Denominator F_ii+F_jj+F_kk-F_aa-F_bb-F_cc.
            Fv = np.diag(F)[v]
            denom = np.zeros_like(t3)
            denom -= Fv.reshape(-1,1,1) + Fv.reshape(-1,1) + Fv
            denom += F[i,i] + F[j,j] + F[k,k]
            return t3/denom
        else:
            return t3
| [
"opt_einsum.contract",
"numpy.zeros_like",
"numpy.diag"
] | [((467, 495), 'numpy.zeros_like', 'np.zeros_like', (['self.ccwfn.t1'], {}), '(self.ccwfn.t1)\n', (480, 495), True, 'import numpy as np\n'), ((509, 537), 'numpy.zeros_like', 'np.zeros_like', (['self.ccwfn.t2'], {}), '(self.ccwfn.t2)\n', (522, 537), True, 'import numpy as np\n'), ((1585, 1604), 'numpy.zeros_like', 'np.zeros_like', (['t1.T'], {}), '(t1.T)\n', (1598, 1604), True, 'import numpy as np\n'), ((1618, 1637), 'numpy.zeros_like', 'np.zeros_like', (['t2.T'], {}), '(t2.T)\n', (1631, 1637), True, 'import numpy as np\n'), ((4648, 4698), 'opt_einsum.contract', 'contract', (['"""eab,ce->abc"""', 'ERI[i, v, v, v]', 't2[k, j]'], {}), "('eab,ce->abc', ERI[i, v, v, v], t2[k, j])\n", (4656, 4698), False, 'from opt_einsum import contract\n'), ((4709, 4759), 'opt_einsum.contract', 'contract', (['"""eac,be->abc"""', 'ERI[i, v, v, v]', 't2[j, k]'], {}), "('eac,be->abc', ERI[i, v, v, v], t2[j, k])\n", (4717, 4759), False, 'from opt_einsum import contract\n'), ((4770, 4820), 'opt_einsum.contract', 'contract', (['"""eca,be->abc"""', 'ERI[k, v, v, v]', 't2[j, i]'], {}), "('eca,be->abc', ERI[k, v, v, v], t2[j, i])\n", (4778, 4820), False, 'from opt_einsum import contract\n'), ((4831, 4881), 'opt_einsum.contract', 'contract', (['"""ecb,ae->abc"""', 'ERI[k, v, v, v]', 't2[i, j]'], {}), "('ecb,ae->abc', ERI[k, v, v, v], t2[i, j])\n", (4839, 4881), False, 'from opt_einsum import contract\n'), ((4892, 4942), 'opt_einsum.contract', 'contract', (['"""ebc,ae->abc"""', 'ERI[j, v, v, v]', 't2[i, k]'], {}), "('ebc,ae->abc', ERI[j, v, v, v], t2[i, k])\n", (4900, 4942), False, 'from opt_einsum import contract\n'), ((4953, 5003), 'opt_einsum.contract', 'contract', (['"""eba,ce->abc"""', 'ERI[j, v, v, v]', 't2[k, i]'], {}), "('eba,ce->abc', ERI[j, v, v, v], t2[k, i])\n", (4961, 5003), False, 'from opt_einsum import contract\n'), ((5015, 5062), 'opt_einsum.contract', 'contract', (['"""mc,mab->abc"""', 'ERI[j, k, o, v]', 't2[i]'], {}), "('mc,mab->abc', ERI[j, k, o, v], t2[i])\n", (5023, 
5062), False, 'from opt_einsum import contract\n'), ((5074, 5121), 'opt_einsum.contract', 'contract', (['"""mb,mac->abc"""', 'ERI[k, j, o, v]', 't2[i]'], {}), "('mb,mac->abc', ERI[k, j, o, v], t2[i])\n", (5082, 5121), False, 'from opt_einsum import contract\n'), ((5133, 5180), 'opt_einsum.contract', 'contract', (['"""mb,mca->abc"""', 'ERI[i, j, o, v]', 't2[k]'], {}), "('mb,mca->abc', ERI[i, j, o, v], t2[k])\n", (5141, 5180), False, 'from opt_einsum import contract\n'), ((5192, 5239), 'opt_einsum.contract', 'contract', (['"""ma,mcb->abc"""', 'ERI[j, i, o, v]', 't2[k]'], {}), "('ma,mcb->abc', ERI[j, i, o, v], t2[k])\n", (5200, 5239), False, 'from opt_einsum import contract\n'), ((5251, 5298), 'opt_einsum.contract', 'contract', (['"""ma,mbc->abc"""', 'ERI[k, i, o, v]', 't2[j]'], {}), "('ma,mbc->abc', ERI[k, i, o, v], t2[j])\n", (5259, 5298), False, 'from opt_einsum import contract\n'), ((5310, 5357), 'opt_einsum.contract', 'contract', (['"""mc,mba->abc"""', 'ERI[i, k, o, v]', 't2[j]'], {}), "('mc,mba->abc', ERI[i, k, o, v], t2[j])\n", (5318, 5357), False, 'from opt_einsum import contract\n'), ((5731, 5794), 'opt_einsum.contract', 'contract', (['"""ie,kje->ijk"""', 'ERI[o, v, a + no, b + no]', 't2[o, o, c]'], {}), "('ie,kje->ijk', ERI[o, v, a + no, b + no], t2[o, o, c])\n", (5739, 5794), False, 'from opt_einsum import contract\n'), ((5800, 5863), 'opt_einsum.contract', 'contract', (['"""ie,jke->ijk"""', 'ERI[o, v, a + no, c + no]', 't2[o, o, b]'], {}), "('ie,jke->ijk', ERI[o, v, a + no, c + no], t2[o, o, b])\n", (5808, 5863), False, 'from opt_einsum import contract\n'), ((5869, 5932), 'opt_einsum.contract', 'contract', (['"""ke,jie->ijk"""', 'ERI[o, v, c + no, a + no]', 't2[o, o, b]'], {}), "('ke,jie->ijk', ERI[o, v, c + no, a + no], t2[o, o, b])\n", (5877, 5932), False, 'from opt_einsum import contract\n'), ((5938, 6001), 'opt_einsum.contract', 'contract', (['"""ke,ije->ijk"""', 'ERI[o, v, c + no, b + no]', 't2[o, o, a]'], {}), "('ke,ije->ijk', ERI[o, v, c + no, b + 
no], t2[o, o, a])\n", (5946, 6001), False, 'from opt_einsum import contract\n'), ((6007, 6070), 'opt_einsum.contract', 'contract', (['"""je,ike->ijk"""', 'ERI[o, v, b + no, c + no]', 't2[o, o, a]'], {}), "('je,ike->ijk', ERI[o, v, b + no, c + no], t2[o, o, a])\n", (6015, 6070), False, 'from opt_einsum import contract\n'), ((6076, 6139), 'opt_einsum.contract', 'contract', (['"""je,kie->ijk"""', 'ERI[o, v, b + no, a + no]', 't2[o, o, c]'], {}), "('je,kie->ijk', ERI[o, v, b + no, a + no], t2[o, o, c])\n", (6084, 6139), False, 'from opt_einsum import contract\n'), ((6146, 6207), 'opt_einsum.contract', 'contract', (['"""jkm,im->ijk"""', 'ERI[o, o, o, c + no]', 't2[o, o, a, b]'], {}), "('jkm,im->ijk', ERI[o, o, o, c + no], t2[o, o, a, b])\n", (6154, 6207), False, 'from opt_einsum import contract\n'), ((6214, 6275), 'opt_einsum.contract', 'contract', (['"""kjm,im->ijk"""', 'ERI[o, o, o, b + no]', 't2[o, o, a, c]'], {}), "('kjm,im->ijk', ERI[o, o, o, b + no], t2[o, o, a, c])\n", (6222, 6275), False, 'from opt_einsum import contract\n'), ((6282, 6343), 'opt_einsum.contract', 'contract', (['"""ijm,km->ijk"""', 'ERI[o, o, o, b + no]', 't2[o, o, c, a]'], {}), "('ijm,km->ijk', ERI[o, o, o, b + no], t2[o, o, c, a])\n", (6290, 6343), False, 'from opt_einsum import contract\n'), ((6350, 6411), 'opt_einsum.contract', 'contract', (['"""jim,km->ijk"""', 'ERI[o, o, o, a + no]', 't2[o, o, c, b]'], {}), "('jim,km->ijk', ERI[o, o, o, a + no], t2[o, o, c, b])\n", (6358, 6411), False, 'from opt_einsum import contract\n'), ((6418, 6479), 'opt_einsum.contract', 'contract', (['"""kim,jm->ijk"""', 'ERI[o, o, o, a + no]', 't2[o, o, b, c]'], {}), "('kim,jm->ijk', ERI[o, o, o, a + no], t2[o, o, b, c])\n", (6426, 6479), False, 'from opt_einsum import contract\n'), ((6486, 6547), 'opt_einsum.contract', 'contract', (['"""ikm,jm->ijk"""', 'ERI[o, o, o, c + no]', 't2[o, o, b, a]'], {}), "('ikm,jm->ijk', ERI[o, o, o, c + no], t2[o, o, b, a])\n", (6494, 6547), False, 'from opt_einsum import 
contract\n'), ((6917, 6962), 'opt_einsum.contract', 'contract', (['"""ab,c->abc"""', 'ERI[i, j, v, v]', 't1[k]'], {}), "('ab,c->abc', ERI[i, j, v, v], t1[k])\n", (6925, 6962), False, 'from opt_einsum import contract\n'), ((6974, 7019), 'opt_einsum.contract', 'contract', (['"""ac,b->abc"""', 'ERI[i, k, v, v]', 't1[j]'], {}), "('ac,b->abc', ERI[i, k, v, v], t1[j])\n", (6982, 7019), False, 'from opt_einsum import contract\n'), ((7031, 7076), 'opt_einsum.contract', 'contract', (['"""bc,a->abc"""', 'ERI[j, k, v, v]', 't1[i]'], {}), "('bc,a->abc', ERI[j, k, v, v], t1[i])\n", (7039, 7076), False, 'from opt_einsum import contract\n'), ((7088, 7128), 'opt_einsum.contract', 'contract', (['"""ab,c->abc"""', 't2[i, j]', 'F[k, v]'], {}), "('ab,c->abc', t2[i, j], F[k, v])\n", (7096, 7128), False, 'from opt_einsum import contract\n'), ((7141, 7181), 'opt_einsum.contract', 'contract', (['"""ac,b->abc"""', 't2[i, k]', 'F[j, v]'], {}), "('ac,b->abc', t2[i, k], F[j, v])\n", (7149, 7181), False, 'from opt_einsum import contract\n'), ((7194, 7234), 'opt_einsum.contract', 'contract', (['"""bc,a->abc"""', 't2[j, k]', 'F[i, v]'], {}), "('bc,a->abc', t2[j, k], F[i, v])\n", (7202, 7234), False, 'from opt_einsum import contract\n'), ((1123, 1150), 'opt_einsum.contract', 'contract', (['"""ia,ia->"""', 't1', 'X1'], {}), "('ia,ia->', t1, X1)\n", (1131, 1150), False, 'from opt_einsum import contract\n'), ((2247, 2276), 'opt_einsum.contract', 'contract', (['"""ia,ia->"""', 't1', 'X1.T'], {}), "('ia,ia->', t1, X1.T)\n", (2255, 2276), False, 'from opt_einsum import contract\n'), ((5437, 5454), 'numpy.zeros_like', 'np.zeros_like', (['t3'], {}), '(t3)\n', (5450, 5454), True, 'import numpy as np\n'), ((6622, 6639), 'numpy.zeros_like', 'np.zeros_like', (['t3'], {}), '(t3)\n', (6635, 6639), True, 'import numpy as np\n'), ((7315, 7332), 'numpy.zeros_like', 'np.zeros_like', (['t3'], {}), '(t3)\n', (7328, 7332), True, 'import numpy as np\n'), ((5403, 5413), 'numpy.diag', 'np.diag', (['F'], {}), '(F)\n', 
(5410, 5413), True, 'import numpy as np\n'), ((6588, 6598), 'numpy.diag', 'np.diag', (['F'], {}), '(F)\n', (6595, 6598), True, 'import numpy as np\n'), ((7281, 7291), 'numpy.diag', 'np.diag', (['F'], {}), '(F)\n', (7288, 7291), True, 'import numpy as np\n'), ((3827, 3844), 'numpy.zeros_like', 'np.zeros_like', (['W3'], {}), '(W3)\n', (3840, 3844), True, 'import numpy as np\n'), ((3785, 3795), 'numpy.diag', 'np.diag', (['F'], {}), '(F)\n', (3792, 3795), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import glob
import logging
import os
import time
import numpy
from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l2
from alphazero.nnet import NNet
from gomoku.env import ChessType
numpy.random.seed(1337) # for reproducibility
class GomokuNNet(NNet):
    """AlphaZero-style policy/value network for Gomoku.

    The input is a stack of ``history_num * 2 + 1`` board planes
    (per-player move histories plus a colour plane); the model outputs
    a move-probability vector and a scalar value in [-1, 1].
    """
    def __init__(self, env, args):
        # env: game environment (board codec); args: hyperparameters
        # (rows, columns, history_num, conv/residual settings, lr, ...).
        self.env = env
        self.args = args
        self.model = self.build()
    def build(self):
        """Construct and compile the residual conv net (policy + value heads)."""
        def build_residual_block(input):
            # Standard two-conv residual block with a skip connection.
            block = Conv2D(self.args.conv_filters, self.args.conv_kernel, padding="same", data_format='channels_first',
                           kernel_regularizer=l2(self.args.l2))(input)
            block = BatchNormalization(axis=1)(block)
            block = Activation('relu')(block)
            block = Conv2D(self.args.conv_filters, self.args.conv_kernel, padding="same", data_format='channels_first',
                           kernel_regularizer=l2(self.args.l2))(block)
            block = BatchNormalization(axis=1)(block)
            block = Add()([input, block])
            block = Activation('relu')(block)
            return block
        input = Input(shape=(self.args.history_num * 2 + 1, self.args.rows, self.args.columns))
        # Stem convolution followed by the residual tower.
        residual = Conv2D(self.args.conv_filters, self.args.conv_kernel, padding="same", data_format='channels_first',
                          kernel_regularizer=l2(self.args.l2))(input)
        residual = BatchNormalization(axis=1)(residual)
        residual = Activation('relu')(residual)
        for _ in range(self.args.residual_block_num):
            residual = build_residual_block(residual)
        # Policy head: 1x1 conv -> softmax over all board positions.
        policy = Conv2D(2, (1, 1), padding="same", data_format='channels_first', kernel_regularizer=l2(self.args.l2))(
            residual)
        policy = BatchNormalization(axis=1)(policy)
        policy = Activation('relu')(policy)
        policy = Flatten()(policy)
        policy = Dense(self.args.columns * self.args.rows, activation="softmax")(policy)
        # Value head: 1x1 conv -> dense -> tanh scalar.
        # NOTE(review): Dense(256) is applied before Flatten here, so it
        # acts on the last axis only — confirm this ordering is intended.
        value = Conv2D(1, (1, 1), padding="same", data_format='channels_first', kernel_regularizer=l2(self.args.l2))(
            residual)
        value = BatchNormalization(axis=1)(value)
        value = Activation('relu')(value)
        value = Dense(256)(value)
        value = Activation('relu')(value)
        value = Flatten()(value)
        value = Dense(1, activation='tanh')(value)
        model = Model(inputs=input, outputs=[policy, value])
        model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(lr=self.args.lr))
        # model.summary()
        return model
    def train(self, data):
        """Fit the network on self-play samples.

        data: iterable of (board, player, policy, value) tuples.
        """
        boards, players, policies, values = zip(*data)
        states = numpy.zeros((len(players), self.args.history_num * 2 + 1, self.args.rows, self.args.columns))
        for i in range(len(players)):
            states[i] = self.fit_transform(boards[i], players[i])
        policies = numpy.array(policies)
        values = numpy.array(values)
        self.model.fit(x=states, y=[policies, values], batch_size=self.args.batch_size, epochs=self.args.epochs)
    def predict(self, data):
        """Return (policy, value) for a single (board, player) pair."""
        board, player = data
        states = numpy.zeros((1, self.args.history_num * 2 + 1, self.args.rows, self.args.columns))
        states[0] = self.fit_transform(board, player)
        policy, value = self.model.predict(states)
        return policy[0], value[0]
    def save_weights(self, filename):
        # Append a Unix timestamp so successive saves never overwrite.
        self.model.save_weights("%s.%d" % (filename, time.time()))
    def load_weights(self, filename):
        """Load the most recently created weights file matching *filename*."""
        files = glob.glob(filename + '*')
        if len(files) < 1:
            return
        latest_file = max(files, key=os.path.getctime)
        self.model.load_weights(latest_file)
        logging.info("load weights from %s", latest_file)
    def fit_transform(self, board, player):
        """Encode (board, player) into the network's input planes.

        Planes [0, history_num) hold black's move history, planes
        [history_num, 2*history_num) hold white's, and the final plane
        is all-ones when it is black's turn.
        """
        def transform(board, player):
            # One plane per history step; plane h marks the stones of
            # *player* that were on the board h steps ago.
            f = numpy.zeros((self.args.history_num, self.args.rows, self.args.columns))
            actions = [self.env.dec_action(stone) for stone in board.split(self.env.semicolon) if
                       stone and stone[0] == player]
            for i in range(self.args.history_num):
                for (x, y) in actions[:len(actions) - i]:
                    f[self.args.history_num - i - 1][x][y] = 1
            return f
        feature = numpy.zeros((self.args.history_num * 2 + 1, self.args.rows, self.args.columns))
        if player == ChessType.BLACK:
            feature[-1] = numpy.ones((self.args.rows, self.args.columns))
        # Canonicalize so the side to move is always encoded as black.
        new_board = self.player_insensitive_board(board, player)
        feature[:self.args.history_num] = transform(new_board, ChessType.BLACK)
        feature[self.args.history_num:self.args.history_num * 2] = transform(new_board, ChessType.WHITE)
        return feature
    def player_insensitive_board(self, board, player):
        """Return *board* with colours swapped when *player* is white,
        so the network always sees the position from black's side."""
        assert player != ChessType.EMPTY
        if player == ChessType.BLACK:
            return board
        return "".join([c if c != ChessType.BLACK and c != ChessType.WHITE else self.env.next_player(c) for c in board])
| [
"keras.regularizers.l2",
"numpy.random.seed",
"keras.layers.Activation",
"numpy.zeros",
"keras.layers.Flatten",
"keras.models.Model",
"numpy.ones",
"keras.layers.Add",
"logging.info",
"keras.optimizers.Adam",
"keras.layers.Dense",
"numpy.array",
"time.time",
"glob.glob",
"keras.layers.In... | [((366, 389), 'numpy.random.seed', 'numpy.random.seed', (['(1337)'], {}), '(1337)\n', (383, 389), False, 'import numpy\n'), ((1287, 1366), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.history_num * 2 + 1, self.args.rows, self.args.columns)'}), '(shape=(self.args.history_num * 2 + 1, self.args.rows, self.args.columns))\n', (1292, 1366), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2541, 2585), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': '[policy, value]'}), '(inputs=input, outputs=[policy, value])\n', (2546, 2585), False, 'from keras.models import Model\n'), ((3062, 3083), 'numpy.array', 'numpy.array', (['policies'], {}), '(policies)\n', (3073, 3083), False, 'import numpy\n'), ((3101, 3120), 'numpy.array', 'numpy.array', (['values'], {}), '(values)\n', (3112, 3120), False, 'import numpy\n'), ((3310, 3397), 'numpy.zeros', 'numpy.zeros', (['(1, self.args.history_num * 2 + 1, self.args.rows, self.args.columns)'], {}), '((1, self.args.history_num * 2 + 1, self.args.rows, self.args.\n columns))\n', (3321, 3397), False, 'import numpy\n'), ((3694, 3719), 'glob.glob', 'glob.glob', (["(filename + '*')"], {}), "(filename + '*')\n", (3703, 3719), False, 'import glob\n'), ((3874, 3923), 'logging.info', 'logging.info', (['"""load weights from %s"""', 'latest_file'], {}), "('load weights from %s', latest_file)\n", (3886, 3923), False, 'import logging\n'), ((4458, 4537), 'numpy.zeros', 'numpy.zeros', (['(self.args.history_num * 2 + 1, self.args.rows, self.args.columns)'], {}), '((self.args.history_num * 2 + 1, self.args.rows, self.args.columns))\n', (4469, 4537), False, 'import numpy\n'), ((1576, 1602), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (1594, 1602), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((1632, 1650), 'keras.layers.Activation', 
'Activation', (['"""relu"""'], {}), "('relu')\n", (1642, 1650), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((1928, 1954), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (1946, 1954), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((1980, 1998), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1990, 1998), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2024, 2033), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2031, 2033), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2059, 2122), 'keras.layers.Dense', 'Dense', (['(self.args.columns * self.args.rows)'], {'activation': '"""softmax"""'}), "(self.args.columns * self.args.rows, activation='softmax')\n", (2064, 2122), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2288, 2314), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (2306, 2314), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2338, 2356), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2348, 2356), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2380, 2390), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (2385, 2390), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2414, 2432), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2424, 2432), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2456, 2465), 'keras.layers.Flatten', 'Flatten', ([], 
{}), '()\n', (2463, 2465), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2489, 2516), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""tanh"""'}), "(1, activation='tanh')\n", (2494, 2516), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((4023, 4094), 'numpy.zeros', 'numpy.zeros', (['(self.args.history_num, self.args.rows, self.args.columns)'], {}), '((self.args.history_num, self.args.rows, self.args.columns))\n', (4034, 4094), False, 'import numpy\n'), ((4602, 4649), 'numpy.ones', 'numpy.ones', (['(self.args.rows, self.args.columns)'], {}), '((self.args.rows, self.args.columns))\n', (4612, 4649), False, 'import numpy\n'), ((832, 858), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (850, 858), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((886, 904), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (896, 904), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((1123, 1149), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (1141, 1149), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((1177, 1182), 'keras.layers.Add', 'Add', ([], {}), '()\n', (1180, 1182), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((1219, 1237), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1229, 1237), False, 'from keras.layers import Conv2D, BatchNormalization, Input, Activation, Flatten, Dense, Add\n'), ((2675, 2696), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.args.lr'}), '(lr=self.args.lr)\n', (2679, 2696), False, 'from keras.optimizers import Adam\n'), ((1532, 1548), 
'keras.regularizers.l2', 'l2', (['self.args.l2'], {}), '(self.args.l2)\n', (1534, 1548), False, 'from keras.regularizers import l2\n'), ((1870, 1886), 'keras.regularizers.l2', 'l2', (['self.args.l2'], {}), '(self.args.l2)\n', (1872, 1886), False, 'from keras.regularizers import l2\n'), ((2231, 2247), 'keras.regularizers.l2', 'l2', (['self.args.l2'], {}), '(self.args.l2)\n', (2233, 2247), False, 'from keras.regularizers import l2\n'), ((3625, 3636), 'time.time', 'time.time', ([], {}), '()\n', (3634, 3636), False, 'import time\n'), ((787, 803), 'keras.regularizers.l2', 'l2', (['self.args.l2'], {}), '(self.args.l2)\n', (789, 803), False, 'from keras.regularizers import l2\n'), ((1078, 1094), 'keras.regularizers.l2', 'l2', (['self.args.l2'], {}), '(self.args.l2)\n', (1080, 1094), False, 'from keras.regularizers import l2\n')] |
from types import SimpleNamespace as NS
from copy import deepcopy, copy
import numpy as np
class coord:
    """
    Base class for all coordinate systems
    """
    # If the coordinate system is linear
    is_linear = False
    def __radd__(self, gg, inplace=False):
        """
        Add this coordinate system to a plot object (``gg + coord``)

        Attaches a copy of this coordinate system to the plot; the plot
        itself is deep-copied unless ``inplace`` is True.
        """
        gg = gg if inplace else deepcopy(gg)
        gg.coordinates = copy(self)
        return gg
    def setup_data(self, data):
        """
        Allow the coordinate system to manipulate the layer data

        Parameters
        ----------
        data : list of dataframes
            Data for each layer

        Returns
        -------
        out : list of dataframes
            Data for each layer
        """
        return data
    def setup_params(self, data):
        """
        Create additional parameters

        A coordinate system may need to create parameters
        depending on the *original* data that the layers get.

        Parameters
        ----------
        data : list of dataframes
            Data for each layer before it is manipulated in
            any way.
        """
        # Subclasses populate this dict as required
        self.params = {}
    def setup_layout(self, layout):
        """
        Allow the coordinate system alter the layout dataframe

        Parameters
        ----------
        layout : dataframe
            Dataframe in which data is assigned to panels and scales

        Returns
        -------
        out : dataframe
            layout dataframe altered to according to the requirements
            of the coordinate system.

        Notes
        -----
        The input dataframe may be changed.
        """
        return layout
    def aspect(self, panel_params):
        """
        Return desired aspect ratio for the plot

        If not overridden by the subclass, this method
        returns ``None``, which means that the coordinate
        system does not influence the aspect ratio.
        """
        return None
    def labels(self, label_lookup):
        """
        Modify labels

        Parameters
        ----------
        label_lookup : dict_like
            Dictionary is in which to lookup the current label
            values. The keys are the axes e.g. 'x', 'y' and
            the values are strings.

        Returns
        -------
        out : dict
            Modified labels. The dictionary is of the same form
            as ``label_lookup``.
        """
        return label_lookup
    def transform(self, data, panel_params, munch=False):
        """
        Transform data before it is plotted

        This is used to "transform the coordinate axes".
        Subclasses should override this method
        """
        return data
    def setup_panel_params(self, scale_x, scale_y):
        """
        Compute the range and break information for the panel
        """
        return dict()
    def range(self, panel_params):
        """
        Return the range along the dimensions of the coordinate system
        """
        # Defaults to providing the 2D x-y ranges
        return NS(x=panel_params.x.range,
                  y=panel_params.y.range)
    def backtransform_range(self, panel_params):
        """
        Get the panel range provided in panel_params and backtransforms it
        to data coordinates

        Coordinate systems that do any transformations should override
        this method. e.g. coord_trans has to override this method.
        """
        return self.range(panel_params)
    def distance(self, x, y, panel_params):
        # Abstract: subclasses must provide a point-to-point distance,
        # used by munch() to decide how finely to interpolate segments.
        msg = "The coordinate should implement this method."
        raise NotImplementedError(msg)
    def munch(self, data, panel_params):
        """
        Clamp infinite coordinates and interpolate long segments

        Infinite x/y values are replaced with the corresponding panel
        range limits, then the path is broken into small pieces
        (``munch_data``) so non-linear transforms render smoothly.
        """
        ranges = self.backtransform_range(panel_params)
        data.loc[data['x'] == -np.inf, 'x'] = ranges.x[0]
        data.loc[data['x'] == np.inf, 'x'] = ranges.x[1]
        data.loc[data['y'] == -np.inf, 'y'] = ranges.y[0]
        data.loc[data['y'] == np.inf, 'y'] = ranges.y[1]
        dist = self.distance(data['x'], data['y'], panel_params)
        # Distances across group boundaries are meaningless; NaN marks a break
        bool_idx = data['group'].iloc[1:].values != \
            data['group'].iloc[:-1].values
        dist[bool_idx] = np.nan
        # Munch
        munched = munch_data(data, dist)
        return munched
def dist_euclidean(x, y):
    """Euclidean distance between consecutive (x, y) points."""
    xs = np.asarray(x)
    ys = np.asarray(y)
    dx = xs[1:] - xs[:-1]
    dy = ys[1:] - ys[:-1]
    return np.sqrt(dx * dx + dy * dy)
def interp(start, end, n):
    """Return n evenly spaced values from start toward end (end excluded)."""
    return np.linspace(start, end, num=n, endpoint=False)
def munch_data(data, dist):
    """
    Break long segments of a path into many short interpolated pieces

    Parameters
    ----------
    data : dataframe
        Path data with at least 'x' and 'y' columns.
    dist : array-like
        Distance between consecutive points; NaN marks a group break
        (treated as distance 1 so a single interpolation point is kept).

    Returns
    -------
    out : dataframe
        Data with interpolated points and a reset index.
    """
    x, y = data['x'], data['y']
    segment_length = 0.01
    # How many endpoints for each old segment,
    # not counting the last one
    dist[np.isnan(dist)] = 1
    extra = np.maximum(np.floor(dist/segment_length), 1)
    extra = extra.astype(int, copy=False)
    # Generate extra pieces for x and y values
    # The final point must be manually inserted at the end
    x = [interp(start, end, n)
         for start, end, n in zip(x[:-1], x[1:], extra)]
    y = [interp(start, end, n)
         for start, end, n in zip(y[:-1], y[1:], extra)]
    x.append(data['x'].iloc[-1])
    y.append(data['y'].iloc[-1])
    x = np.hstack(x)
    y = np.hstack(y)
    # Replicate other aesthetics: defined by start point
    # but also must include final point
    idx = np.hstack([
        np.repeat(data.index[:-1], extra),
        data.index[-1]])
    munched = data.loc[idx, data.columns.difference(['x', 'y'])]
    munched['x'] = x
    munched['y'] = y
    munched.reset_index(drop=True, inplace=True)
    return munched
| [
"copy.deepcopy",
"numpy.asarray",
"numpy.floor",
"copy.copy",
"numpy.isnan",
"numpy.hstack",
"numpy.repeat",
"numpy.linspace",
"types.SimpleNamespace",
"numpy.sqrt"
] | [((4238, 4251), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4248, 4251), True, 'import numpy as np\n'), ((4260, 4273), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (4270, 4273), True, 'import numpy as np\n'), ((4285, 4339), 'numpy.sqrt', 'np.sqrt', (['((x[:-1] - x[1:]) ** 2 + (y[:-1] - y[1:]) ** 2)'], {}), '((x[:-1] - x[1:]) ** 2 + (y[:-1] - y[1:]) ** 2)\n', (4292, 4339), True, 'import numpy as np\n'), ((4395, 4437), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {'endpoint': '(False)'}), '(start, end, n, endpoint=False)\n', (4406, 4437), True, 'import numpy as np\n'), ((5091, 5103), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (5100, 5103), True, 'import numpy as np\n'), ((5112, 5124), 'numpy.hstack', 'np.hstack', (['y'], {}), '(y)\n', (5121, 5124), True, 'import numpy as np\n'), ((342, 352), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (346, 352), False, 'from copy import deepcopy, copy\n'), ((3027, 3077), 'types.SimpleNamespace', 'NS', ([], {'x': 'panel_params.x.range', 'y': 'panel_params.y.range'}), '(x=panel_params.x.range, y=panel_params.y.range)\n', (3029, 3077), True, 'from types import SimpleNamespace as NS\n'), ((4615, 4629), 'numpy.isnan', 'np.isnan', (['dist'], {}), '(dist)\n', (4623, 4629), True, 'import numpy as np\n'), ((4658, 4689), 'numpy.floor', 'np.floor', (['(dist / segment_length)'], {}), '(dist / segment_length)\n', (4666, 4689), True, 'import numpy as np\n'), ((304, 316), 'copy.deepcopy', 'deepcopy', (['gg'], {}), '(gg)\n', (312, 316), False, 'from copy import deepcopy, copy\n'), ((5253, 5286), 'numpy.repeat', 'np.repeat', (['data.index[:-1]', 'extra'], {}), '(data.index[:-1], extra)\n', (5262, 5286), True, 'import numpy as np\n')] |
import numpy as np
from collections import namedtuple
def cosine_similarity(u, v):
    """Cosine of the angle between vectors u and v."""
    numerator = np.dot(np.squeeze(u), np.squeeze(v))
    denominator = np.linalg.norm(u) * np.linalg.norm(v)
    return numerator / denominator
# One training batch for the Feudal policy: observations, actions, returns,
# state differences, intrinsic rewards, goal sums and recurrent features.
Batch = namedtuple("Batch", ["obs", "a", "returns", "s_diff", "ri", "gsum", "features"])
class FeudalBatch(object):
    """Accumulates per-timestep samples and packs them into a Batch."""

    def __init__(self):
        self.obs = []
        self.a = []
        self.returns = []
        self.s_diff = []
        self.ri = []
        self.gsum = []
        self.features = None

    def add(self, obs, a, returns, s_diff, ri, gsum, features):
        """Append one timestep; features are kept from the first call only."""
        self.obs.append(obs)
        self.a.append(a)
        self.returns.append(returns)
        self.s_diff.append(s_diff)
        self.ri.append(ri)
        self.gsum.append(gsum)
        if not self.features:
            self.features = features

    def get_batch(self):
        """Convert the accumulated lists into numpy arrays inside a Batch."""
        return Batch(np.asarray(self.obs),
                     np.asarray(self.a),
                     np.asarray(self.returns),
                     np.squeeze(np.asarray(self.s_diff)),
                     np.asarray(self.ri),
                     np.asarray(self.gsum),
                     self.features)
class FeudalBatchProcessor(object):
    """
    This class adapts the batch of PolicyOptimizer to a batch useable by
    the FeudalPolicy.
    """
    def __init__(self, c):
        # c: horizon (timesteps of lookback/lookahead around each sample)
        self.c = c
        self.last_terminal = True
    def _extend(self, batch):
        # Append the incoming batch to the internal buffers, padding episode
        # boundaries with c copies of the first/last s and g values so each
        # usable timestep has c past and c future neighbours.
        if self.last_terminal:
            self.last_terminal = False
            # pad the start of the episode with copies of the first s and g
            self.s = [batch.s[0] for _ in range(self.c)]
            self.g = [batch.g[0] for _ in range(self.c)]
            # prepend with dummy values so indexing is the same
            self.obs = [None for _ in range(self.c)]
            self.a = [None for _ in range(self.c)]
            self.returns = [None for _ in range(self.c)]
            self.features = [None for _ in range(self.c)]
        # extend with the actual values
        self.obs.extend(batch.obs)
        self.a.extend(batch.a)
        self.returns.extend(batch.returns)
        self.s.extend(batch.s)
        self.g.extend(batch.g)
        self.features.extend(batch.features)
        # if this is a terminal batch, then append the final s and g c times
        # note that both this and the above case can occur at the same time
        if batch.terminal:
            self.s.extend([batch.s[-1] for _ in range(self.c)])
            self.g.extend([batch.g[-1] for _ in range(self.c)])
    def process_batch(self, batch):
        """
        Converts a normal batch into one used by the FeudalPolicy update.

        FeudalPolicy requires a batch of the form:
        c previous timesteps - batch size timesteps - c future timesteps

        This class handles the tracking the leading and following timesteps over
        time. Additionally, it also computes values across timesteps from the
        batch to provide to FeudalPolicy.
        """
        # extend with current batch
        self._extend(batch)
        # unpack and compute bounds
        length = len(self.obs)
        c = self.c
        # normally we cannot compute samples for the last c elements, but
        # in the terminal case, we hallucinate values where necessary
        end = length if batch.terminal else length - c
        # collect samples to return in a FeudalBatch
        feudal_batch = FeudalBatch()
        for t in range(c, end):
            # state difference
            s_diff = self.s[t + c] - self.s[t]
            # intrinsic reward
            ri = 0
            # note that this for loop considers s and g values
            # 1 timestep to c timesteps (inclusively) ago
            for i in range(1, c + 1):
                ri_s_diff = self.s[t] - self.s[t - i]
                if np.linalg.norm(ri_s_diff) != 0:
                    ri += cosine_similarity(ri_s_diff, self.g[t - i])
            ri /= c
            # sum of g values used to derive w, input to the linear transform
            gsum = np.zeros_like(self.g[t - c])
            for i in range(t - c, t + 1):
                gsum += self.g[i]
            # add to the batch
            feudal_batch.add(self.obs[t], self.a[t], self.returns[t], s_diff,
                             ri, gsum, self.features[t])
        # in the terminal case, set reset flag
        if batch.terminal:
            self.last_terminal = True
        # in the general case, forget all but the last 2 * c elements
        # reason being that the first c of those we have already computed
        # a batch for, and the second c need those first c
        else:
            twoc = 2 * self.c
            self.obs = self.obs[-twoc:]
            self.a = self.a[-twoc:]
            self.returns = self.returns[-twoc:]
            self.s = self.s[-twoc:]
            self.g = self.g[-twoc:]
            self.features = self.features[-twoc:]
        return feudal_batch.get_batch()
| [
"numpy.zeros_like",
"numpy.asarray",
"numpy.linalg.norm",
"collections.namedtuple",
"numpy.squeeze"
] | [((183, 268), 'collections.namedtuple', 'namedtuple', (['"""Batch"""', "['obs', 'a', 'returns', 's_diff', 'ri', 'gsum', 'features']"], {}), "('Batch', ['obs', 'a', 'returns', 's_diff', 'ri', 'gsum', 'features']\n )\n", (193, 268), False, 'from collections import namedtuple\n'), ((826, 846), 'numpy.asarray', 'np.asarray', (['self.obs'], {}), '(self.obs)\n', (836, 846), True, 'import numpy as np\n'), ((865, 883), 'numpy.asarray', 'np.asarray', (['self.a'], {}), '(self.a)\n', (875, 883), True, 'import numpy as np\n'), ((902, 926), 'numpy.asarray', 'np.asarray', (['self.returns'], {}), '(self.returns)\n', (912, 926), True, 'import numpy as np\n'), ((1001, 1020), 'numpy.asarray', 'np.asarray', (['self.ri'], {}), '(self.ri)\n', (1011, 1020), True, 'import numpy as np\n'), ((1040, 1061), 'numpy.asarray', 'np.asarray', (['self.gsum'], {}), '(self.gsum)\n', (1050, 1061), True, 'import numpy as np\n'), ((103, 116), 'numpy.squeeze', 'np.squeeze', (['u'], {}), '(u)\n', (113, 116), True, 'import numpy as np\n'), ((117, 130), 'numpy.squeeze', 'np.squeeze', (['v'], {}), '(v)\n', (127, 130), True, 'import numpy as np\n'), ((135, 152), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (149, 152), True, 'import numpy as np\n'), ((155, 172), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (169, 172), True, 'import numpy as np\n'), ((957, 980), 'numpy.asarray', 'np.asarray', (['self.s_diff'], {}), '(self.s_diff)\n', (967, 980), True, 'import numpy as np\n'), ((3964, 3992), 'numpy.zeros_like', 'np.zeros_like', (['self.g[t - c]'], {}), '(self.g[t - c])\n', (3977, 3992), True, 'import numpy as np\n'), ((3744, 3769), 'numpy.linalg.norm', 'np.linalg.norm', (['ri_s_diff'], {}), '(ri_s_diff)\n', (3758, 3769), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
from PIL import Image
import cv2
import numpy as np
from paddle.fluid.io import DataLoader
import os
from paddlex.cls import transforms
#import Albumentation as A
from glob import glob
import paddle.fluid as fluid
def loader(path):
    """Load an image: RGB file -> BGR float array, resized to 1024x1024, scaled to [-1, 1]."""
    img = Image.open(path).convert("RGB")
    arr = np.asarray(img).astype('float32')
    arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) / 255.0
    resized = cv2.resize(arr, (1024, 1024))
    return (resized - 0.5) / 0.5
def loader_test(path):
    """Test-time image loader.

    The preprocessing is byte-for-byte the same as the training
    ``loader`` (RGB->BGR, resize to 1024x1024, scale to [-1, 1]),
    so delegate instead of duplicating the pipeline.
    """
    return loader(path)
# Training-time augmentation pipeline (paddlex transforms): random flips,
# rotations, crops and photometric distortions applied in sequence.
transform_ops = transforms.Compose(
    [
        #transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5]) # this will do 1/255.0
        transforms.RandomHorizontalFlip(prob=0.5),
        transforms.RandomRotate(rotate_range=30, prob=0.5),
        transforms.RandomCrop(crop_size=224, lower_scale=0.7, lower_ratio=3. / 4, upper_ratio=4. / 3),
        transforms.RandomDistort(brightness_range=0.1, brightness_prob=0.5, contrast_range=0.1, contrast_prob=0.5, saturation_range=0.1, saturation_prob=0.0, hue_range=0.1, hue_prob=0.0)
    ]
)
def transform(img):
    """Apply the module-level augmentation pipeline and return the augmented image."""
    # Compose returns a tuple; the augmented image is the first element.
    return transform_ops(img)[0]
# class TrainDataset:
# def __init__(self,data):
# self._data = data
# self.size = data.shape[0]
# self.cur = 0
# def __len__(self):
# return self.size
# def __getitem__(self,idx):
# img, label = loader(self._data[idx,0]),self._data[idx,1]
# img = transform(img)
# img = img.transpose(2,0,1)
# return img, label
# def __iter__(self):
# return self
# def __next__(self):
# if self.cur<self.size:
# idx = self.cur
# self.cur = self.cur + 1
# return self.__getitem__(idx)
# raise StopIteration
class Dataset:
    """Iterable dataset over image file paths, yielding CHW float arrays."""

    def __init__(self, data, transforms=None):
        self._data = data
        self.size = len(data)
        self.cur = 0
        self._transform = transforms

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        img = loader_test(self._data[idx])
        if self._transform:
            img = self._transform(img)
        # HWC -> CHW, e.g. (1024, 1024, 3) -> (3, 1024, 1024)
        return img.transpose(2, 0, 1)

    def __iter__(self):
        return self

    def __next__(self):
        if self.cur >= self.size:
            raise StopIteration
        index = self.cur
        self.cur += 1
        return self.__getitem__(index)
# ## batch/shuffle dataset with Reader
def data_loader(path):
    """Build a shuffled, batched paddle reader over all .png files under ``path``."""
    pattern = os.path.join(path, "*.png")
    png_paths = list(glob(pattern))
    dataset = Dataset(png_paths)

    def reader():
        for idx in range(len(dataset)):
            yield dataset[idx]

    # Shuffle with a buffer of 100 samples, then batch with size 1.
    shuffled = fluid.io.shuffle(reader, 100)
    return fluid.io.batch(shuffled, 1)
| [
"cv2.cvtColor",
"numpy.asarray",
"paddlex.cls.transforms.RandomCrop",
"paddlex.cls.transforms.RandomDistort",
"PIL.Image.open",
"paddle.fluid.io.batch",
"paddlex.cls.transforms.RandomHorizontalFlip",
"glob.glob",
"paddlex.cls.transforms.RandomRotate",
"os.path.join",
"cv2.resize",
"paddle.flui... | [((2905, 2932), 'os.path.join', 'os.path.join', (['path', '"""*.png"""'], {}), "(path, '*.png')\n", (2917, 2932), False, 'import os\n'), ((2953, 2973), 'glob.glob', 'glob', (['search_pattern'], {}), '(search_pattern)\n', (2957, 2973), False, 'from glob import glob\n'), ((3222, 3251), 'paddle.fluid.io.shuffle', 'fluid.io.shuffle', (['reader', '(100)'], {}), '(reader, 100)\n', (3238, 3251), True, 'import paddle.fluid as fluid\n'), ((3296, 3330), 'paddle.fluid.io.batch', 'fluid.io.batch', (['shuffled_reader', '(1)'], {}), '(shuffled_reader, 1)\n', (3310, 3330), True, 'import paddle.fluid as fluid\n'), ((413, 447), 'cv2.cvtColor', 'cv2.cvtColor', (['x', 'cv2.COLOR_RGB2BGR'], {}), '(x, cv2.COLOR_RGB2BGR)\n', (425, 447), False, 'import cv2\n'), ((675, 709), 'cv2.cvtColor', 'cv2.cvtColor', (['x', 'cv2.COLOR_RGB2BGR'], {}), '(x, cv2.COLOR_RGB2BGR)\n', (687, 709), False, 'import cv2\n'), ((900, 941), 'paddlex.cls.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'prob': '(0.5)'}), '(prob=0.5)\n', (931, 941), False, 'from paddlex.cls import transforms\n'), ((947, 997), 'paddlex.cls.transforms.RandomRotate', 'transforms.RandomRotate', ([], {'rotate_range': '(30)', 'prob': '(0.5)'}), '(rotate_range=30, prob=0.5)\n', (970, 997), False, 'from paddlex.cls import transforms\n'), ((1003, 1102), 'paddlex.cls.transforms.RandomCrop', 'transforms.RandomCrop', ([], {'crop_size': '(224)', 'lower_scale': '(0.7)', 'lower_ratio': '(3.0 / 4)', 'upper_ratio': '(4.0 / 3)'}), '(crop_size=224, lower_scale=0.7, lower_ratio=3.0 / 4,\n upper_ratio=4.0 / 3)\n', (1024, 1102), False, 'from paddlex.cls import transforms\n'), ((1102, 1288), 'paddlex.cls.transforms.RandomDistort', 'transforms.RandomDistort', ([], {'brightness_range': '(0.1)', 'brightness_prob': '(0.5)', 'contrast_range': '(0.1)', 'contrast_prob': '(0.5)', 'saturation_range': '(0.1)', 'saturation_prob': '(0.0)', 'hue_range': '(0.1)', 'hue_prob': '(0.0)'}), '(brightness_range=0.1, 
brightness_prob=0.5,\n contrast_range=0.1, contrast_prob=0.5, saturation_range=0.1,\n saturation_prob=0.0, hue_range=0.1, hue_prob=0.0)\n', (1126, 1288), False, 'from paddlex.cls import transforms\n'), ((282, 298), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (292, 298), False, 'from PIL import Image\n'), ((322, 335), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (332, 335), True, 'import numpy as np\n'), ((463, 490), 'cv2.resize', 'cv2.resize', (['x', '(1024, 1024)'], {}), '(x, (1024, 1024))\n', (473, 490), False, 'import cv2\n'), ((544, 560), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (554, 560), False, 'from PIL import Image\n'), ((584, 597), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (594, 597), True, 'import numpy as np\n'), ((725, 752), 'cv2.resize', 'cv2.resize', (['x', '(1024, 1024)'], {}), '(x, (1024, 1024))\n', (735, 752), False, 'import cv2\n')] |
##############################################################################
#
# Unit tests for probabilities of Fock autocomes in the Gaussian backend
# This DOES NOT test for sampling of the said probabilities
#
##############################################################################
import unittest
import os, sys
sys.path.append(os.getcwd())
import numpy as np
from itertools import combinations
from defaults import BaseTest, FockBaseTest
from strawberryfields import backends
from math import factorial
num_repeats = 50
###################################################################
def squeezed_matelem(r, n):
    """Photon-number probability P(n) of a squeezed vacuum state.

    Odd photon numbers have zero probability; for even n the closed form
    (1/cosh r) * tanh(r)^n * n! / (2^(n/2) * (n/2)!)^2 is used.

    Args:
        r (float): squeezing parameter.
        n (int): photon number.

    Returns:
        float: the probability of measuring n photons.
    """
    if n % 2 != 0:
        return 0.0
    # Use integer division: math.factorial raises TypeError on float
    # arguments since Python 3.10 (n/2 would be e.g. 2.0).
    half = n // 2
    return (1/np.cosh(r))*(np.tanh(r)**(n))*factorial(n)/(2**half*factorial(half))**2
class FockGaussianMeasurementTests(BaseTest):
    """Single-mode tests comparing ``fock_prob`` against the density-matrix diagonal.

    The (identical) boilerplate that extracts the density-matrix diagonal
    and collects ``fock_prob`` values was copy-pasted across the three test
    methods; it now lives in the ``_fock_diag``/``_fock_probs`` helpers.
    """
    num_subsystems = 1

    def _fock_diag(self, state, nmax):
        """Return the diagonal of the single-mode density matrix.

        Fock backends expose the ket/dm directly (a pure ket is expanded
        to a density matrix, per batch element when batched); Gaussian
        backends provide the diagonal via ``reduced_dm``.
        """
        if isinstance(self.backend, backends.BaseFock):
            if state.is_pure:
                bsd = state.ket()
            else:
                bsd = state.dm()
            if self.kwargs['pure']:
                if self.args.batched:
                    bsd = np.array([np.outer(b, np.conj(b)) for b in bsd])
                else:
                    bsd = np.outer(bsd, np.conj(bsd))
        else:
            bsd = state.reduced_dm(0, cutoff=nmax + 1)
        if self.args.batched:
            return np.array([np.diag(b) for b in bsd])
        return np.diag(bsd)

    def _fock_probs(self, state):
        """Collect ``state.fock_prob`` for photon numbers 0..D-1 per batch entry."""
        gbs = np.empty((self.bsize, self.D))
        for i in range(self.D):
            gbs[:, i] = state.fock_prob([i])
        return gbs

    def test_coherent_state(self):
        """Tests Fock probabilities on a coherent state."""
        alpha = 1
        nmax = self.D - 1
        self.circuit.prepare_coherent_state(alpha, 0)
        state = self.circuit.state()
        bsd_diag = self._fock_diag(state, nmax)
        gbs = self._fock_probs(state)
        # Coherent states have a Poissonian photon-number distribution
        alpha2 = np.abs(alpha)**2
        exact = np.array([np.exp(-alpha2)*(alpha2**i)/factorial(i) for i in range(self.D)])
        exact = np.tile(exact, self.bsize)
        self.assertAllAlmostEqual(gbs.flatten(), bsd_diag.real.flatten(), delta=self.tol)
        self.assertAllAlmostEqual(exact.flatten(), bsd_diag.real.flatten(), delta=self.tol)

    def test_squeezed_state(self):
        """Tests Fock probabilities on a squeezed state."""
        r = 1.0
        nmax = self.D - 1
        self.circuit.prepare_squeezed_state(1.0, 0, 0)
        state = self.circuit.state()
        bsd_diag = self._fock_diag(state, nmax)
        gbs = self._fock_probs(state)
        exact = np.array([squeezed_matelem(r, n) for n in range(self.D)])
        exact = np.tile(exact, self.bsize)
        self.assertAllAlmostEqual(gbs.flatten(), bsd_diag.real.flatten(), delta=self.tol)
        self.assertAllAlmostEqual(exact.flatten(), bsd_diag.real.flatten(), delta=self.tol)

    def test_displaced_squeezed_state(self):
        """Tests Fock probabilities on a state that is squeezed then displaced."""
        nmax = self.D - 1
        self.circuit.prepare_squeezed_state(1.0, 0, 0)
        self.circuit.displacement(3 + 4 * 1j, 0)
        state = self.circuit.state()
        bsd_diag = self._fock_diag(state, nmax)
        gbs = self._fock_probs(state)
        self.assertAllAlmostEqual(gbs.flatten(), bsd_diag.real.flatten(), delta=self.tol)
class FockGaussianMeasurementTestsMulti(BaseTest):
    """Fock probabilities of a two-mode squeezed state built with a real beamsplitter."""
    num_subsystems = 2

    def test_two_mode_squeezed(self):
        r = np.arcsinh(np.sqrt(1))
        nmax = 3
        self.circuit.prepare_squeezed_state(r, 0, 0)
        self.circuit.prepare_squeezed_state(-r, 0, 1)
        amp = np.sqrt(0.5)
        self.circuit.beamsplitter(amp, amp, 0, 1)
        state = self.circuit.state()
        gbs = np.empty((self.bsize, nmax, nmax))
        for n in range(nmax):
            for m in range(nmax):
                gbs[:, n, m] = state.fock_prob(np.array([n, m]))
        # Exact thermal-like distribution: perfectly correlated photon numbers
        photons = np.arange(nmax)
        exact = np.diag((1 / np.cosh(r)**2) * np.tanh(r)**(2 * photons))
        exact = np.tile(exact.flatten(), self.bsize)
        self.assertAllAlmostEqual(gbs.flatten(), exact.flatten(), delta=self.tol)
class FockGaussianMeasurementTestsMultiComplex(BaseTest):
    """Fock probabilities of a two-mode squeezed state built with a complex beamsplitter."""
    num_subsystems = 2

    def test_two_mode_squeezed(self):
        r = np.arcsinh(np.sqrt(1))
        nmax = 3
        self.circuit.prepare_squeezed_state(r, 0, 0)
        self.circuit.prepare_squeezed_state(r, 0, 1)
        amp = np.sqrt(0.5)
        self.circuit.beamsplitter(amp, 1j * amp, 0, 1)
        state = self.circuit.state()
        gbs = np.empty((self.bsize, nmax, nmax))
        for n in range(nmax):
            for m in range(nmax):
                gbs[:, n, m] = state.fock_prob(np.array([n, m]))
        # Exact thermal-like distribution: perfectly correlated photon numbers
        photons = np.arange(nmax)
        exact = np.diag((1 / np.cosh(r)**2) * np.tanh(r)**(2 * photons))
        exact = np.tile(exact.flatten(), self.bsize)
        self.assertAllAlmostEqual(gbs.flatten(), exact.flatten(), delta=self.tol)
if __name__=="__main__":
    # Collect every test case defined above into one suite and run it.
    cases = (FockGaussianMeasurementTests,
             FockGaussianMeasurementTestsMulti,
             FockGaussianMeasurementTestsMultiComplex)
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for case in cases:
        suite.addTests(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner().run(suite)
| [
"numpy.conj",
"numpy.abs",
"unittest.TextTestRunner",
"unittest.TestSuite",
"numpy.tanh",
"os.getcwd",
"numpy.empty",
"numpy.arange",
"numpy.tile",
"math.factorial",
"unittest.TestLoader",
"numpy.array",
"numpy.diag",
"numpy.exp",
"numpy.cosh",
"numpy.sqrt"
] | [((343, 354), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (352, 354), False, 'import os, sys\n'), ((5452, 5472), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (5470, 5472), False, 'import unittest\n'), ((1513, 1543), 'numpy.empty', 'np.empty', (['(self.bsize, self.D)'], {}), '((self.bsize, self.D))\n', (1521, 1543), True, 'import numpy as np\n'), ((1737, 1763), 'numpy.tile', 'np.tile', (['exact', 'self.bsize'], {}), '(exact, self.bsize)\n', (1744, 1763), True, 'import numpy as np\n'), ((2619, 2649), 'numpy.empty', 'np.empty', (['(self.bsize, self.D)'], {}), '((self.bsize, self.D))\n', (2627, 2649), True, 'import numpy as np\n'), ((2798, 2824), 'numpy.tile', 'np.tile', (['exact', 'self.bsize'], {}), '(exact, self.bsize)\n', (2805, 2824), True, 'import numpy as np\n'), ((3769, 3799), 'numpy.empty', 'np.empty', (['(self.bsize, self.D)'], {}), '((self.bsize, self.D))\n', (3777, 3799), True, 'import numpy as np\n'), ((4288, 4322), 'numpy.empty', 'np.empty', (['(self.bsize, nmax, nmax)'], {}), '((self.bsize, nmax, nmax))\n', (4296, 4322), True, 'import numpy as np\n'), ((4471, 4486), 'numpy.arange', 'np.arange', (['nmax'], {}), '(nmax)\n', (4480, 4486), True, 'import numpy as np\n'), ((5000, 5034), 'numpy.empty', 'np.empty', (['(self.bsize, nmax, nmax)'], {}), '((self.bsize, nmax, nmax))\n', (5008, 5034), True, 'import numpy as np\n'), ((5183, 5198), 'numpy.arange', 'np.arange', (['nmax'], {}), '(nmax)\n', (5192, 5198), True, 'import numpy as np\n'), ((1489, 1501), 'numpy.diag', 'np.diag', (['bsd'], {}), '(bsd)\n', (1496, 1501), True, 'import numpy as np\n'), ((1621, 1634), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (1627, 1634), True, 'import numpy as np\n'), ((2595, 2607), 'numpy.diag', 'np.diag', (['bsd'], {}), '(bsd)\n', (2602, 2607), True, 'import numpy as np\n'), ((3745, 3757), 'numpy.diag', 'np.diag', (['bsd'], {}), '(bsd)\n', (3752, 3757), True, 'import numpy as np\n'), ((4092, 4102), 'numpy.sqrt', 'np.sqrt', (['(1)'], {}), '(1)\n', 
(4099, 4102), True, 'import numpy as np\n'), ((4242, 4254), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4249, 4254), True, 'import numpy as np\n'), ((4255, 4267), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4262, 4267), True, 'import numpy as np\n'), ((4806, 4816), 'numpy.sqrt', 'np.sqrt', (['(1)'], {}), '(1)\n', (4813, 4816), True, 'import numpy as np\n'), ((4953, 4965), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4960, 4965), True, 'import numpy as np\n'), ((5677, 5702), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (5700, 5702), False, 'import unittest\n'), ((691, 703), 'math.factorial', 'factorial', (['n'], {}), '(n)\n', (700, 703), False, 'from math import factorial\n'), ((4969, 4981), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4976, 4981), True, 'import numpy as np\n'), ((5603, 5624), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (5622, 5624), False, 'import unittest\n'), ((714, 730), 'math.factorial', 'factorial', (['(n / 2)'], {}), '(n / 2)\n', (723, 730), False, 'from math import factorial\n'), ((1437, 1447), 'numpy.diag', 'np.diag', (['b'], {}), '(b)\n', (1444, 1447), True, 'import numpy as np\n'), ((1687, 1699), 'math.factorial', 'factorial', (['i'], {}), '(i)\n', (1696, 1699), False, 'from math import factorial\n'), ((2543, 2553), 'numpy.diag', 'np.diag', (['b'], {}), '(b)\n', (2550, 2553), True, 'import numpy as np\n'), ((3693, 3703), 'numpy.diag', 'np.diag', (['b'], {}), '(b)\n', (3700, 3703), True, 'import numpy as np\n'), ((4445, 4461), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4453, 4461), True, 'import numpy as np\n'), ((4527, 4537), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (4534, 4537), True, 'import numpy as np\n'), ((5157, 5173), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (5165, 5173), True, 'import numpy as np\n'), ((5239, 5249), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (5246, 5249), True, 'import numpy as np\n'), 
((661, 671), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (668, 671), True, 'import numpy as np\n'), ((674, 684), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (681, 684), True, 'import numpy as np\n'), ((1342, 1354), 'numpy.conj', 'np.conj', (['bsd'], {}), '(bsd)\n', (1349, 1354), True, 'import numpy as np\n'), ((1659, 1674), 'numpy.exp', 'np.exp', (['(-alpha2)'], {}), '(-alpha2)\n', (1665, 1674), True, 'import numpy as np\n'), ((2448, 2460), 'numpy.conj', 'np.conj', (['bsd'], {}), '(bsd)\n', (2455, 2460), True, 'import numpy as np\n'), ((3598, 3610), 'numpy.conj', 'np.conj', (['bsd'], {}), '(bsd)\n', (3605, 3610), True, 'import numpy as np\n'), ((4510, 4520), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (4517, 4520), True, 'import numpy as np\n'), ((5222, 5232), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (5229, 5232), True, 'import numpy as np\n'), ((1271, 1281), 'numpy.conj', 'np.conj', (['b'], {}), '(b)\n', (1278, 1281), True, 'import numpy as np\n'), ((2377, 2387), 'numpy.conj', 'np.conj', (['b'], {}), '(b)\n', (2384, 2387), True, 'import numpy as np\n'), ((3527, 3537), 'numpy.conj', 'np.conj', (['b'], {}), '(b)\n', (3534, 3537), True, 'import numpy as np\n')] |
import numpy as np
import os
from keras.models import model_from_json, model_from_yaml
import tensorflow as tf
from tensorflow.python.ops import array_ops
def dataset_import(data_dir, data_type):
    """Recursively collect paths of files whose name contains ``data_type``.

    Parameters
    ----------
    data_dir : str
        Root directory walked recursively.
    data_type : str
        Substring a file name must contain (e.g. an extension like ".npz").

    Returns
    -------
    list of str
        Sorted list of matching file paths.
    """
    sets = []
    print("Load from %s" % data_dir)
    for dir_name, subdir_list, file_list in os.walk(data_dir):
        for file_name in file_list:
            if(data_type in file_name):
                # os.path.join keeps the path separator; plain string
                # concatenation ("dir" + "file") silently produced broken
                # paths for every sub-directory.
                sets.append(os.path.join(dir_name, file_name))
    sets.sort()
    print(str(len(sets))+ " files loaded!")
    return sets
# Create directories for different runs of training
def runs_management(path):
    """Create and return a fresh ``run_<n>`` directory under ``path``.

    The next free index is one past the highest existing ``run_<n>``
    directory; ``model`` and ``logging`` sub-directories are created
    inside the new run directory.

    Parameters
    ----------
    path : str
        Base directory; expected to end with a path separator, matching
        the original ``path + "run_0"`` call convention.

    Returns
    -------
    str
        Path of the newly created run directory.
    """
    if not os.path.isdir(path + "run_0"):
        new_dir = path + "run_0"
    else:
        # Only consider entries that follow the run_<n> naming pattern so
        # unrelated files/directories no longer crash the int() conversion.
        indices = [int(d.split("_")[-1]) for d in os.listdir(path)
                   if d.startswith("run_") and d.split("_")[-1].isdigit()]
        new_dir = path + "run_" + str(max(indices) + 1)
    os.mkdir(new_dir)
    os.mkdir(new_dir + "/model")
    os.mkdir(new_dir + "/logging")
    return new_dir
def padding_v2(score, context):
    """Pad ``context`` all-zero timesteps before and after ``score`` along axis 0."""
    arr = np.array(score)
    pad_shape = (context,) + arr.shape[1:]
    zeros = np.zeros(pad_shape)
    return np.concatenate((zeros, arr, zeros), axis=0)
return extended_score
def get_metas(score, beat_resolution, len_context):
    """Per-timestep metadata: start flag, end flag, beat phase, measure phase.

    Returns an array of shape (len(score), 4):
    column 0 marks the first ``len_context`` steps (start symbol),
    column 1 the last ``len_context`` steps (end symbol),
    column 2 the position within a beat in [0, 1),
    column 3 the position within a 4-beat measure in [0, 1).
    """
    n_steps = len(score)
    # 2 symbol bits (start/end) + 1 beat bit + 1 measure bit
    metas = np.zeros((score.shape[0], 4))
    for t in range(n_steps):
        if t < len_context:
            metas[t, 0] = 1
        elif t >= n_steps - len_context:
            metas[t, 1] = 1
        else:
            metas[t, 2] = (t % beat_resolution) / beat_resolution
            measure = beat_resolution * 4
            metas[t, 3] = (t % measure) / measure
    return metas
return metas
def get_representation(score, pitch_range, beat_resolution, len_context):
    """
    Convert a pianoroll into a 3-channel onset/offset/duration representation.

    Channel 0 flags note onsets, channel 1 flags note offsets, and channel 2
    carries a running duration count that is reversed in place at each offset
    so it counts down towards the end of a note.  The result is zero-padded
    with ``len_context`` steps on both sides and returned together with the
    matching metadata from ``get_metas``.

    NOTE(review): assumes ``score`` is a 2-D (time, pitch) array whose
    first-order time difference yields +1 at onsets and -1 at offsets --
    confirm against the caller.
    """
    # Frame state driving the event detection (starts as the raw first frame,
    # then becomes the time difference computed below)
    pitch_table = np.array(score[0])
    # Per-pitch counter of how long the current note has been sounding
    pitch_counter = np.zeros(pitch_range, dtype="int64")
    # First-order time difference: next frame minus current frame
    score_progress = (np.concatenate([score[1:], np.zeros((1, pitch_range))]) - score).astype('int64')
    # Intensity as length
    # 1 as onset, gradually decrease to zero
    new_score = np.zeros((score.shape[0], score.shape[1], 3))
    for t ,tt in enumerate(score):
        for p in np.nonzero(pitch_table)[0]:
            if pitch_table[p] and pitch_counter[p] == 0:
                # A new note starts at (t, p)
                new_score[t, p, 0] = 1
                pitch_counter[p] = 1
            elif pitch_table[p] == -1:
                # The note at pitch p ends here: mark the offset and flip the
                # duration ramp so it decreases towards this point
                new_score[t, p, 1] = 1
                pitch_counter[p] -= 1
                new_score[t - pitch_counter[p]:t, p, 2] = new_score[t - pitch_counter[p]:t, p, 2][::-1]
                pitch_counter[p] = 0
        for p in np.nonzero(pitch_counter)[0]:
            # Record and then advance the running duration of active notes
            new_score[t, p, 2] = pitch_counter[p]
            pitch_counter[p] += 1
        pitch_table = score_progress[t]
        # enumerate resets t each iteration; this only matters after the loop,
        # where t ends up equal to len(score)
        t +=1
    # Close any notes still sounding at the end of the score
    for p in np.nonzero(pitch_table)[0]:
        if pitch_table[p] == -1:
            pitch_counter[p] -= 1
            new_score[t - pitch_counter[p]:t, p, 2] = new_score[t - pitch_counter[p]:t, p, 2][::-1]
    score_t = padding_v2(new_score , len_context)
    meta = get_metas(score_t , beat_resolution, len_context)
    return score_t, meta
def focal_loss(y_true, y_pred):
    r"""Compute multi-label focal loss.

    FL = -alpha * (1-p)^gamma * log(p)        for positive entries
         -(1-alpha) * p^gamma * log(1-p)      for negative entries
    with alpha = 0.6, gamma = 2 and p = y_pred.

    Args:
        y_true: tensor of targets; entries greater than 1e-4 are treated
            as positives.
        y_pred: tensor of predicted probabilities, same shape as y_true.

    Returns:
        A scalar tensor: the mean focal loss over all entries.
    """
    alpha = 0.6
    gamma = 2
    threash_hold = 1e-4
    # Positives keep their prediction, everything else becomes 1 so its
    # log term vanishes; the mirror image is used for negatives.
    pos_p_sub = tf.where(tf.greater(y_true, threash_hold), y_pred, tf.ones_like(y_pred))
    neg_p_sub = tf.where(tf.less_equal(y_true, threash_hold), y_pred, tf.zeros_like(y_pred))
    per_entry_cross_ent = - alpha * ((1 - pos_p_sub) ** gamma) * tf.math.log(tf.clip_by_value(pos_p_sub, 1e-8, 1.0)) \
                          - (1 - alpha) * (neg_p_sub ** gamma) * tf.math.log(tf.clip_by_value(1.0 - neg_p_sub, 1e-8, 1.0))
    return tf.reduce_mean(per_entry_cross_ent)
def partial_focal_loss(y_true, y_pred):
    """Focal loss restricted to the masked timesteps.

    Timesteps whose target equals -1 are excluded from the loss via a
    multiplicative mask (used for the transformer training objective).
    """
    alpha, gamma, cutoff = 0.6, 2, 1e-4
    # 0 where the target is the -1 sentinel, 1 everywhere else.
    update_mask = tf.where(tf.equal(y_true, -1), tf.zeros_like(y_pred), tf.ones_like(y_pred))
    positives = tf.where(tf.greater(y_true, cutoff), y_pred, tf.ones_like(y_pred))
    negatives = tf.where(tf.less_equal(y_true, cutoff), y_pred, tf.zeros_like(y_pred))
    pos_term = - alpha * ((1 - positives) ** gamma) * tf.math.log(tf.clip_by_value(positives, 1e-8, 1.0))
    neg_term = - (1 - alpha) * (negatives ** gamma) * tf.math.log(tf.clip_by_value(1.0 - negatives, 1e-8, 1.0))
    return tf.reduce_mean((pos_term + neg_term) * update_mask)
def partial_loss(y_true, y_pred):
    """Binary cross entropy over the masked timesteps only.

    Timesteps whose target equals -1 are zeroed out of the loss
    (transformer objective: update only where the target is defined).
    """
    update_mask = tf.where(tf.equal(y_true, -1), tf.zeros_like(y_pred), tf.ones_like(y_pred))
    log_p = tf.math.log(tf.clip_by_value(y_pred, 1e-8, 1.0))
    log_not_p = tf.math.log(tf.clip_by_value(1.0 - y_pred, 1e-8, 1.0))
    bce = - y_true * log_p - (1 - y_true) * log_not_p
    return tf.reduce_mean(bce * update_mask)
def binary_crossentropy_mixup(y_true, y_pred):
    """Binary cross entropy variant for mixup-style soft targets.

    The positive term penalises |y_true - y_pred| instead of log(y_pred),
    so fractional targets are rewarded for proximity rather than saturation.
    """
    closeness = 1.0 - tf.abs(y_true - y_pred)
    pos_term = - y_true * tf.math.log(tf.clip_by_value(closeness, 1e-8, 1.0))
    neg_term = - (1 - y_true) * tf.math.log(tf.clip_by_value(1.0 - y_pred, 1e-8, 1.0))
    return tf.reduce_mean(pos_term + neg_term)
def partial_loss_mixup(y_true, y_pred):
    """Mixup binary cross entropy restricted to the masked timesteps.

    Combines the |y_true - y_pred| positive term of the mixup loss with the
    -1-sentinel mask of the partial losses.
    """
    update_mask = tf.where(tf.equal(y_true, -1), tf.zeros_like(y_pred), tf.ones_like(y_pred))
    closeness = 1.0 - tf.abs(y_true - y_pred)
    bce = (- y_true * tf.math.log(tf.clip_by_value(closeness, 1e-8, 1.0))
           - (1 - y_true) * tf.math.log(tf.clip_by_value(1.0 - y_pred, 1e-8, 1.0)))
    return tf.reduce_mean(bce * update_mask)
def partial_binary_accuracy(y_true, y_pred, threshold=0.5):
    """Binary accuracy over masked timesteps only (use with partial_loss).

    Timesteps whose target equals -1 are excluded from both the hit count
    and the denominator.
    """
    if threshold != 0.5:
        y_pred = tf.cast(y_pred > tf.cast(threshold, y_pred.dtype), y_pred.dtype)
    valid = tf.where(tf.equal(y_true, -1), tf.zeros_like(y_pred), tf.ones_like(y_pred))
    # Push masked-out targets far away so they can never match a rounded prediction.
    targets = tf.where(tf.equal(y_true, -1), -1e9 * tf.ones_like(y_pred), y_true)
    hits = tf.cast(tf.equal(targets, tf.round(y_pred)), dtype=y_pred.dtype)
    return tf.reduce_sum(hits) / tf.reduce_sum(valid)
def current_l_binary_accuracy(y_true, y_pred, threshold=0.5):
    """Masked binary accuracy for the last timestep only (use with partial loss)."""
    if threshold != 0.5:
        y_pred = tf.cast(y_pred > tf.cast(threshold, y_pred.dtype), y_pred.dtype)
    valid = tf.where(tf.equal(y_true, -1), tf.zeros_like(y_pred), tf.ones_like(y_pred))
    # Push masked-out targets far away so they can never match a rounded prediction.
    targets = tf.where(tf.equal(y_true, -1), -1e9 * tf.ones_like(y_pred), y_true)
    hits = tf.cast(tf.equal(targets, tf.round(y_pred))[:, -1, :], dtype=y_pred.dtype)
    return tf.reduce_sum(hits) / tf.reduce_sum(valid[:, -1, :])
def current_r_binary_accuracy(y_true, y_pred, threshold=0.5):
    """Masked binary accuracy for the first timestep only (use with partial loss)."""
    if threshold != 0.5:
        y_pred = tf.cast(y_pred > tf.cast(threshold, y_pred.dtype), y_pred.dtype)
    valid = tf.where(tf.equal(y_true, -1), tf.zeros_like(y_pred), tf.ones_like(y_pred))
    # Push masked-out targets far away so they can never match a rounded prediction.
    targets = tf.where(tf.equal(y_true, -1), -1e9 * tf.ones_like(y_pred), y_true)
    hits = tf.cast(tf.equal(targets, tf.round(y_pred))[:, 0, :], dtype=y_pred.dtype)
    return tf.reduce_sum(hits) / tf.reduce_sum(valid[:, 0, :])
| [
"os.mkdir",
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"os.walk",
"tensorflow.zeros_like",
"tensorflow.greater",
"tensorflow.abs",
"tensorflow.less_equal",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.reduce_mean",
"tensorflow.ones_like",
"tensorflow.round",
"os.listdir",
... | [((292, 309), 'os.walk', 'os.walk', (['data_dir'], {}), '(data_dir)\n', (299, 309), False, 'import os\n'), ((983, 1011), 'os.mkdir', 'os.mkdir', (["(new_dir + '/model')"], {}), "(new_dir + '/model')\n", (991, 1011), False, 'import os\n'), ((1016, 1046), 'os.mkdir', 'os.mkdir', (["(new_dir + '/logging')"], {}), "(new_dir + '/logging')\n", (1024, 1046), False, 'import os\n'), ((1152, 1167), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (1160, 1167), True, 'import numpy as np\n'), ((1258, 1286), 'numpy.zeros', 'np.zeros', (['padding_dimensions'], {}), '(padding_dimensions)\n', (1266, 1286), True, 'import numpy as np\n'), ((1305, 1333), 'numpy.zeros', 'np.zeros', (['padding_dimensions'], {}), '(padding_dimensions)\n', (1313, 1333), True, 'import numpy as np\n'), ((1356, 1424), 'numpy.concatenate', 'np.concatenate', (['(padding_start, extended_score, padding_end)'], {'axis': '(0)'}), '((padding_start, extended_score, padding_end), axis=0)\n', (1370, 1424), True, 'import numpy as np\n'), ((1775, 1845), 'numpy.zeros', 'np.zeros', (['(score.shape[0], n_symbols + n_bits_beats + n_bits_measures)'], {}), '((score.shape[0], n_symbols + n_bits_beats + n_bits_measures))\n', (1783, 1845), True, 'import numpy as np\n'), ((2531, 2549), 'numpy.array', 'np.array', (['score[0]'], {}), '(score[0])\n', (2539, 2549), True, 'import numpy as np\n'), ((2570, 2606), 'numpy.zeros', 'np.zeros', (['pitch_range'], {'dtype': '"""int64"""'}), "(pitch_range, dtype='int64')\n", (2578, 2606), True, 'import numpy as np\n'), ((2799, 2844), 'numpy.zeros', 'np.zeros', (['(score.shape[0], score.shape[1], 3)'], {}), '((score.shape[0], score.shape[1], 3))\n', (2807, 2844), True, 'import numpy as np\n'), ((5616, 5651), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_entry_cross_ent'], {}), '(per_entry_cross_ent)\n', (5630, 5651), True, 'import tensorflow as tf\n'), ((6423, 6458), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_entry_cross_ent'], {}), '(per_entry_cross_ent)\n', (6437, 
6458), True, 'import tensorflow as tf\n'), ((6902, 6937), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_entry_cross_ent'], {}), '(per_entry_cross_ent)\n', (6916, 6937), True, 'import tensorflow as tf\n'), ((7215, 7250), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_entry_cross_ent'], {}), '(per_entry_cross_ent)\n', (7229, 7250), True, 'import tensorflow as tf\n'), ((7723, 7758), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_entry_cross_ent'], {}), '(per_entry_cross_ent)\n', (7737, 7758), True, 'import tensorflow as tf\n'), ((604, 633), 'os.path.isdir', 'os.path.isdir', (["(path + 'run_0')"], {}), "(path + 'run_0')\n", (617, 633), False, 'import os\n'), ((676, 693), 'os.mkdir', 'os.mkdir', (['new_dir'], {}), '(new_dir)\n', (684, 693), False, 'import os\n'), ((743, 759), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (753, 759), False, 'import os\n'), ((960, 977), 'os.mkdir', 'os.mkdir', (['new_dir'], {}), '(new_dir)\n', (968, 977), False, 'import os\n'), ((3560, 3583), 'numpy.nonzero', 'np.nonzero', (['pitch_table'], {}), '(pitch_table)\n', (3570, 3583), True, 'import numpy as np\n'), ((4852, 4884), 'tensorflow.greater', 'tf.greater', (['y_true', 'threash_hold'], {}), '(y_true, threash_hold)\n', (4862, 4884), True, 'import tensorflow as tf\n'), ((4894, 4914), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (4906, 4914), True, 'import tensorflow as tf\n'), ((4942, 4974), 'tensorflow.greater', 'tf.greater', (['y_true', 'threash_hold'], {}), '(y_true, threash_hold)\n', (4952, 4974), True, 'import tensorflow as tf\n'), ((4984, 5004), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (4996, 5004), True, 'import tensorflow as tf\n'), ((5036, 5071), 'tensorflow.less_equal', 'tf.less_equal', (['y_true', 'threash_hold'], {}), '(y_true, threash_hold)\n', (5049, 5071), True, 'import tensorflow as tf\n'), ((5081, 5102), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (5094, 5102), 
True, 'import tensorflow as tf\n'), ((5824, 5844), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (5832, 5844), True, 'import tensorflow as tf\n'), ((5846, 5867), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (5859, 5867), True, 'import tensorflow as tf\n'), ((5869, 5889), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (5881, 5889), True, 'import tensorflow as tf\n'), ((5971, 6003), 'tensorflow.greater', 'tf.greater', (['y_true', 'threash_hold'], {}), '(y_true, threash_hold)\n', (5981, 6003), True, 'import tensorflow as tf\n'), ((6013, 6033), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (6025, 6033), True, 'import tensorflow as tf\n'), ((6060, 6095), 'tensorflow.less_equal', 'tf.less_equal', (['y_true', 'threash_hold'], {}), '(y_true, threash_hold)\n', (6073, 6095), True, 'import tensorflow as tf\n'), ((6105, 6126), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (6118, 6126), True, 'import tensorflow as tf\n'), ((6624, 6644), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (6632, 6644), True, 'import tensorflow as tf\n'), ((6646, 6667), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (6659, 6667), True, 'import tensorflow as tf\n'), ((6669, 6689), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (6681, 6689), True, 'import tensorflow as tf\n'), ((7422, 7442), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (7430, 7442), True, 'import tensorflow as tf\n'), ((7444, 7465), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (7457, 7465), True, 'import tensorflow as tf\n'), ((7467, 7487), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (7479, 7487), True, 'import tensorflow as tf\n'), ((7893, 7925), 'tensorflow.cast', 'tf.cast', (['threshold', 'y_pred.dtype'], {}), '(threshold, 
y_pred.dtype)\n', (7900, 7925), True, 'import tensorflow as tf\n'), ((7943, 7984), 'tensorflow.cast', 'tf.cast', (['(y_pred > threshold)', 'y_pred.dtype'], {}), '(y_pred > threshold, y_pred.dtype)\n', (7950, 7984), True, 'import tensorflow as tf\n'), ((8011, 8031), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (8019, 8031), True, 'import tensorflow as tf\n'), ((8033, 8054), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (8046, 8054), True, 'import tensorflow as tf\n'), ((8056, 8076), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (8068, 8076), True, 'import tensorflow as tf\n'), ((8100, 8120), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (8108, 8120), True, 'import tensorflow as tf\n'), ((8248, 8268), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask0'], {}), '(mask0)\n', (8261, 8268), True, 'import tensorflow as tf\n'), ((8454, 8486), 'tensorflow.cast', 'tf.cast', (['threshold', 'y_pred.dtype'], {}), '(threshold, y_pred.dtype)\n', (8461, 8486), True, 'import tensorflow as tf\n'), ((8504, 8545), 'tensorflow.cast', 'tf.cast', (['(y_pred > threshold)', 'y_pred.dtype'], {}), '(y_pred > threshold, y_pred.dtype)\n', (8511, 8545), True, 'import tensorflow as tf\n'), ((8572, 8592), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (8580, 8592), True, 'import tensorflow as tf\n'), ((8594, 8615), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (8607, 8615), True, 'import tensorflow as tf\n'), ((8617, 8637), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (8629, 8637), True, 'import tensorflow as tf\n'), ((8661, 8681), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (8669, 8681), True, 'import tensorflow as tf\n'), ((8823, 8853), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask0[:, -1, :]'], {}), '(mask0[:, -1, :])\n', (8836, 8853), True, 'import tensorflow as 
tf\n'), ((9043, 9075), 'tensorflow.cast', 'tf.cast', (['threshold', 'y_pred.dtype'], {}), '(threshold, y_pred.dtype)\n', (9050, 9075), True, 'import tensorflow as tf\n'), ((9093, 9134), 'tensorflow.cast', 'tf.cast', (['(y_pred > threshold)', 'y_pred.dtype'], {}), '(y_pred > threshold, y_pred.dtype)\n', (9100, 9134), True, 'import tensorflow as tf\n'), ((9161, 9181), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (9169, 9181), True, 'import tensorflow as tf\n'), ((9183, 9204), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (9196, 9204), True, 'import tensorflow as tf\n'), ((9206, 9226), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (9218, 9226), True, 'import tensorflow as tf\n'), ((9250, 9270), 'tensorflow.equal', 'tf.equal', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (9258, 9270), True, 'import tensorflow as tf\n'), ((9411, 9440), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask0[:, 0, :]'], {}), '(mask0[:, 0, :])\n', (9424, 9440), True, 'import tensorflow as tf\n'), ((2897, 2920), 'numpy.nonzero', 'np.nonzero', (['pitch_table'], {}), '(pitch_table)\n', (2907, 2920), True, 'import numpy as np\n'), ((3361, 3386), 'numpy.nonzero', 'np.nonzero', (['pitch_counter'], {}), '(pitch_counter)\n', (3371, 3386), True, 'import numpy as np\n'), ((8127, 8147), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (8139, 8147), True, 'import tensorflow as tf\n'), ((8688, 8708), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (8700, 8708), True, 'import tensorflow as tf\n'), ((9277, 9297), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (9289, 9297), True, 'import tensorflow as tf\n'), ((5181, 5220), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['pos_p_sub', '(1e-08)', '(1.0)'], {}), '(pos_p_sub, 1e-08, 1.0)\n', (5197, 5220), True, 'import tensorflow as tf\n'), ((5300, 5345), 'tensorflow.clip_by_value', 'tf.clip_by_value', 
(['(1.0 - neg_p_sub)', '(1e-08)', '(1.0)'], {}), '(1.0 - neg_p_sub, 1e-08, 1.0)\n', (5316, 5345), True, 'import tensorflow as tf\n'), ((6205, 6244), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['pos_p_sub', '(1e-08)', '(1.0)'], {}), '(pos_p_sub, 1e-08, 1.0)\n', (6221, 6244), True, 'import tensorflow as tf\n'), ((6324, 6369), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1.0 - neg_p_sub)', '(1e-08)', '(1.0)'], {}), '(1.0 - neg_p_sub, 1e-08, 1.0)\n', (6340, 6369), True, 'import tensorflow as tf\n'), ((6745, 6781), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y_pred', '(1e-08)', '(1.0)'], {}), '(y_pred, 1e-08, 1.0)\n', (6761, 6781), True, 'import tensorflow as tf\n'), ((6811, 6853), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1.0 - y_pred)', '(1e-08)', '(1.0)'], {}), '(1.0 - y_pred, 1e-08, 1.0)\n', (6827, 6853), True, 'import tensorflow as tf\n'), ((7160, 7202), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1.0 - y_pred)', '(1e-08)', '(1.0)'], {}), '(1.0 - y_pred, 1e-08, 1.0)\n', (7176, 7202), True, 'import tensorflow as tf\n'), ((7632, 7674), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1.0 - y_pred)', '(1e-08)', '(1.0)'], {}), '(1.0 - y_pred, 1e-08, 1.0)\n', (7648, 7674), True, 'import tensorflow as tf\n'), ((8208, 8224), 'tensorflow.round', 'tf.round', (['y_pred'], {}), '(y_pred)\n', (8216, 8224), True, 'import tensorflow as tf\n'), ((2656, 2682), 'numpy.zeros', 'np.zeros', (['(1, pitch_range)'], {}), '((1, pitch_range))\n', (2664, 2682), True, 'import numpy as np\n'), ((7094, 7117), 'tensorflow.abs', 'tf.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (7100, 7117), True, 'import tensorflow as tf\n'), ((7566, 7589), 'tensorflow.abs', 'tf.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (7572, 7589), True, 'import tensorflow as tf\n'), ((8773, 8789), 'tensorflow.round', 'tf.round', (['y_pred'], {}), '(y_pred)\n', (8781, 8789), True, 'import tensorflow as tf\n'), ((9362, 9378), 'tensorflow.round', 'tf.round', 
(['y_pred'], {}), '(y_pred)\n', (9370, 9378), True, 'import tensorflow as tf\n')] |
import os
import numpy as np
def build_datasets(base, val_ratio=0.2):
    """Create label / train / val / test list files for the dataset at *base*.

    base: dataset root directory containing ``train/``, ``train_label/`` and
        ``test/`` sub-directories.
    val_ratio: fraction of the (shuffled) training images held out for
        validation.

    Side effects: writes ``labels.txt``, ``train_list.txt``, ``val_list.txt``
    and ``test_list.txt`` under *base*.
    """
    # Known-bad images excluded from the train/val lists.
    ban_list = ["10020533.jpg"]
    with open(os.path.join(base, 'labels.txt'), 'w') as f:
        for i in range(20):
            f.write(str(i) + '\n')
    imgs = os.listdir(os.path.join(base, 'train/'))
    np.random.seed(5)  # fixed seed so the split is reproducible
    np.random.shuffle(imgs)
    val_num = int(val_ratio * len(imgs))
    # Avoid imgs[:-val_num]: it is EMPTY when val_num == 0, which would put
    # every image in the validation list and none in the training list.
    split = len(imgs) - val_num
    _write_split(os.path.join(base, 'train_list.txt'), imgs[:split], ban_list)
    _write_split(os.path.join(base, 'val_list.txt'), imgs[split:], ban_list)
    with open(os.path.join(base, 'test_list.txt'), 'w+') as f:
        for pt in os.listdir(os.path.join(base, 'test/')):
            f.write('test/' + pt + '\n')
def _write_split(list_path, img_names, ban_list):
    """Write one 'image annotation' line per non-banned image name."""
    with open(list_path, 'w+') as f:
        for pt in img_names:
            if pt in ban_list:
                continue
            img = 'train/' + pt
            ann = 'train_label/' + pt.replace('.jpg', '.png')
            f.write(img + ' ' + ann + '\n')
if __name__ == '__main__':
    # Hard-coded dataset root; adjust to the local checkout before running.
    path = r"/home/zxl/hualu-laneline-detection/data/"
    build_datasets(path)
| [
"os.listdir",
"numpy.random.seed",
"os.path.join",
"numpy.random.shuffle"
] | [((267, 284), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (281, 284), True, 'import numpy as np\n'), ((289, 312), 'numpy.random.shuffle', 'np.random.shuffle', (['imgs'], {}), '(imgs)\n', (306, 312), True, 'import numpy as np\n'), ((233, 261), 'os.path.join', 'os.path.join', (['base', '"""train/"""'], {}), "(base, 'train/')\n", (245, 261), False, 'import os\n'), ((1062, 1088), 'os.listdir', 'os.listdir', (["(base + 'test/')"], {}), "(base + 'test/')\n", (1072, 1088), False, 'import os\n'), ((369, 405), 'os.path.join', 'os.path.join', (['base', '"""train_list.txt"""'], {}), "(base, 'train_list.txt')\n", (381, 405), False, 'import os\n'), ((683, 717), 'os.path.join', 'os.path.join', (['base', '"""val_list.txt"""'], {}), "(base, 'val_list.txt')\n", (695, 717), False, 'import os\n'), ((995, 1030), 'os.path.join', 'os.path.join', (['base', '"""test_list.txt"""'], {}), "(base, 'test_list.txt')\n", (1007, 1030), False, 'import os\n')] |
#!/usr/bin/env python3
"""
@author:Harold
@file: utils.py
@time: 27/09/2019
"""
import numpy as np
import pandas as pd
def load_3d_pt_cloud_data_with_delimiter(path_name: str, delimiter: str) -> np.ndarray:
    """Load a delimited, headerless text file of point-cloud coordinates.

    Args:
        path_name: path to the text file; every column is parsed as float32.
        delimiter: column separator passed through to ``pandas.read_csv``.

    Returns:
        The file contents as a 2-D float32 numpy array (rows x columns).

    Note: the return annotation was ``np.array`` (a factory function, not a
    type); it is corrected to ``np.ndarray``.
    """
    frame = pd.read_csv(
        path_name, dtype=np.float32, delimiter=delimiter, header=None
    )
    return frame.to_numpy()
def set_axes_radius(ax, origin, radius):
    """Set every axis of *ax* to the interval [center - radius, center + radius].

    origin: sequence of the three axis centers (x, y, z).
    radius: common half-span applied to all three axes.
    """
    setters = (ax.set_xlim3d, ax.set_ylim3d, ax.set_zlim3d)
    for set_lim, center in zip(setters, origin):
        set_lim([center - radius, center + radius])
def set_axes_equal(ax):
    """Give a 3D matplotlib axis equal scale so spheres appear as spheres.

    Works around ax.set_aspect('equal') / ax.axis('equal') not being
    supported for 3D axes: every axis is recentred on its midpoint and given
    the same half-span (half the largest current axis extent).

    Input
      ax: a matplotlib 3D axis, e.g. as returned by plt.gca().
    """
    bounds = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
    center = np.mean(bounds, axis=1)
    half_span = np.abs(bounds[:, 1] - bounds[:, 0]).max() / 2.0
    set_axes_radius(ax, center, half_span)
| [
"pandas.read_csv",
"numpy.mean",
"numpy.abs"
] | [((990, 1013), 'numpy.mean', 'np.mean', (['limits'], {'axis': '(1)'}), '(limits, axis=1)\n', (997, 1013), True, 'import numpy as np\n'), ((219, 293), 'pandas.read_csv', 'pd.read_csv', (['path_name'], {'dtype': 'np.float32', 'delimiter': 'delimiter', 'header': 'None'}), '(path_name, dtype=np.float32, delimiter=delimiter, header=None)\n', (230, 293), True, 'import pandas as pd\n'), ((1040, 1075), 'numpy.abs', 'np.abs', (['(limits[:, 1] - limits[:, 0])'], {}), '(limits[:, 1] - limits[:, 0])\n', (1046, 1075), True, 'import numpy as np\n')] |
import numpy as np
import torch
import pytest
from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN
# Batch of 100 integer token sequences of length 48, token ids drawn from [1, 100).
padded_sequences = np.random.choice(np.arange(1, 100), (100, 48))
# Prepend two padding tokens (index 0) to every sequence -> final length 50.
padded_sequences = np.hstack(
    (np.repeat(np.array([[0, 0]]), 100, axis=0), padded_sequences)
)
# Random (1000, 64) float32 matrix standing in for pretrained word vectors.
pretrained_embeddings = np.random.rand(1000, 64).astype("float32")
vocab_size = 1000
# ###############################################################################
# # Test Basic Model with/without attention
# ###############################################################################
@pytest.mark.parametrize(
    "attention",
    [True, False],
)
def test_basic_model(attention):
    """Smoke test: BasicRNN / AttentiveRNN run and return the expected width."""
    if not attention:
        model = BasicRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    else:
        model = AttentiveRNN(vocab_size=vocab_size, embed_dim=32, padding_idx=0)
    out = model(torch.from_numpy(padded_sequences))
    assert out.size(0) == 100
    # The original try/except appended NO width check when ``attn_concatenate``
    # existed but was falsy; check every case explicitly instead.
    if getattr(model, "attn_concatenate", False):
        assert out.size(1) == model.hidden_dim * 2
    else:
        assert out.size(1) == model.hidden_dim
###############################################################################
# Without Pretrained Embeddings and attention
###############################################################################
@pytest.mark.parametrize(
    "bidirectional",
    [True, False],
)
@pytest.mark.parametrize(
    "attn_concatenate",
    [True, False],
)
def test_basic_model_with_attn(bidirectional, attn_concatenate):
    """AttentiveRNN output width doubles per enabled option; the attention
    weights must cover every timestep of the padded input."""
    model = AttentiveRNN(
        vocab_size=vocab_size,
        embed_dim=32,
        padding_idx=0,
        hidden_dim=32,
        bidirectional=bidirectional,
        attn_concatenate=attn_concatenate,
    )
    out = model(torch.from_numpy(padded_sequences))
    if attn_concatenate and bidirectional:
        expected_width = model.hidden_dim * 4
    elif attn_concatenate or bidirectional:
        expected_width = model.hidden_dim * 2
    else:
        expected_width = model.hidden_dim
    assert out.size(0) == 100
    assert out.size(1) == expected_width
    assert model.attn.attn_weights.size(1) == padded_sequences.shape[1]
###############################################################################
# With Pretrained Embeddings
###############################################################################
@pytest.mark.parametrize(
    "bidirectional",
    [True, False],
)
def test_model_with_pretrained(bidirectional):
    """BasicRNN with a pretrained embedding matrix returns the right width."""
    model = BasicRNN(
        vocab_size=vocab_size,
        embed_matrix=pretrained_embeddings,
        padding_idx=0,
        bidirectional=bidirectional,
    )
    out = model(torch.from_numpy(padded_sequences))
    # The original assert parsed as ``assert (<checks> if bidirectional else
    # model.hidden_dim)``: the unidirectional case asserted a truthy int and
    # never actually checked the output shape.
    expected = model.hidden_dim * 2 if bidirectional else model.hidden_dim
    assert out.size(0) == 100 and out.size(1) == expected
###############################################################################
# Make sure it throws a UserWarning when the input embedding dimension and the
# dimension of the pretrained embeddings do not match.
###############################################################################
@pytest.mark.parametrize(
    "model_name",
    ["basic", "stacked"],
)
def test_catch_warning(model_name):
    """A mismatch between embed_dim and the width of the pretrained matrix
    must raise a UserWarning; the model then uses the matrix width (64)."""
    common_kwargs = dict(
        vocab_size=vocab_size,
        embed_dim=32,
        embed_matrix=pretrained_embeddings,
        padding_idx=0,
    )
    with pytest.warns(UserWarning):
        if model_name == "basic":
            model = BasicRNN(**common_kwargs)
        elif model_name == "stacked":
            model = StackedAttentiveRNN(n_blocks=2, **common_kwargs)
        out = model(torch.from_numpy(padded_sequences))
    assert out.size(0) == 100 and out.size(1) == 64
###############################################################################
# Without Pretrained Embeddings and head layers
###############################################################################
@pytest.mark.parametrize(
    "attention",
    [True, False],
)
def test_model_with_head_layers(attention):
    """A head MLP of [64, 16] shrinks the model output to width 16."""
    model_cls = AttentiveRNN if attention else BasicRNN
    model = model_cls(
        vocab_size=vocab_size,
        embed_dim=32,
        padding_idx=0,
        head_hidden_dims=[64, 16],
    )
    out = model(torch.from_numpy(padded_sequences))
    assert out.size(0) == 100
    assert out.size(1) == 16
###############################################################################
# Pretrained Embeddings made non-trainable
###############################################################################
def test_embed_non_trainable():
    """With embed_trainable=False the embedding weights stay byte-identical
    to the pretrained matrix after a forward pass."""
    model = BasicRNN(
        vocab_size=vocab_size,
        embed_matrix=pretrained_embeddings,
        embed_trainable=False,
        padding_idx=0,
    )
    _ = model(torch.from_numpy(padded_sequences))
    assert np.allclose(model.word_embed.weight.numpy(), pretrained_embeddings)
# ##############################################################################
# GRU and using output
# ##############################################################################
@pytest.mark.parametrize(
    "bidirectional",
    [True, False],
)
def test_gru_and_using_ouput(bidirectional):
    """A GRU consuming the raw RNN output (not the hidden state) keeps the
    model's reported output_dim."""
    model = BasicRNN(
        vocab_size=vocab_size,
        rnn_type="gru",
        embed_dim=32,
        bidirectional=bidirectional,
        padding_idx=0,
        use_hidden_state=False,
    )
    out = model(torch.from_numpy(padded_sequences))
    assert out.size(0) == 100
    assert out.size(1) == model.output_dim
# ###############################################################################
# # Test StackedAttentiveRNN
# ###############################################################################
@pytest.mark.parametrize(
    "rnn_type",
    ["lstm", "gru"],
)
@pytest.mark.parametrize(
    "bidirectional",
    [True, False],
)
@pytest.mark.parametrize(
    "attn_concatenate",
    [True, False],
)
@pytest.mark.parametrize(
    "with_addnorm",
    [True, False],
)
@pytest.mark.parametrize(
    "with_head",
    [True, False],
)
def test_stacked_attentive_rnn(
    rnn_type, bidirectional, attn_concatenate, with_addnorm, with_head
):
    """StackedAttentiveRNN output width across all option combinations."""
    model = StackedAttentiveRNN(
        vocab_size=vocab_size,
        embed_dim=32,
        hidden_dim=32,
        n_blocks=2,
        padding_idx=0,
        rnn_type=rnn_type,
        bidirectional=bidirectional,
        attn_concatenate=attn_concatenate,
        with_addnorm=with_addnorm,
        head_hidden_dims=[50] if with_head else None,
    )
    out = model(torch.from_numpy(padded_sequences))
    assert out.size(0) == 100
    if with_head:
        assert out.size(1) == 50
    else:
        # hidden_dim is doubled once per enabled doubling option.
        multiplier = (2 if bidirectional else 1) * (2 if attn_concatenate else 1)
        assert out.size(1) == model.hidden_dim * multiplier
def test_stacked_attentive_rnn_embed_non_trainable():
    """With embed_trainable=False the stacked model leaves the pretrained
    embedding matrix untouched after a forward pass."""
    model = StackedAttentiveRNN(
        vocab_size=vocab_size,
        embed_matrix=pretrained_embeddings,
        embed_trainable=False,
        n_blocks=2,
        padding_idx=0,
    )
    _ = model(torch.from_numpy(padded_sequences))
    assert np.allclose(model.word_embed.weight.numpy(), pretrained_embeddings)
# ###############################################################################
# # Test Attn weights are ok
# ###############################################################################
@pytest.mark.parametrize(
    "stacked",
    [True, False],
)
def test_attn_weights(stacked):
    """Attention weights have shape (batch, seq_len); the stacked variant
    exposes one weight tensor per block."""
    if stacked:
        model = StackedAttentiveRNN(
            vocab_size=vocab_size,
            embed_dim=32,
            n_blocks=2,
            padding_idx=0,
        )
    else:
        model = AttentiveRNN(
            vocab_size=vocab_size,
            embed_dim=32,
            padding_idx=0,
            head_hidden_dims=[64, 16],
        )
    _ = model(torch.from_numpy(padded_sequences))
    weights = model.attention_weights
    expected_shape = torch.Size([100, 50])
    if stacked:
        assert len(weights) == model.n_blocks
        assert weights[0].size() == expected_shape
    else:
        assert weights.size() == expected_shape
| [
"pytest.warns",
"pytorch_widedeep.models.BasicRNN",
"pytorch_widedeep.models.StackedAttentiveRNN",
"numpy.arange",
"numpy.array",
"torch.Size",
"numpy.random.rand",
"pytest.mark.parametrize",
"pytorch_widedeep.models.AttentiveRNN",
"torch.from_numpy"
] | [((591, 642), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attention"""', '[True, False]'], {}), "('attention', [True, False])\n", (614, 642), False, 'import pytest\n'), ((1389, 1444), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bidirectional"""', '[True, False]'], {}), "('bidirectional', [True, False])\n", (1412, 1444), False, 'import pytest\n'), ((1457, 1515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attn_concatenate"""', '[True, False]'], {}), "('attn_concatenate', [True, False])\n", (1480, 1515), False, 'import pytest\n'), ((2475, 2530), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bidirectional"""', '[True, False]'], {}), "('bidirectional', [True, False])\n", (2498, 2530), False, 'import pytest\n'), ((3244, 3303), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_name"""', "['basic', 'stacked']"], {}), "('model_name', ['basic', 'stacked'])\n", (3267, 3303), False, 'import pytest\n'), ((4207, 4258), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attention"""', '[True, False]'], {}), "('attention', [True, False])\n", (4230, 4258), False, 'import pytest\n'), ((5511, 5566), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bidirectional"""', '[True, False]'], {}), "('bidirectional', [True, False])\n", (5534, 5566), False, 'import pytest\n'), ((6150, 6202), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rnn_type"""', "['lstm', 'gru']"], {}), "('rnn_type', ['lstm', 'gru'])\n", (6173, 6202), False, 'import pytest\n'), ((6215, 6270), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bidirectional"""', '[True, False]'], {}), "('bidirectional', [True, False])\n", (6238, 6270), False, 'import pytest\n'), ((6283, 6341), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attn_concatenate"""', '[True, False]'], {}), "('attn_concatenate', [True, False])\n", (6306, 6341), False, 'import pytest\n'), ((6354, 6408), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_addnorm"""', '[True, False]'], {}), "('with_addnorm', [True, False])\n", (6377, 6408), False, 'import pytest\n'), ((6421, 6472), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_head"""', '[True, False]'], {}), "('with_head', [True, False])\n", (6444, 6472), False, 'import pytest\n'), ((7998, 8047), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stacked"""', '[True, False]'], {}), "('stacked', [True, False])\n", (8021, 8047), False, 'import pytest\n'), ((164, 181), 'numpy.arange', 'np.arange', (['(1)', '(100)'], {}), '(1, 100)\n', (173, 181), True, 'import numpy as np\n'), ((1604, 1752), 'pytorch_widedeep.models.AttentiveRNN', 'AttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'padding_idx': '(0)', 'hidden_dim': '(32)', 'bidirectional': 'bidirectional', 'attn_concatenate': 'attn_concatenate'}), '(vocab_size=vocab_size, embed_dim=32, padding_idx=0, hidden_dim\n =32, bidirectional=bidirectional, attn_concatenate=attn_concatenate)\n', (1616, 1752), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((2601, 2716), 'pytorch_widedeep.models.BasicRNN', 'BasicRNN', ([], {'vocab_size': 'vocab_size', 'embed_matrix': 'pretrained_embeddings', 'padding_idx': '(0)', 'bidirectional': 'bidirectional'}), '(vocab_size=vocab_size, embed_matrix=pretrained_embeddings,\n padding_idx=0, bidirectional=bidirectional)\n', (2609, 2716), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((5031, 5140), 'pytorch_widedeep.models.BasicRNN', 'BasicRNN', ([], {'vocab_size': 'vocab_size', 'embed_matrix': 'pretrained_embeddings', 'embed_trainable': '(False)', 'padding_idx': '(0)'}), '(vocab_size=vocab_size, embed_matrix=pretrained_embeddings,\n embed_trainable=False, padding_idx=0)\n', (5039, 5140), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((5635, 5769), 
'pytorch_widedeep.models.BasicRNN', 'BasicRNN', ([], {'vocab_size': 'vocab_size', 'rnn_type': '"""gru"""', 'embed_dim': '(32)', 'bidirectional': 'bidirectional', 'padding_idx': '(0)', 'use_hidden_state': '(False)'}), "(vocab_size=vocab_size, rnn_type='gru', embed_dim=32, bidirectional\n =bidirectional, padding_idx=0, use_hidden_state=False)\n", (5643, 5769), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((6603, 6871), 'pytorch_widedeep.models.StackedAttentiveRNN', 'StackedAttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'hidden_dim': '(32)', 'n_blocks': '(2)', 'padding_idx': '(0)', 'rnn_type': 'rnn_type', 'bidirectional': 'bidirectional', 'attn_concatenate': 'attn_concatenate', 'with_addnorm': 'with_addnorm', 'head_hidden_dims': '([50] if with_head else None)'}), '(vocab_size=vocab_size, embed_dim=32, hidden_dim=32,\n n_blocks=2, padding_idx=0, rnn_type=rnn_type, bidirectional=\n bidirectional, attn_concatenate=attn_concatenate, with_addnorm=\n with_addnorm, head_hidden_dims=[50] if with_head else None)\n', (6622, 6871), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((7479, 7612), 'pytorch_widedeep.models.StackedAttentiveRNN', 'StackedAttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_matrix': 'pretrained_embeddings', 'embed_trainable': '(False)', 'n_blocks': '(2)', 'padding_idx': '(0)'}), '(vocab_size=vocab_size, embed_matrix=\n pretrained_embeddings, embed_trainable=False, n_blocks=2, padding_idx=0)\n', (7498, 7612), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((317, 341), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(64)'], {}), '(1000, 64)\n', (331, 341), True, 'import numpy as np\n'), ((725, 785), 'pytorch_widedeep.models.BasicRNN', 'BasicRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'padding_idx': '(0)'}), '(vocab_size=vocab_size, embed_dim=32, padding_idx=0)\n', (733, 
785), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((812, 876), 'pytorch_widedeep.models.AttentiveRNN', 'AttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'padding_idx': '(0)'}), '(vocab_size=vocab_size, embed_dim=32, padding_idx=0)\n', (824, 876), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((893, 927), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (909, 927), False, 'import torch\n'), ((1819, 1853), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (1835, 1853), False, 'import torch\n'), ((2768, 2802), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (2784, 2802), False, 'import torch\n'), ((3360, 3385), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (3372, 3385), False, 'import pytest\n'), ((3906, 3940), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (3922, 3940), False, 'import torch\n'), ((4352, 4443), 'pytorch_widedeep.models.BasicRNN', 'BasicRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'padding_idx': '(0)', 'head_hidden_dims': '[64, 16]'}), '(vocab_size=vocab_size, embed_dim=32, padding_idx=0,\n head_hidden_dims=[64, 16])\n', (4360, 4443), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((4525, 4620), 'pytorch_widedeep.models.AttentiveRNN', 'AttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'padding_idx': '(0)', 'head_hidden_dims': '[64, 16]'}), '(vocab_size=vocab_size, embed_dim=32, padding_idx=0,\n head_hidden_dims=[64, 16])\n', (4537, 4620), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((4692, 4726), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (4708, 4726), False, 
'import torch\n'), ((5192, 5226), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (5208, 5226), False, 'import torch\n'), ((5836, 5870), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (5852, 5870), False, 'import torch\n'), ((6961, 6995), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (6977, 6995), False, 'import torch\n'), ((7671, 7705), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (7687, 7705), False, 'import torch\n'), ((8123, 8210), 'pytorch_widedeep.models.StackedAttentiveRNN', 'StackedAttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'n_blocks': '(2)', 'padding_idx': '(0)'}), '(vocab_size=vocab_size, embed_dim=32, n_blocks=2,\n padding_idx=0)\n', (8142, 8210), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((8292, 8387), 'pytorch_widedeep.models.AttentiveRNN', 'AttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'padding_idx': '(0)', 'head_hidden_dims': '[64, 16]'}), '(vocab_size=vocab_size, embed_dim=32, padding_idx=0,\n head_hidden_dims=[64, 16])\n', (8304, 8387), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((8460, 8494), 'torch.from_numpy', 'torch.from_numpy', (['padded_sequences'], {}), '(padded_sequences)\n', (8476, 8494), False, 'import torch\n'), ((239, 257), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (247, 257), True, 'import numpy as np\n'), ((3441, 3542), 'pytorch_widedeep.models.BasicRNN', 'BasicRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'embed_matrix': 'pretrained_embeddings', 'padding_idx': '(0)'}), '(vocab_size=vocab_size, embed_dim=32, embed_matrix=\n pretrained_embeddings, padding_idx=0)\n', (3449, 3542), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), 
((8720, 8741), 'torch.Size', 'torch.Size', (['[100, 50]'], {}), '([100, 50])\n', (8730, 8741), False, 'import torch\n'), ((3675, 3799), 'pytorch_widedeep.models.StackedAttentiveRNN', 'StackedAttentiveRNN', ([], {'vocab_size': 'vocab_size', 'embed_dim': '(32)', 'embed_matrix': 'pretrained_embeddings', 'padding_idx': '(0)', 'n_blocks': '(2)'}), '(vocab_size=vocab_size, embed_dim=32, embed_matrix=\n pretrained_embeddings, padding_idx=0, n_blocks=2)\n', (3694, 3799), False, 'from pytorch_widedeep.models import BasicRNN, AttentiveRNN, StackedAttentiveRNN\n'), ((8634, 8655), 'torch.Size', 'torch.Size', (['[100, 50]'], {}), '([100, 50])\n', (8644, 8655), False, 'import torch\n')] |
"""Unit Tests for inferences module"""
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from statsmodels.tsa.statespace.structural import UnobservedComponents
from statsmodels.tsa.arima_process import ArmaProcess
import causalimpact
# Shorthand for the function under test.
compile_posterior = causalimpact.inferences.compile_posterior_inferences
# Fix the global RNG so the ARMA sample drawn in the `data` fixture is reproducible.
np.random.seed(1)
@pytest.fixture
def data():
    """Synthetic two-column frame: a response driven by an AR(1) covariate.

    Column 0 is ``1.2 * X + noise`` (the response), column 1 is the
    covariate ``X`` itself, both of length 100.
    """
    ar = np.r_[1, 0.9]
    ma = np.array([1])
    arma_process = ArmaProcess(ar, ma)
    covariate = 1 + arma_process.generate_sample(nsample=100)
    covariate = covariate.reshape(-1, 1)
    response = 1.2 * covariate + np.random.normal(size=(100, 1))
    return pd.DataFrame(np.concatenate((response, covariate), axis=1))
def test_compile_posterior_inferences_w_data(data):
    """compile_posterior_inferences with an observed post-period DataFrame.

    Fits a local-level UnobservedComponents model on the pre-period only and
    checks every series produced by ``compile_posterior`` (response,
    predictions, confidence bounds, point and cumulative effects) against
    values recomputed directly from the fitted statsmodels results.
    """
    pre_period = [0, 70]
    post_period = [71, 100]
    df_pre = data.loc[pre_period[0]: pre_period[1], :]
    df_post = data.loc[post_period[0]: post_period[1], :]
    # No separately supplied post-period response: df_post itself is observed.
    post_period_response = None
    alpha = 0.05
    orig_std_params = (0., 1.)
    # Local-level ("llevel") structural model with the covariate as exog.
    model = UnobservedComponents(
        endog=df_pre.iloc[:, 0].values,
        level='llevel',
        exog=df_pre.iloc[:, 1:].values
    )
    trained_model = model.fit()
    inferences = compile_posterior(
        trained_model,
        data,
        df_pre,
        df_post,
        post_period_response,
        alpha,
        orig_std_params
    )
    # --- observed response and its cumulative sum ------------------------
    expected_response = pd.Series(data.iloc[:, 0], name='response')
    assert_series_equal(expected_response, inferences['series']['response'])
    expected_cumsum = pd.Series(
        np.cumsum(expected_response),
        name='cum_response'
    )
    assert_series_equal(expected_cumsum, inferences['series']['cum_response'])
    # --- point predictions: in-sample for pre, true forecast for post ----
    predictor = trained_model.get_prediction()
    forecaster = trained_model.get_forecast(
        steps=len(df_post),
        exog=df_post.iloc[:, 1].values.reshape(-1, 1),
        alpha=alpha
    )
    pre_pred = predictor.predicted_mean
    post_pred = forecaster.predicted_mean
    point_pred = np.concatenate([pre_pred, post_pred])
    expected_point_pred = pd.Series(point_pred, name='point_pred')
    assert_series_equal(
        expected_point_pred,
        inferences['series']['point_pred']
    )
    # --- prediction confidence bounds (lower = col 0, upper = col 1) -----
    pre_ci = pd.DataFrame(predictor.conf_int(alpha=alpha))
    pre_ci.index = df_pre.index
    post_ci = pd.DataFrame(forecaster.conf_int(alpha=alpha))
    post_ci.index = df_post.index
    ci = pd.concat([pre_ci, post_ci])
    expected_pred_upper = ci.iloc[:, 1]
    expected_pred_upper = expected_pred_upper.rename('point_pred_upper')
    expected_pred_lower = ci.iloc[:, 0]
    expected_pred_lower = expected_pred_lower.rename('point_pred_lower')
    assert_series_equal(
        expected_pred_upper,
        inferences['series']['point_pred_upper']
    )
    assert_series_equal(
        expected_pred_lower,
        inferences['series']['point_pred_lower']
    )
    # --- cumulative predictions and their bounds -------------------------
    expected_cum_pred = pd.Series(
        np.cumsum(point_pred),
        name='cum_pred'
    )
    assert_series_equal(
        expected_cum_pred,
        inferences['series']['cum_pred']
    )
    expected_cum_pred_lower = pd.Series(
        np.cumsum(expected_pred_lower),
        name='cum_pred_lower'
    )
    assert_series_equal(
        expected_cum_pred_lower,
        inferences['series']['cum_pred_lower']
    )
    expected_cum_pred_upper = pd.Series(
        np.cumsum(expected_pred_upper),
        name='cum_pred_upper'
    )
    assert_series_equal(
        expected_cum_pred_upper,
        inferences['series']['cum_pred_upper']
    )
    # --- point effects: observed response minus prediction ---------------
    expected_point_effect = pd.Series(
        expected_response - expected_point_pred,
        name='point_effect'
    )
    assert_series_equal(
        expected_point_effect,
        inferences['series']['point_effect']
    )
    expected_point_effect_lower = pd.Series(
        expected_response - expected_pred_lower,
        name='point_effect_lower'
    )
    assert_series_equal(
        expected_point_effect_lower,
        inferences['series']['point_effect_lower']
    )
    expected_point_effect_upper = pd.Series(
        expected_response - expected_pred_upper,
        name='point_effect_upper'
    )
    assert_series_equal(
        expected_point_effect_upper,
        inferences['series']['point_effect_upper']
    )
    # --- cumulative effects: zero over the pre-period, accumulated after -
    expected_cum_effect = pd.Series(
        np.concatenate((np.zeros(len(df_pre)),
            np.cumsum(expected_point_effect.iloc[len(df_pre):]))),
        name='cum_effect'
    )
    assert_series_equal(
        expected_cum_effect,
        inferences['series']['cum_effect']
    )
    expected_cum_effect_lower = pd.Series(
        np.concatenate(
            (np.zeros(len(df_pre)),
            np.cumsum(expected_point_effect_lower.iloc[len(df_pre):]))),
        name='cum_effect_lower'
    )
    assert_series_equal(
        expected_cum_effect_lower,
        inferences['series']['cum_effect_lower']
    )
    expected_cum_effect_upper = pd.Series(
        np.concatenate((
            np.zeros(len(df_pre)),
            np.cumsum(expected_point_effect_upper.iloc[len(df_pre):])
        )),
        name='cum_effect_upper'
    )
    assert_series_equal(
        expected_cum_effect_upper,
        inferences['series']['cum_effect_upper']
    )
def test_compile_posterior_inferences_w_post_period_response(data):
    """compile_posterior_inferences when the post period is passed separately.

    The post-period response column is replaced by NaN in the training data
    and supplied via ``post_period_response`` instead; predictions for both
    periods then come from ``get_prediction`` over the full (partly missing)
    endog. All output series are checked against values recomputed from the
    fitted model.
    """
    pre_period = [0, 70]
    post_period = [71, 100]
    df_pre = data.loc[pre_period[0]: pre_period[1], :]
    df_post = data.loc[post_period[0]: post_period[1], :]
    post_period_response = df_post.loc[post_period[0]: post_period[1]]
    # Blank out the post-period response while keeping the covariate observed.
    X = df_post.iloc[:, 1:]
    y = X.copy()
    y[:] = np.nan
    df_post = pd.DataFrame(np.concatenate([y, X], axis=1))
    data_index = data.index
    data = pd.concat([df_pre, df_post], axis=0)
    data.index = data_index
    alpha = 0.05
    orig_std_params = (0., 1.)
    # Model is fit on the full series; NaN responses are treated as missing.
    model = UnobservedComponents(
        endog=data.iloc[:, 0].values,
        level='llevel',
        exog=data.iloc[:, 1:].values
    )
    trained_model = model.fit()
    inferences = compile_posterior(
        trained_model,
        data,
        df_pre,
        None,
        post_period_response,
        alpha,
        orig_std_params
    )
    # --- observed response and its cumulative sum ------------------------
    expected_response = pd.Series(data.iloc[:, 0], name='response')
    assert_series_equal(expected_response, inferences['series']['response'])
    expected_cumsum = pd.Series(
        np.cumsum(expected_response),
        name='cum_response'
    )
    assert_series_equal(expected_cumsum, inferences['series']['cum_response'])
    # --- predictions: in-sample split at the pre/post boundary -----------
    predictor = trained_model.get_prediction(end=len(df_pre) - 1)
    forecaster = trained_model.get_prediction(start=len(df_pre))
    pre_pred = predictor.predicted_mean
    post_pred = forecaster.predicted_mean
    point_pred = np.concatenate([pre_pred, post_pred])
    expected_point_pred = pd.Series(point_pred, name='point_pred')
    assert_series_equal(
        expected_point_pred,
        inferences['series']['point_pred']
    )
    # --- prediction confidence bounds (lower = col 0, upper = col 1) -----
    pre_ci = pd.DataFrame(predictor.conf_int(alpha=alpha))
    pre_ci.index = df_pre.index
    post_ci = pd.DataFrame(forecaster.conf_int(alpha=alpha))
    post_ci.index = df_post.index
    ci = pd.concat([pre_ci, post_ci])
    expected_pred_upper = ci.iloc[:, 1]
    expected_pred_upper = expected_pred_upper.rename('point_pred_upper')
    expected_pred_upper.index = data.index
    expected_pred_lower = ci.iloc[:, 0]
    expected_pred_lower = expected_pred_lower.rename('point_pred_lower')
    expected_pred_lower.index = data.index
    assert_series_equal(
        expected_pred_upper,
        inferences['series']['point_pred_upper']
    )
    assert_series_equal(
        expected_pred_lower,
        inferences['series']['point_pred_lower']
    )
    # --- cumulative predictions and their bounds -------------------------
    expected_cum_pred = pd.Series(
        np.cumsum(point_pred),
        name='cum_pred'
    )
    assert_series_equal(
        expected_cum_pred,
        inferences['series']['cum_pred']
    )
    expected_cum_pred_lower = pd.Series(
        np.cumsum(expected_pred_lower),
        name='cum_pred_lower'
    )
    assert_series_equal(
        expected_cum_pred_lower,
        inferences['series']['cum_pred_lower']
    )
    expected_cum_pred_upper = pd.Series(
        np.cumsum(expected_pred_upper),
        name='cum_pred_upper'
    )
    assert_series_equal(
        expected_cum_pred_upper,
        inferences['series']['cum_pred_upper']
    )
    # --- point effects: observed response minus prediction ---------------
    expected_point_effect = pd.Series(
        expected_response - expected_point_pred,
        name='point_effect'
    )
    assert_series_equal(
        expected_point_effect,
        inferences['series']['point_effect']
    )
    expected_point_effect_lower = pd.Series(
        expected_response - expected_pred_lower,
        name='point_effect_lower'
    )
    assert_series_equal(
        expected_point_effect_lower,
        inferences['series']['point_effect_lower']
    )
    expected_point_effect_upper = pd.Series(
        expected_response - expected_pred_upper,
        name='point_effect_upper'
    )
    assert_series_equal(
        expected_point_effect_upper,
        inferences['series']['point_effect_upper']
    )
    # --- cumulative effects: zero over the pre-period, accumulated after -
    expected_cum_effect = pd.Series(
        np.concatenate((
            np.zeros(len(df_pre)),
            np.cumsum(expected_point_effect.iloc[len(df_pre):])
        )),
        name='cum_effect'
    )
    assert_series_equal(
        expected_cum_effect,
        inferences['series']['cum_effect']
    )
    expected_cum_effect_lower = pd.Series(
        np.concatenate((
            np.zeros(len(df_pre)),
            np.cumsum(expected_point_effect_lower.iloc[len(df_pre):])
        )),
        name='cum_effect_lower'
    )
    assert_series_equal(
        expected_cum_effect_lower,
        inferences['series']['cum_effect_lower']
    )
    expected_cum_effect_upper = pd.Series(
        np.concatenate((
            np.zeros(len(df_pre)),
            np.cumsum(expected_point_effect_upper.iloc[len(df_pre):])
        )),
        name='cum_effect_upper'
    )
    assert_series_equal(
        expected_cum_effect_upper,
        inferences['series']['cum_effect_upper']
    )
| [
"pandas.DataFrame",
"statsmodels.tsa.statespace.structural.UnobservedComponents",
"numpy.random.seed",
"numpy.cumsum",
"numpy.array",
"pandas.Series",
"numpy.random.normal",
"statsmodels.tsa.arima_process.ArmaProcess",
"pandas.testing.assert_series_equal",
"pandas.concat",
"numpy.concatenate"
] | [((361, 378), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (375, 378), True, 'import numpy as np\n'), ((441, 454), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (449, 454), True, 'import numpy as np\n'), ((474, 493), 'statsmodels.tsa.arima_process.ArmaProcess', 'ArmaProcess', (['ar', 'ma'], {}), '(ar, ma)\n', (485, 493), False, 'from statsmodels.tsa.arima_process import ArmaProcess\n'), ((635, 665), 'numpy.concatenate', 'np.concatenate', (['(y, X)'], {'axis': '(1)'}), '((y, X), axis=1)\n', (649, 665), True, 'import numpy as np\n'), ((677, 695), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (689, 695), True, 'import pandas as pd\n'), ((1027, 1132), 'statsmodels.tsa.statespace.structural.UnobservedComponents', 'UnobservedComponents', ([], {'endog': 'df_pre.iloc[:, 0].values', 'level': '"""llevel"""', 'exog': 'df_pre.iloc[:, 1:].values'}), "(endog=df_pre.iloc[:, 0].values, level='llevel', exog=\n df_pre.iloc[:, 1:].values)\n", (1047, 1132), False, 'from statsmodels.tsa.statespace.structural import UnobservedComponents\n'), ((1398, 1441), 'pandas.Series', 'pd.Series', (['data.iloc[:, 0]'], {'name': '"""response"""'}), "(data.iloc[:, 0], name='response')\n", (1407, 1441), True, 'import pandas as pd\n'), ((1446, 1518), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_response', "inferences['series']['response']"], {}), "(expected_response, inferences['series']['response'])\n", (1465, 1518), False, 'from pandas.testing import assert_series_equal\n'), ((1630, 1704), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cumsum', "inferences['series']['cum_response']"], {}), "(expected_cumsum, inferences['series']['cum_response'])\n", (1649, 1704), False, 'from pandas.testing import assert_series_equal\n'), ((2008, 2045), 'numpy.concatenate', 'np.concatenate', (['[pre_pred, post_pred]'], {}), '([pre_pred, post_pred])\n', (2022, 2045), True, 'import numpy as np\n'), ((2073, 2113), 
'pandas.Series', 'pd.Series', (['point_pred'], {'name': '"""point_pred"""'}), "(point_pred, name='point_pred')\n", (2082, 2113), True, 'import pandas as pd\n'), ((2118, 2194), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_pred', "inferences['series']['point_pred']"], {}), "(expected_point_pred, inferences['series']['point_pred'])\n", (2137, 2194), False, 'from pandas.testing import assert_series_equal\n'), ((2414, 2442), 'pandas.concat', 'pd.concat', (['[pre_ci, post_ci]'], {}), '([pre_ci, post_ci])\n', (2423, 2442), True, 'import pandas as pd\n'), ((2675, 2762), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_pred_upper', "inferences['series']['point_pred_upper']"], {}), "(expected_pred_upper, inferences['series'][\n 'point_pred_upper'])\n", (2694, 2762), False, 'from pandas.testing import assert_series_equal\n'), ((2784, 2871), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_pred_lower', "inferences['series']['point_pred_lower']"], {}), "(expected_pred_lower, inferences['series'][\n 'point_pred_lower'])\n", (2803, 2871), False, 'from pandas.testing import assert_series_equal\n'), ((2990, 3062), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_pred', "inferences['series']['cum_pred']"], {}), "(expected_cum_pred, inferences['series']['cum_pred'])\n", (3009, 3062), False, 'from pandas.testing import assert_series_equal\n'), ((3207, 3296), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_pred_lower', "inferences['series']['cum_pred_lower']"], {}), "(expected_cum_pred_lower, inferences['series'][\n 'cum_pred_lower'])\n", (3226, 3296), False, 'from pandas.testing import assert_series_equal\n'), ((3436, 3525), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_pred_upper', "inferences['series']['cum_pred_upper']"], {}), "(expected_cum_pred_upper, inferences['series'][\n 'cum_pred_upper'])\n", (3455, 3525), 
False, 'from pandas.testing import assert_series_equal\n'), ((3572, 3643), 'pandas.Series', 'pd.Series', (['(expected_response - expected_point_pred)'], {'name': '"""point_effect"""'}), "(expected_response - expected_point_pred, name='point_effect')\n", (3581, 3643), True, 'import pandas as pd\n'), ((3670, 3755), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_effect', "inferences['series']['point_effect']"], {}), "(expected_point_effect, inferences['series']['point_effect']\n )\n", (3689, 3755), False, 'from pandas.testing import assert_series_equal\n'), ((3808, 3885), 'pandas.Series', 'pd.Series', (['(expected_response - expected_pred_lower)'], {'name': '"""point_effect_lower"""'}), "(expected_response - expected_pred_lower, name='point_effect_lower')\n", (3817, 3885), True, 'import pandas as pd\n'), ((3912, 4009), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_effect_lower', "inferences['series']['point_effect_lower']"], {}), "(expected_point_effect_lower, inferences['series'][\n 'point_effect_lower'])\n", (3931, 4009), False, 'from pandas.testing import assert_series_equal\n'), ((4062, 4139), 'pandas.Series', 'pd.Series', (['(expected_response - expected_pred_upper)'], {'name': '"""point_effect_upper"""'}), "(expected_response - expected_pred_upper, name='point_effect_upper')\n", (4071, 4139), True, 'import pandas as pd\n'), ((4166, 4263), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_effect_upper', "inferences['series']['point_effect_upper']"], {}), "(expected_point_effect_upper, inferences['series'][\n 'point_effect_upper'])\n", (4185, 4263), False, 'from pandas.testing import assert_series_equal\n'), ((4481, 4557), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_effect', "inferences['series']['cum_effect']"], {}), "(expected_cum_effect, inferences['series']['cum_effect'])\n", (4500, 4557), False, 'from pandas.testing import 
assert_series_equal\n'), ((4800, 4893), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_effect_lower', "inferences['series']['cum_effect_lower']"], {}), "(expected_cum_effect_lower, inferences['series'][\n 'cum_effect_lower'])\n", (4819, 4893), False, 'from pandas.testing import assert_series_equal\n'), ((5139, 5232), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_effect_upper', "inferences['series']['cum_effect_upper']"], {}), "(expected_cum_effect_upper, inferences['series'][\n 'cum_effect_upper'])\n", (5158, 5232), False, 'from pandas.testing import assert_series_equal\n'), ((5722, 5758), 'pandas.concat', 'pd.concat', (['[df_pre, df_post]'], {'axis': '(0)'}), '([df_pre, df_post], axis=0)\n', (5731, 5758), True, 'import pandas as pd\n'), ((5850, 5951), 'statsmodels.tsa.statespace.structural.UnobservedComponents', 'UnobservedComponents', ([], {'endog': 'data.iloc[:, 0].values', 'level': '"""llevel"""', 'exog': 'data.iloc[:, 1:].values'}), "(endog=data.iloc[:, 0].values, level='llevel', exog=\n data.iloc[:, 1:].values)\n", (5870, 5951), False, 'from statsmodels.tsa.statespace.structural import UnobservedComponents\n'), ((6214, 6257), 'pandas.Series', 'pd.Series', (['data.iloc[:, 0]'], {'name': '"""response"""'}), "(data.iloc[:, 0], name='response')\n", (6223, 6257), True, 'import pandas as pd\n'), ((6262, 6334), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_response', "inferences['series']['response']"], {}), "(expected_response, inferences['series']['response'])\n", (6281, 6334), False, 'from pandas.testing import assert_series_equal\n'), ((6446, 6520), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cumsum', "inferences['series']['cum_response']"], {}), "(expected_cumsum, inferences['series']['cum_response'])\n", (6465, 6520), False, 'from pandas.testing import assert_series_equal\n'), ((6754, 6791), 'numpy.concatenate', 'np.concatenate', (['[pre_pred, 
post_pred]'], {}), '([pre_pred, post_pred])\n', (6768, 6791), True, 'import numpy as np\n'), ((6819, 6859), 'pandas.Series', 'pd.Series', (['point_pred'], {'name': '"""point_pred"""'}), "(point_pred, name='point_pred')\n", (6828, 6859), True, 'import pandas as pd\n'), ((6864, 6940), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_pred', "inferences['series']['point_pred']"], {}), "(expected_point_pred, inferences['series']['point_pred'])\n", (6883, 6940), False, 'from pandas.testing import assert_series_equal\n'), ((7160, 7188), 'pandas.concat', 'pd.concat', (['[pre_ci, post_ci]'], {}), '([pre_ci, post_ci])\n', (7169, 7188), True, 'import pandas as pd\n'), ((7508, 7595), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_pred_upper', "inferences['series']['point_pred_upper']"], {}), "(expected_pred_upper, inferences['series'][\n 'point_pred_upper'])\n", (7527, 7595), False, 'from pandas.testing import assert_series_equal\n'), ((7617, 7704), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_pred_lower', "inferences['series']['point_pred_lower']"], {}), "(expected_pred_lower, inferences['series'][\n 'point_pred_lower'])\n", (7636, 7704), False, 'from pandas.testing import assert_series_equal\n'), ((7823, 7895), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_pred', "inferences['series']['cum_pred']"], {}), "(expected_cum_pred, inferences['series']['cum_pred'])\n", (7842, 7895), False, 'from pandas.testing import assert_series_equal\n'), ((8040, 8129), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_pred_lower', "inferences['series']['cum_pred_lower']"], {}), "(expected_cum_pred_lower, inferences['series'][\n 'cum_pred_lower'])\n", (8059, 8129), False, 'from pandas.testing import assert_series_equal\n'), ((8269, 8358), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_pred_upper', 
"inferences['series']['cum_pred_upper']"], {}), "(expected_cum_pred_upper, inferences['series'][\n 'cum_pred_upper'])\n", (8288, 8358), False, 'from pandas.testing import assert_series_equal\n'), ((8405, 8476), 'pandas.Series', 'pd.Series', (['(expected_response - expected_point_pred)'], {'name': '"""point_effect"""'}), "(expected_response - expected_point_pred, name='point_effect')\n", (8414, 8476), True, 'import pandas as pd\n'), ((8503, 8588), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_effect', "inferences['series']['point_effect']"], {}), "(expected_point_effect, inferences['series']['point_effect']\n )\n", (8522, 8588), False, 'from pandas.testing import assert_series_equal\n'), ((8641, 8718), 'pandas.Series', 'pd.Series', (['(expected_response - expected_pred_lower)'], {'name': '"""point_effect_lower"""'}), "(expected_response - expected_pred_lower, name='point_effect_lower')\n", (8650, 8718), True, 'import pandas as pd\n'), ((8745, 8842), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_effect_lower', "inferences['series']['point_effect_lower']"], {}), "(expected_point_effect_lower, inferences['series'][\n 'point_effect_lower'])\n", (8764, 8842), False, 'from pandas.testing import assert_series_equal\n'), ((8895, 8972), 'pandas.Series', 'pd.Series', (['(expected_response - expected_pred_upper)'], {'name': '"""point_effect_upper"""'}), "(expected_response - expected_pred_upper, name='point_effect_upper')\n", (8904, 8972), True, 'import pandas as pd\n'), ((8999, 9096), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_point_effect_upper', "inferences['series']['point_effect_upper']"], {}), "(expected_point_effect_upper, inferences['series'][\n 'point_effect_upper'])\n", (9018, 9096), False, 'from pandas.testing import assert_series_equal\n'), ((9324, 9400), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_effect', 
"inferences['series']['cum_effect']"], {}), "(expected_cum_effect, inferences['series']['cum_effect'])\n", (9343, 9400), False, 'from pandas.testing import assert_series_equal\n'), ((9651, 9744), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_effect_lower', "inferences['series']['cum_effect_lower']"], {}), "(expected_cum_effect_lower, inferences['series'][\n 'cum_effect_lower'])\n", (9670, 9744), False, 'from pandas.testing import assert_series_equal\n'), ((9990, 10083), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['expected_cum_effect_upper', "inferences['series']['cum_effect_upper']"], {}), "(expected_cum_effect_upper, inferences['series'][\n 'cum_effect_upper'])\n", (10009, 10083), False, 'from pandas.testing import assert_series_equal\n'), ((592, 623), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100, 1)'}), '(size=(100, 1))\n', (608, 623), True, 'import numpy as np\n'), ((1561, 1589), 'numpy.cumsum', 'np.cumsum', (['expected_response'], {}), '(expected_response)\n', (1570, 1589), True, 'import numpy as np\n'), ((2933, 2954), 'numpy.cumsum', 'np.cumsum', (['point_pred'], {}), '(point_pred)\n', (2942, 2954), True, 'import numpy as np\n'), ((3135, 3165), 'numpy.cumsum', 'np.cumsum', (['expected_pred_lower'], {}), '(expected_pred_lower)\n', (3144, 3165), True, 'import numpy as np\n'), ((3364, 3394), 'numpy.cumsum', 'np.cumsum', (['expected_pred_upper'], {}), '(expected_pred_upper)\n', (3373, 3394), True, 'import numpy as np\n'), ((5651, 5681), 'numpy.concatenate', 'np.concatenate', (['[y, X]'], {'axis': '(1)'}), '([y, X], axis=1)\n', (5665, 5681), True, 'import numpy as np\n'), ((6377, 6405), 'numpy.cumsum', 'np.cumsum', (['expected_response'], {}), '(expected_response)\n', (6386, 6405), True, 'import numpy as np\n'), ((7766, 7787), 'numpy.cumsum', 'np.cumsum', (['point_pred'], {}), '(point_pred)\n', (7775, 7787), True, 'import numpy as np\n'), ((7968, 7998), 'numpy.cumsum', 'np.cumsum', 
(['expected_pred_lower'], {}), '(expected_pred_lower)\n', (7977, 7998), True, 'import numpy as np\n'), ((8197, 8227), 'numpy.cumsum', 'np.cumsum', (['expected_pred_upper'], {}), '(expected_pred_upper)\n', (8206, 8227), True, 'import numpy as np\n')] |
import os
# Pin this process to GPU 4; must be set before any CUDA-using module initialises.
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
import cv2
import numpy as np
from maskrcnn_benchmark.config import cfg
from demo.predictor import ICDARDemo, RRPNDemo
from maskrcnn_benchmark.utils.visualize import vis_image, write_result_ICDAR_RRPN2polys, zip_dir, write_result_ICDAR_MASKRRPN2polys
from PIL import Image
import time
import json
from tqdm import tqdm
from Pascal_VOC import eval_func
from link_boxes import merge
import pycocotools.mask as maskUtils
from skimage.measure import find_contours
def topoly(mask):
    """Convert a binary mask into a polygon outline plus its pixel area.

    The mask is padded by one pixel on each side so contours touching the
    image border are still closed, then the 0.5 level set is traced and the
    first contour returned as an (x, y) point list.

    Args:
        mask: 2-D binary (0/1) numpy array.

    Returns:
        (poly, area): ``poly`` is a list of [x, y] points (``[[]]`` when no
        contour is found); ``area`` is the foreground pixel count as a float.
    """
    padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
    padded_mask[1:-1, 1:-1] = mask
    contours = find_contours(padded_mask, 0.5)
    # Cast via the builtin float: the np.float alias was removed in NumPy 1.24
    # and crashed this line on modern NumPy installs.
    area = np.sum(mask).astype(float)
    if len(contours) == 0:
        return [[]], area
    # find_contours yields (row, col) pairs; flip to (x, y) order.
    # NOTE(review): coordinates include the 1-px pad offset, as in the
    # original code — confirm downstream consumers expect that.
    poly = np.fliplr(contours[0]).tolist()
    return poly, area
def get_mask(box, shape):
    """Rasterize a polygon into a binary mask.

    Args:
        box: flat or nested sequence of polygon coordinates.
        shape: (height, width) of the output mask.

    Returns:
        (mask, count): the filled uint8 mask (255 inside the polygon) and
        the number of non-zero pixels it contains.
    """
    canvas = np.zeros(shape, dtype="uint8")
    polygon = np.array(box, dtype=np.int32).reshape(-1, 2)
    cv2.fillPoly(canvas, [polygon], (255))
    return canvas, cv2.countNonZero(canvas)
def comput_mmi(area_a, area_b, intersect):
    """Compute the Maximum Mask Intersection (MMI) ratio of two text masks.

    MMI = max(intersect / area_a, intersect / area_b), i.e. the largest
    fraction of either mask that is covered by their intersection.

    Args:
        area_a: pixel area of text instance a.
        area_b: pixel area of text instance b.
        intersect: pixel area of the intersection of a and b.

    Returns:
        The MMI ratio as a float.
    """
    # Guard against division by zero. The original code referenced an
    # undefined global EPS here, raising a NameError for empty masks.
    eps = 1e-5
    if area_a == 0 or area_b == 0:
        area_a += eps
        area_b += eps
        print("the area of text is 0")
    return max(float(intersect) / area_a, float(intersect) / area_b)
def mask_nms(dets, shape, thres=0.3, conf_thres=0.5):
    """Mask-based non-maximum suppression for text instances.

    Keeps the highest-confidence instances and suppresses any instance whose
    Maximum Mask Intersection (MMI) with an already-kept instance reaches
    ``thres``.

    Args:
        dets: detections as [{'points': [[x, y], ...], 'confidence': float,
            'area': float}, ...].
        shape: (height, width) used to rasterize the polygon masks.
        thres: MMI threshold at or above which an instance is suppressed.
        conf_thres: minimum confidence for an instance to be considered.

    Returns:
        Filtered detections as [{'points': ..., 'confidence': ...}, ...].
    """
    # Keep only candidates above the confidence cut-off.
    bbox_infos = []
    areas = []
    scores = []
    for det in dets:
        if det['confidence'] > conf_thres:
            bbox_infos.append(det['points'])
            areas.append(det['area'])
            scores.append(det['confidence'])
    keep = []
    # Visit candidates from highest to lowest confidence.
    order = np.array(scores).argsort()[::-1]
    nums = len(bbox_infos)
    # The np.int alias was removed in NumPy 1.24; builtin int is equivalent.
    suppressed = np.zeros(nums, dtype=int)
    for i in range(nums):
        idx = order[i]
        if suppressed[idx] == 1:
            continue
        keep.append(idx)
        mask_a, area_a = get_mask(bbox_infos[idx], shape)
        # Start at i + 1: the original compared each box against itself
        # (mmi == 1), wasting a rasterization per kept box with no effect
        # on the result.
        for j in range(i + 1, nums):
            idx_j = order[j]
            if suppressed[idx_j] == 1:
                continue
            mask_b, area_b = get_mask(bbox_infos[idx_j], shape)
            # Pixel area shared by the two text masks.
            intersection = cv2.bitwise_and(mask_a, mask_b)
            area_intersect = cv2.countNonZero(intersection)
            mmi = comput_mmi(area_a, area_b, area_intersect)
            if mmi >= thres:
                suppressed[idx_j] = 1
    return [{'points': bbox_infos[k], 'confidence': scores[k]} for k in keep]
def res2json(result_dir):
    """Collect per-image ``.txt`` detection files into a single ``res.json``.

    Each ``.txt`` file holds one polygon per line as comma-separated integer
    coordinates. Polygons with more than two points are stored as
    ``{'points': [[x, y], ...]}`` under the key derived from the file name
    (characters 4..-4, i.e. the image id after the ``res_`` prefix).

    Args:
        result_dir: directory containing the ``.txt`` result files.

    Returns:
        Path of the written ``res.json`` file.
    """
    res_dict = {}
    for rf in tqdm(os.listdir(result_dir)):
        if rf[-4:] != '.txt':
            continue
        respath = os.path.join(result_dir, rf)
        # with-block closes the handle; the original leaked an open file
        # per result via open(...).readlines().
        with open(respath, 'r') as res_file:
            reslines = res_file.readlines()
        reskey = rf[4:-4]
        polys = []
        for l in reslines:
            # builtin int replaces np.int, which was removed in NumPy 1.24.
            poly_pts = np.array(l.replace('\n', '').split(','), int).reshape(-1, 2)
            if poly_pts.shape[0] > 2:
                polys.append({'points': poly_pts.tolist()})
        res_dict[reskey] = polys
    json_tarf = os.path.join(result_dir, 'res.json')
    if os.path.isfile(json_tarf):
        print('Json file found, removing it...')
        os.remove(json_tarf)
    # with-block also fixes the never-closed output handle of the original.
    with open(json_tarf, 'w') as j_f:
        json.dump(res_dict, j_f)
    print('json dump done', json_tarf)
    return json_tarf
config_file = 'configs/Mask_RRPN/e2e_rrpn_R_50_C4_1x_LSVT_val_MASK_RFPN_word_margin.yaml' #'#"configs/ICDAR2019_det_RRPN/e2e_rrpn_R_50_C4_1x_LSVT_val_4scales_angle_norm.yaml" #e2e_rrpn_R_50_C4_1x_ICDAR13_15_trial_test.yaml
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
# cfg.freeze()
# cfg.MODEL.WEIGHT = 'models/IC-13-15-17-Trial/model_0155000.pth'
vis = True
merge_box = cfg.TEST.MERGE_BOX
# Results are written under results/<config name>/<weight name>[_merge_box].
result_dir = os.path.join('results', config_file.split('/')[-1].split('.')[0], cfg.MODEL.WEIGHT.split('/')[-1].split('.')[0])
if merge_box:
    result_dir += '_merge_box'
if not os.path.isdir(result_dir):
    os.makedirs(result_dir)
coco_demo = RRPNDemo(
    cfg,
    min_image_size=896,
    confidence_threshold=0.87,
)
dataset_name = cfg.TEST.DATASET_NAME
# Per-dataset image directory and [start, end) image-id range ('off').
testing_dataset = {
    'LSVT': {
        'testing_image_dir': '../datasets/LSVT/train_full_images_0/train_full_images_0/',
        'off': [0, 3000]
    },
    'ArT': {
        'testing_image_dir': '../datasets/ArT/ArT_detect_train/train_images',
        'off': [4000, 5603]
    },
}
image_dir = testing_dataset[dataset_name]['testing_image_dir']
# vocab_dir = testing_dataset[dataset_name]['test_vocal_dir']
off_group = testing_dataset[dataset_name]['off']
# load image and then run prediction
# image_dir = '../datasets/ICDAR13/Challenge2_Test_Task12_Images/'
# imlist = os.listdir(image_dir)[off_group[0]:off_group[1]]
print('************* META INFO ***************')
print('config_file:', config_file)
print('result_dir:', result_dir)
print('image_dir:', image_dir)
print('weights:', cfg.MODEL.WEIGHT)
print('merge_box:', merge_box)
print('***************************************')
thres=0.8 # mask NMS threshold: text regions whose MMI exceeds thres are removed
conf_thres=0.4
#num_images = len(imlist)
cnt = 0
num_images = off_group[1] - off_group[0]
for idx in range(off_group[0], off_group[1]):
image = 'gt_' + str(idx) + '.jpg'
impath = os.path.join(image_dir, image)
# print('image:', impath)
img = cv2.imread(impath)
cnt += 1
tic = time.time()
predictions, bounding_boxes = coco_demo.run_on_opencv_image(img)
toc = time.time()
print('time cost:', str(toc - tic)[:6], '|', str(cnt) + '/' + str(num_images))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
bboxes_np = bounding_boxes.bbox.data.cpu().numpy()
bboxes_np[:, 2:4] /= cfg.MODEL.RRPN.GT_BOX_MARGIN
mask_list = bounding_boxes.get_field('mask')
score_list = bounding_boxes.get_field('scores')
if merge_box:
bboxes_np_reverse = bboxes_np.copy()
bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
bboxes_np_reverse = merge(bboxes_np_reverse)
bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
bboxes_np = bboxes_np_reverse
width, height = bounding_boxes.size
if vis:
# predictions.show()
# print('mask_list:', len(mask_list), mask_list[0].shape)
mask_total = np.zeros(img.shape[:2])
for idx in range(len(mask_list)):
# print('box_list:', bboxes_np[idx])
mask = mask_list[idx]
mask_np = mask.data.cpu().numpy()
mask_total += mask_np[0] * 255
mask_im = Image.fromarray(mask_total.astype(np.uint8))
scale = 768.0 / mask_im.size[0]
scaled_size = (int(scale * mask_im.size[0]), int(scale * mask_im.size[1]))
mask_im = mask_im.resize(scaled_size)
#mask_im.show()
mask_im.save('re_img/mask_' + image, 'jpeg')
pil_image = vis_image(Image.fromarray(img), bboxes_np)
pil_image = pil_image.resize(scaled_size)
pil_image.save('re_img/box_' + image, 'jpeg')
# time.sleep(20)
# else:
poly_list = []
# mask_total = np.zeros(mask_list[0].shape[1:])
res_list = []
for idx in range(len(mask_list)):
# print('box_list:', bboxes_np[idx])
mask = mask_list[idx]
mask_np = mask.data.cpu().numpy()[0]
score = score_list[idx].data.cpu().numpy()
# print('mask_np', mask_np.shape, np.unique(mask_np))
contours = cv2.findContours(((mask_np > 0) * 1).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#print('mask_np:', np.unique(mask_np), contours)
if len(contours[1]) > 0:
poly_list.append(contours[1][0].reshape(-1, 2))
'''
poly, area = topoly(mask_np)
res_list.append({
'points': poly,
'confidence': score,
'area': area,
'size': mask_np.shape
})
'''
# res_list = mask_nms(res_list, res_list[0]['size'], thres, conf_thres)
# for res in res_list:
# poly_list.append(np.array(res['points']).reshape(-1, 2))
write_result_ICDAR_MASKRRPN2polys(image[:-4], poly_list, threshold=0.7, result_dir=result_dir, height=height, width=width)
#im_file, dets, threshold, result_dir, height, width
#cv2.imshow('win', predictions)
#cv2.waitKey(0)
if dataset_name == 'IC15':
zipfilename = os.path.join(result_dir, 'submit_' + config_file.split('/')[-1].split('.')[0] + '_' + cfg.MODEL.WEIGHT.split('/')[-1].split('.')[0] + '.zip')
if os.path.isfile(zipfilename):
print('Zip file exists, removing it...')
os.remove(zipfilename)
zip_dir(result_dir, zipfilename)
comm = 'curl -i -F "submissionFile=@' + zipfilename + '" http://127.0.0.1:8080/evaluate'
# print(comm)
print(os.popen(comm, 'r'))
elif dataset_name == 'LSVT':
# input_json_path = 'results/e2e_rrpn_R_50_C4_1x_LSVT_val/model_0190000/res.json'
gt_json_path = '../datasets/LSVT/train_full_labels.json'
# to json
input_json_path = res2json(result_dir)
eval_func(input_json_path, gt_json_path)
elif dataset_name == 'ArT':
# input_json_path = 'results/e2e_rrpn_R_50_C4_1x_LSVT_val/model_0190000/res.json'
gt_json_path = '../datasets/ArT/ArT_detect_train/train_labels.json'
# to json
input_json_path = res2json(result_dir)
eval_func(input_json_path, gt_json_path)
else:
pass
| [
"os.remove",
"numpy.sum",
"cv2.bitwise_and",
"os.popen",
"maskrcnn_benchmark.config.cfg.MODEL.WEIGHT.split",
"cv2.fillPoly",
"os.path.isfile",
"Pascal_VOC.eval_func",
"maskrcnn_benchmark.config.cfg.merge_from_list",
"skimage.measure.find_contours",
"os.path.join",
"cv2.cvtColor",
"maskrcnn_b... | [((5213, 5245), 'maskrcnn_benchmark.config.cfg.merge_from_file', 'cfg.merge_from_file', (['config_file'], {}), '(config_file)\n', (5232, 5245), False, 'from maskrcnn_benchmark.config import cfg\n'), ((5279, 5324), 'maskrcnn_benchmark.config.cfg.merge_from_list', 'cfg.merge_from_list', (["['MODEL.DEVICE', 'cuda']"], {}), "(['MODEL.DEVICE', 'cuda'])\n", (5298, 5324), False, 'from maskrcnn_benchmark.config import cfg\n'), ((5713, 5773), 'demo.predictor.RRPNDemo', 'RRPNDemo', (['cfg'], {'min_image_size': '(896)', 'confidence_threshold': '(0.87)'}), '(cfg, min_image_size=896, confidence_threshold=0.87)\n', (5721, 5773), False, 'from demo.predictor import ICDARDemo, RRPNDemo\n'), ((646, 710), 'numpy.zeros', 'np.zeros', (['(mask.shape[0] + 2, mask.shape[1] + 2)'], {'dtype': 'np.uint8'}), '((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n', (654, 710), True, 'import numpy as np\n'), ((763, 794), 'skimage.measure.find_contours', 'find_contours', (['padded_mask', '(0.5)'], {}), '(padded_mask, 0.5)\n', (776, 794), False, 'from skimage.measure import find_contours\n'), ((1136, 1166), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""uint8"""'}), "(shape, dtype='uint8')\n", (1144, 1166), True, 'import numpy as np\n'), ((1223, 1257), 'cv2.fillPoly', 'cv2.fillPoly', (['tmp_mask', '[tmp]', '(255)'], {}), '(tmp_mask, [tmp], 255)\n', (1235, 1257), False, 'import cv2\n'), ((2413, 2441), 'numpy.zeros', 'np.zeros', (['nums'], {'dtype': 'np.int'}), '(nums, dtype=np.int)\n', (2421, 2441), True, 'import numpy as np\n'), ((3928, 3950), 'os.listdir', 'os.listdir', (['result_dir'], {}), '(result_dir)\n', (3938, 3950), False, 'import os\n'), ((3989, 4003), 'tqdm.tqdm', 'tqdm', (['res_list'], {}), '(res_list)\n', (3993, 4003), False, 'from tqdm import tqdm\n'), ((4652, 4688), 'os.path.join', 'os.path.join', (['result_dir', '"""res.json"""'], {}), "(result_dir, 'res.json')\n", (4664, 4688), False, 'import os\n'), ((4699, 4724), 'os.path.isfile', 'os.path.isfile', 
(['json_tarf'], {}), '(json_tarf)\n', (4713, 4724), False, 'import os\n'), ((4845, 4869), 'json.dump', 'json.dump', (['res_dict', 'j_f'], {}), '(res_dict, j_f)\n', (4854, 4869), False, 'import json\n'), ((5640, 5665), 'os.path.isdir', 'os.path.isdir', (['result_dir'], {}), '(result_dir)\n', (5653, 5665), False, 'import os\n'), ((5672, 5695), 'os.makedirs', 'os.makedirs', (['result_dir'], {}), '(result_dir)\n', (5683, 5695), False, 'import os\n'), ((7004, 7034), 'os.path.join', 'os.path.join', (['image_dir', 'image'], {}), '(image_dir, image)\n', (7016, 7034), False, 'import os\n'), ((7077, 7095), 'cv2.imread', 'cv2.imread', (['impath'], {}), '(impath)\n', (7087, 7095), False, 'import cv2\n'), ((7121, 7132), 'time.time', 'time.time', ([], {}), '()\n', (7130, 7132), False, 'import time\n'), ((7214, 7225), 'time.time', 'time.time', ([], {}), '()\n', (7223, 7225), False, 'import time\n'), ((7325, 7361), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7337, 7361), False, 'import cv2\n'), ((9870, 9996), 'maskrcnn_benchmark.utils.visualize.write_result_ICDAR_MASKRRPN2polys', 'write_result_ICDAR_MASKRRPN2polys', (['image[:-4]', 'poly_list'], {'threshold': '(0.7)', 'result_dir': 'result_dir', 'height': 'height', 'width': 'width'}), '(image[:-4], poly_list, threshold=0.7,\n result_dir=result_dir, height=height, width=width)\n', (9903, 9996), False, 'from maskrcnn_benchmark.utils.visualize import vis_image, write_result_ICDAR_RRPN2polys, zip_dir, write_result_ICDAR_MASKRRPN2polys\n'), ((10310, 10337), 'os.path.isfile', 'os.path.isfile', (['zipfilename'], {}), '(zipfilename)\n', (10324, 10337), False, 'import os\n'), ((10426, 10458), 'maskrcnn_benchmark.utils.visualize.zip_dir', 'zip_dir', (['result_dir', 'zipfilename'], {}), '(result_dir, zipfilename)\n', (10433, 10458), False, 'from maskrcnn_benchmark.utils.visualize import vis_image, write_result_ICDAR_RRPN2polys, zip_dir, write_result_ICDAR_MASKRRPN2polys\n'), ((1328, 
1354), 'cv2.countNonZero', 'cv2.countNonZero', (['tmp_mask'], {}), '(tmp_mask)\n', (1344, 1354), False, 'import cv2\n'), ((4785, 4805), 'os.remove', 'os.remove', (['json_tarf'], {}), '(json_tarf)\n', (4794, 4805), False, 'import os\n'), ((7740, 7764), 'link_boxes.merge', 'merge', (['bboxes_np_reverse'], {}), '(bboxes_np_reverse)\n', (7745, 7764), False, 'from link_boxes import merge\n'), ((8047, 8070), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (8055, 8070), True, 'import numpy as np\n'), ((10398, 10420), 'os.remove', 'os.remove', (['zipfilename'], {}), '(zipfilename)\n', (10407, 10420), False, 'import os\n'), ((10583, 10602), 'os.popen', 'os.popen', (['comm', '"""r"""'], {}), "(comm, 'r')\n", (10591, 10602), False, 'import os\n'), ((10847, 10887), 'Pascal_VOC.eval_func', 'eval_func', (['input_json_path', 'gt_json_path'], {}), '(input_json_path, gt_json_path)\n', (10856, 10887), False, 'from Pascal_VOC import eval_func\n'), ((858, 870), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (864, 870), True, 'import numpy as np\n'), ((1011, 1033), 'numpy.fliplr', 'np.fliplr', (['contours[0]'], {}), '(contours[0])\n', (1020, 1033), True, 'import numpy as np\n'), ((1175, 1204), 'numpy.array', 'np.array', (['box'], {'dtype': 'np.int32'}), '(box, dtype=np.int32)\n', (1183, 1204), True, 'import numpy as np\n'), ((2921, 2952), 'cv2.bitwise_and', 'cv2.bitwise_and', (['mask_a', 'mask_b'], {}), '(mask_a, mask_b)\n', (2936, 2952), False, 'import cv2\n'), ((2980, 3008), 'cv2.countNonZero', 'cv2.countNonZero', (['merge_mask'], {}), '(merge_mask)\n', (2996, 3008), False, 'import cv2\n'), ((4059, 4087), 'os.path.join', 'os.path.join', (['result_dir', 'rf'], {}), '(result_dir, rf)\n', (4071, 4087), False, 'import os\n'), ((8638, 8658), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (8653, 8658), False, 'from PIL import Image\n'), ((11141, 11181), 'Pascal_VOC.eval_func', 'eval_func', (['input_json_path', 'gt_json_path'], {}), 
'(input_json_path, gt_json_path)\n', (11150, 11181), False, 'from Pascal_VOC import eval_func\n'), ((2299, 2315), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2307, 2315), True, 'import numpy as np\n'), ((5534, 5561), 'maskrcnn_benchmark.config.cfg.MODEL.WEIGHT.split', 'cfg.MODEL.WEIGHT.split', (['"""/"""'], {}), "('/')\n", (5556, 5561), False, 'from maskrcnn_benchmark.config import cfg\n'), ((10246, 10273), 'maskrcnn_benchmark.config.cfg.MODEL.WEIGHT.split', 'cfg.MODEL.WEIGHT.split', (['"""/"""'], {}), "('/')\n", (10268, 10273), False, 'from maskrcnn_benchmark.config import cfg\n')] |
import unittest
import numpy as np
from ocgis.util.helpers import iter_array
class Test(unittest.TestCase):
def test_iter_array(self):
values = np.random.rand(2,2,4,4)
mask = np.random.random_integers(0,1,values.shape)
values = np.ma.array(values,mask=mask)
for idx in iter_array(values):
self.assertFalse(values.mask[idx])
self.assertEqual(len(list(iter_array(values,use_mask=True))),len(values.compressed()))
self.assertEqual(len(list(iter_array(values,use_mask=False))),len(values.data.flatten()))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"unittest.main",
"ocgis.util.helpers.iter_array",
"numpy.ma.array",
"numpy.random.rand",
"numpy.random.random_integers"
] | [((650, 665), 'unittest.main', 'unittest.main', ([], {}), '()\n', (663, 665), False, 'import unittest\n'), ((159, 185), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)', '(4)', '(4)'], {}), '(2, 2, 4, 4)\n', (173, 185), True, 'import numpy as np\n'), ((198, 243), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(1)', 'values.shape'], {}), '(0, 1, values.shape)\n', (223, 243), True, 'import numpy as np\n'), ((259, 289), 'numpy.ma.array', 'np.ma.array', (['values'], {'mask': 'mask'}), '(values, mask=mask)\n', (270, 289), True, 'import numpy as np\n'), ((308, 326), 'ocgis.util.helpers.iter_array', 'iter_array', (['values'], {}), '(values)\n', (318, 326), False, 'from ocgis.util.helpers import iter_array\n'), ((409, 442), 'ocgis.util.helpers.iter_array', 'iter_array', (['values'], {'use_mask': '(True)'}), '(values, use_mask=True)\n', (419, 442), False, 'from ocgis.util.helpers import iter_array\n'), ((504, 538), 'ocgis.util.helpers.iter_array', 'iter_array', (['values'], {'use_mask': '(False)'}), '(values, use_mask=False)\n', (514, 538), False, 'from ocgis.util.helpers import iter_array\n')] |
from __future__ import print_function
import photutils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import gklib as gk
from astropy.stats import SigmaClip
class CircularBackgroundSubractor(object):
"""
A class to calculate and subtract the background in a FITS image after masking out a circle of radius r at
position x and y.
NOTE:
For the HPF FRD measurements, have r ~330
"""
def __init__(self,data,x,y,r,box_size=(205,205)):
self.data = data
self.x = x
self.y = y
self.r = r
self.box_size = box_size
def subtract_background(self,subtract_min_value=True,plot_background=False,ax=None):
"""
A function to subtract the background for the FRD tests
INPUT:
subtract_min_value - subtracts the .min value (no negative numbers)
plot_background - if True, plots the estimated background with the meshes it used.
"""
self.aper = photutils.CircularAperture(positions=(self.x,self.y),r=self.r)
# get the mask from the aperture
# hack
mask = self.aper.to_mask()[0].to_image(self.data.shape)
# use sigma clipping
sigma_clip = SigmaClip(sigma=3., iters=10)
bkg_estimator = photutils.MedianBackground()
self.bkg = photutils.Background2D(self.data, self.box_size, filter_size=(3, 3),
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator,mask=mask,edge_method="crop")
self.data_background = self.data - self.bkg.background
if subtract_min_value:
self.subtracted_value = self.data_background.min()
print("Subtracted min value:",self.subtracted_value)
self.data_background -= self.subtracted_value
if plot_background:
if ax==None:
self.fig, self.ax = plt.subplots()
else:
self.ax = ax
im = self.ax.imshow(self.bkg.background, origin='lower', cmap='Greys_r')
self.ax.set_xlim(0,self.bkg.background.shape[1])
self.ax.set_ylim(0,self.bkg.background.shape[0])
self.ax.set_title("Estimated Background",y=1.02)
self.ax.set_xlabel("X pixels")
self.ax.set_ylabel("Y pixels")
if ax==None:
# Only do this if ax is not supplied
# Don't know how to deal with this without passing the figure explicitly
self.fig.colorbar(im)
self.bkg.plot_meshes(outlines=True, color='#1f77b4',ax=self.ax)
return self.data_background
def howell_center(postage_stamp):
"""
Howell centroiding, from Howell's Handbook of CCD astronomy
INPUT:
postage_stamp - A 2d numpy array to do the centroiding
OUTPUT:
x and y center of the numpy array
NOTES:
Many thanks to <NAME> and the MINERVAphot.py pipeline for this method
see here: https://github.com/TGBeatty/MINERVAphot/blob/master/MINERVAphot.py
"""
xpixels = np.arange(postage_stamp.shape[1])
ypixels = np.arange(postage_stamp.shape[0])
I = np.sum(postage_stamp, axis=0)
J = np.sum(postage_stamp, axis=1)
Isub = I-np.sum(I)/I.size
Isub[Isub<0] = 0
Jsub = J-np.sum(J)/J.size
Jsub[Jsub<0] = 0
xc = np.sum(Isub*xpixels)/np.sum(Isub)
yc = np.sum(Jsub*ypixels)/np.sum(Jsub)
return xc, yc
def get_encircled_energy_and_rad_at_EE(data,x,y,radii,get_rad_at_EE=0.9,plot=False,ax=None):
"""
A function to calculate the encircled energy at a given position, summing up the flux in apertures of size *radii*,
normalizing the EE to the last flux value.
INPUT:
data - a two dimensional np.array
x - x centroid
y - y centroid
radii - an array of radii to calculate the EE
get_rad_at_EE - the EE of which to calculate the radius value
plot - plot a EE vs radii plot
OUTPUT:
df
a dataframe with two columns:
- radii
- EE
rad_at_EE
- the radius when EE is a given input value
NOTE:
Assumes that the data is background subtracted
EXAMPLE:
radii = np.arange(1,450)
df_ee, r_at_EE90 = phothelp.get_encircled_energy(fimg.data,fimg.xcenter,fimg.ycenter,radii,plot=True)
"""
apertures = [photutils.CircularAperture((x,y), r=r) for r in radii]
phot_table = photutils.aperture_photometry(data, apertures)
df = phot_table[phot_table.colnames[3:]].to_pandas()
EE = df.loc[0]/df.loc[0][-1]
df = pd.DataFrame(list(zip(radii,EE)),columns=["radii","EE"])
#r_at_EE = df[df["EE"] > get_rad_at_EE]["radii"].values[0]
r_at_EE = np.interp(get_rad_at_EE,df.EE.values,df.radii.values)
if plot:
if ax==None:
fig, ax = plt.subplots()
ax.plot(radii,EE.values)
ax.set_xlabel("Radii")
ax.set_ylabel("Encircled Energy")
ax.set_title("EE"+gk.num2str(get_rad_at_EE*100,1)+"% at r="+str(r_at_EE))
ax.vlines(r_at_EE,0,get_rad_at_EE,color="green",linestyle="--")
ax.hlines(get_rad_at_EE,0,r_at_EE,color="green",linestyle="--")
ax.minorticks_on()
return df, r_at_EE
| [
"numpy.sum",
"photutils.aperture_photometry",
"photutils.MedianBackground",
"photutils.CircularAperture",
"photutils.Background2D",
"gklib.num2str",
"numpy.arange",
"numpy.interp",
"astropy.stats.SigmaClip",
"matplotlib.pyplot.subplots"
] | [((3081, 3114), 'numpy.arange', 'np.arange', (['postage_stamp.shape[1]'], {}), '(postage_stamp.shape[1])\n', (3090, 3114), True, 'import numpy as np\n'), ((3129, 3162), 'numpy.arange', 'np.arange', (['postage_stamp.shape[0]'], {}), '(postage_stamp.shape[0])\n', (3138, 3162), True, 'import numpy as np\n'), ((3171, 3200), 'numpy.sum', 'np.sum', (['postage_stamp'], {'axis': '(0)'}), '(postage_stamp, axis=0)\n', (3177, 3200), True, 'import numpy as np\n'), ((3209, 3238), 'numpy.sum', 'np.sum', (['postage_stamp'], {'axis': '(1)'}), '(postage_stamp, axis=1)\n', (3215, 3238), True, 'import numpy as np\n'), ((4423, 4469), 'photutils.aperture_photometry', 'photutils.aperture_photometry', (['data', 'apertures'], {}), '(data, apertures)\n', (4452, 4469), False, 'import photutils\n'), ((4709, 4764), 'numpy.interp', 'np.interp', (['get_rad_at_EE', 'df.EE.values', 'df.radii.values'], {}), '(get_rad_at_EE, df.EE.values, df.radii.values)\n', (4718, 4764), True, 'import numpy as np\n'), ((1007, 1071), 'photutils.CircularAperture', 'photutils.CircularAperture', ([], {'positions': '(self.x, self.y)', 'r': 'self.r'}), '(positions=(self.x, self.y), r=self.r)\n', (1033, 1071), False, 'import photutils\n'), ((1242, 1272), 'astropy.stats.SigmaClip', 'SigmaClip', ([], {'sigma': '(3.0)', 'iters': '(10)'}), '(sigma=3.0, iters=10)\n', (1251, 1272), False, 'from astropy.stats import SigmaClip\n'), ((1297, 1325), 'photutils.MedianBackground', 'photutils.MedianBackground', ([], {}), '()\n', (1323, 1325), False, 'import photutils\n'), ((1345, 1504), 'photutils.Background2D', 'photutils.Background2D', (['self.data', 'self.box_size'], {'filter_size': '(3, 3)', 'sigma_clip': 'sigma_clip', 'bkg_estimator': 'bkg_estimator', 'mask': 'mask', 'edge_method': '"""crop"""'}), "(self.data, self.box_size, filter_size=(3, 3),\n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator, mask=mask,\n edge_method='crop')\n", (1367, 1504), False, 'import photutils\n'), ((3351, 3373), 'numpy.sum', 'np.sum', (['(Isub * 
xpixels)'], {}), '(Isub * xpixels)\n', (3357, 3373), True, 'import numpy as np\n'), ((3372, 3384), 'numpy.sum', 'np.sum', (['Isub'], {}), '(Isub)\n', (3378, 3384), True, 'import numpy as np\n'), ((3394, 3416), 'numpy.sum', 'np.sum', (['(Jsub * ypixels)'], {}), '(Jsub * ypixels)\n', (3400, 3416), True, 'import numpy as np\n'), ((3415, 3427), 'numpy.sum', 'np.sum', (['Jsub'], {}), '(Jsub)\n', (3421, 3427), True, 'import numpy as np\n'), ((4351, 4390), 'photutils.CircularAperture', 'photutils.CircularAperture', (['(x, y)'], {'r': 'r'}), '((x, y), r=r)\n', (4377, 4390), False, 'import photutils\n'), ((3253, 3262), 'numpy.sum', 'np.sum', (['I'], {}), '(I)\n', (3259, 3262), True, 'import numpy as np\n'), ((3304, 3313), 'numpy.sum', 'np.sum', (['J'], {}), '(J)\n', (3310, 3313), True, 'import numpy as np\n'), ((4820, 4834), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4832, 4834), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1928), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1926, 1928), True, 'import matplotlib.pyplot as plt\n'), ((4967, 5001), 'gklib.num2str', 'gk.num2str', (['(get_rad_at_EE * 100)', '(1)'], {}), '(get_rad_at_EE * 100, 1)\n', (4977, 5001), True, 'import gklib as gk\n')] |
#############################################
# Copy and modify based on DiGCN
# https://github.com/flyingtango/DiGCN
#############################################
import os.path as osp
import numpy as np
import scipy.sparse as sp
import networkx as nx
import pandas as pd
import os
import torch
import sys
import torch_geometric.transforms as T
from torch_geometric.data import Data
from torch_geometric.utils import to_undirected, is_undirected, to_networkx
from networkx.algorithms.components import is_weakly_connected
from torch_geometric.utils import add_remaining_self_loops, add_self_loops, remove_self_loops
from torch_scatter import scatter_add
import scipy
from torch_geometric.data import Dataset
def load_citation_link(root="./data"):
g = load_npz_dataset(root)
adj = g['A']
coo = adj.tocoo()
values = coo.data
indices = np.vstack((coo.row, coo.col))
indices = torch.from_numpy(indices).long()
data = Data(x=values, edge_index=indices, edge_weight=None, y=None)
return [data]
def citation_datasets(root="./data", alpha=0.1, data_split = 10):
# path = os.path.join(save_path, dataset)
#os.makedirs(path, exist_ok=True)
#dataset_path = os.path.join(path, '{}.npz'.format(dataset))
g = load_npz_dataset(root)
adj, features, labels = g['A'], g['X'], g['z']
coo = adj.tocoo()
values = coo.data
indices = np.vstack((coo.row, coo.col))
indices = torch.from_numpy(indices).long()
features = torch.from_numpy(features.todense()).float()
# Set new random splits:
# * 20 * num_classes labels for training
# * 500 labels for validation
# * the rest for testing
masks = {}
masks['train'], masks['val'], masks['test'] = [], [] , []
for split in range(data_split):
mask = train_test_split(labels, seed=split, train_examples_per_class=20, val_size=500, test_size=None)
mask['train'] = torch.from_numpy(mask['train']).bool()
mask['val'] = torch.from_numpy(mask['val']).bool()
mask['test'] = torch.from_numpy(mask['test']).bool()
masks['train'].append(mask['train'].unsqueeze(-1))
masks['val'].append(mask['val'].unsqueeze(-1))
masks['test'].append(mask['test'].unsqueeze(-1))
labels = torch.from_numpy(labels).long()
data = Data(x=features, edge_index=indices, edge_weight=None, y=labels)
data.train_mask = torch.cat(masks['train'], axis=-1)
data.val_mask = torch.cat(masks['val'], axis=-1)
data.test_mask = torch.cat(masks['test'], axis=-1)
return [data]
def load_npz_dataset(file_name):
"""Load a graph from a Numpy binary file.
Parameters
----------
file_name : str
Name of the file to load.
Returns
-------
graph : dict
Dictionary that contains:
* 'A' : The adjacency matrix in sparse matrix format
* 'X' : The attribute matrix in sparse matrix format
* 'z' : The ground truth class labels
* Further dictionaries mapping node, class and attribute IDs
"""
if not file_name.endswith('.npz'):
file_name += file_name.split('/')[-2]+'.npz'
with np.load(file_name, allow_pickle=True) as loader:
loader = dict(loader)
edge_index = loader['adj_indices'].copy()
A = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],
loader['adj_indptr']), shape=loader['adj_shape'])
X = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],
loader['attr_indptr']), shape=loader['attr_shape'])
z = loader.get('labels')
graph = {
'A': A,
'X': X,
'z': z
}
idx_to_node = loader.get('idx_to_node')
if idx_to_node:
idx_to_node = idx_to_node.tolist()
graph['idx_to_node'] = idx_to_node
idx_to_attr = loader.get('idx_to_attr')
if idx_to_attr:
idx_to_attr = idx_to_attr.tolist()
graph['idx_to_attr'] = idx_to_attr
idx_to_class = loader.get('idx_to_class')
if idx_to_class:
idx_to_class = idx_to_class.tolist()
graph['idx_to_class'] = idx_to_class
return graph
def sample_per_class(random_state, labels, num_examples_per_class, forbidden_indices=None):
num_samples = labels.shape[0]
num_classes = labels.max()+1
sample_indices_per_class = {index: [] for index in range(num_classes)}
# get indices sorted by class
for class_index in range(num_classes):
for sample_index in range(num_samples):
if labels[sample_index] == class_index:
if forbidden_indices is None or sample_index not in forbidden_indices:
sample_indices_per_class[class_index].append(sample_index)
# get specified number of indices for each class
return np.concatenate(
[random_state.choice(sample_indices_per_class[class_index], num_examples_per_class, replace=False)
for class_index in range(len(sample_indices_per_class))
])
def get_train_val_test_split(random_state,
labels,
train_examples_per_class=None, val_examples_per_class=None,
test_examples_per_class=None,
train_size=None, val_size=None, test_size=None):
num_samples = labels.shape[0]
num_classes = labels.max()+1
remaining_indices = list(range(num_samples))
if train_examples_per_class is not None:
train_indices = sample_per_class(
random_state, labels, train_examples_per_class)
else:
# select train examples with no respect to class distribution
train_indices = random_state.choice(
remaining_indices, train_size, replace=False)
if val_examples_per_class is not None:
val_indices = sample_per_class(
random_state, labels, val_examples_per_class, forbidden_indices=train_indices)
else:
remaining_indices = np.setdiff1d(remaining_indices, train_indices)
val_indices = random_state.choice(
remaining_indices, val_size, replace=False)
forbidden_indices = np.concatenate((train_indices, val_indices))
if test_examples_per_class is not None:
test_indices = sample_per_class(random_state, labels, test_examples_per_class,
forbidden_indices=forbidden_indices)
elif test_size is not None:
remaining_indices = np.setdiff1d(remaining_indices, forbidden_indices)
test_indices = random_state.choice(
remaining_indices, test_size, replace=False)
else:
test_indices = np.setdiff1d(remaining_indices, forbidden_indices)
# assert that there are no duplicates in sets
assert len(set(train_indices)) == len(train_indices)
assert len(set(val_indices)) == len(val_indices)
assert len(set(test_indices)) == len(test_indices)
# assert sets are mutually exclusive
assert len(set(train_indices) - set(val_indices)
) == len(set(train_indices))
assert len(set(train_indices) - set(test_indices)
) == len(set(train_indices))
assert len(set(val_indices) - set(test_indices)) == len(set(val_indices))
if test_size is None and test_examples_per_class is None:
# all indices must be part of the split
assert len(np.concatenate(
(train_indices, val_indices, test_indices))) == num_samples
if train_examples_per_class is not None:
train_labels = labels[train_indices]
train_sum = np.sum(train_labels, axis=0)
# assert all classes have equal cardinality
assert np.unique(train_sum).size == 1
if val_examples_per_class is not None:
val_labels = labels[val_indices]
val_sum = np.sum(val_labels, axis=0)
# assert all classes have equal cardinality
assert np.unique(val_sum).size == 1
if test_examples_per_class is not None:
test_labels = labels[test_indices]
test_sum = np.sum(test_labels, axis=0)
# assert all classes have equal cardinality
assert np.unique(test_sum).size == 1
return train_indices, val_indices, test_indices
def train_test_split(labels, seed, train_examples_per_class=None, val_examples_per_class=None, test_examples_per_class=None, train_size=None, val_size=None, test_size=None):
random_state = np.random.RandomState(seed)
train_indices, val_indices, test_indices = get_train_val_test_split(
random_state, labels, train_examples_per_class, val_examples_per_class, test_examples_per_class, train_size, val_size, test_size)
#print('number of training: {}'.format(len(train_indices)))
#print('number of validation: {}'.format(len(val_indices)))
#print('number of testing: {}'.format(len(test_indices)))
train_mask = np.zeros((labels.shape[0], 1), dtype=int)
train_mask[train_indices, 0] = 1
train_mask = np.squeeze(train_mask, 1)
val_mask = np.zeros((labels.shape[0], 1), dtype=int)
val_mask[val_indices, 0] = 1
val_mask = np.squeeze(val_mask, 1)
test_mask = np.zeros((labels.shape[0], 1), dtype=int)
test_mask[test_indices, 0] = 1
test_mask = np.squeeze(test_mask, 1)
mask = {}
mask['train'] = train_mask
mask['val'] = val_mask
mask['test'] = test_mask
return mask
if __name__ == "__main__":
data = citation_datasets(root="../../dataset/data/nips_data/cora_ml/raw/", dataset='cora_ml')
print(data.train_mask.shape)
# print_dataset_info()
# get_npz_data(dataset='amazon_photo')
### already fixed split dataset!!!
#if opt.dataset == 'all':
# for mode in ['cora', 'cora_ml','citeseer','dblp','pubmed']:
# get_npz_data(dataset = mode)
#else:
# get_npz_data(dataset = opt.dataset) | [
"numpy.load",
"numpy.sum",
"numpy.unique",
"numpy.zeros",
"torch.cat",
"numpy.random.RandomState",
"numpy.setdiff1d",
"scipy.sparse.csr_matrix",
"torch_geometric.data.Data",
"numpy.squeeze",
"numpy.vstack",
"numpy.concatenate",
"torch.from_numpy"
] | [((857, 886), 'numpy.vstack', 'np.vstack', (['(coo.row, coo.col)'], {}), '((coo.row, coo.col))\n', (866, 886), True, 'import numpy as np\n'), ((950, 1010), 'torch_geometric.data.Data', 'Data', ([], {'x': 'values', 'edge_index': 'indices', 'edge_weight': 'None', 'y': 'None'}), '(x=values, edge_index=indices, edge_weight=None, y=None)\n', (954, 1010), False, 'from torch_geometric.data import Data\n'), ((1390, 1419), 'numpy.vstack', 'np.vstack', (['(coo.row, coo.col)'], {}), '((coo.row, coo.col))\n', (1399, 1419), True, 'import numpy as np\n'), ((2306, 2370), 'torch_geometric.data.Data', 'Data', ([], {'x': 'features', 'edge_index': 'indices', 'edge_weight': 'None', 'y': 'labels'}), '(x=features, edge_index=indices, edge_weight=None, y=labels)\n', (2310, 2370), False, 'from torch_geometric.data import Data\n'), ((2394, 2428), 'torch.cat', 'torch.cat', (["masks['train']"], {'axis': '(-1)'}), "(masks['train'], axis=-1)\n", (2403, 2428), False, 'import torch\n'), ((2452, 2484), 'torch.cat', 'torch.cat', (["masks['val']"], {'axis': '(-1)'}), "(masks['val'], axis=-1)\n", (2461, 2484), False, 'import torch\n'), ((2507, 2540), 'torch.cat', 'torch.cat', (["masks['test']"], {'axis': '(-1)'}), "(masks['test'], axis=-1)\n", (2516, 2540), False, 'import torch\n'), ((6227, 6271), 'numpy.concatenate', 'np.concatenate', (['(train_indices, val_indices)'], {}), '((train_indices, val_indices))\n', (6241, 6271), True, 'import numpy as np\n'), ((8466, 8493), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (8487, 8493), True, 'import numpy as np\n'), ((8914, 8955), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], 1)'], {'dtype': 'int'}), '((labels.shape[0], 1), dtype=int)\n', (8922, 8955), True, 'import numpy as np\n'), ((9010, 9035), 'numpy.squeeze', 'np.squeeze', (['train_mask', '(1)'], {}), '(train_mask, 1)\n', (9020, 9035), True, 'import numpy as np\n'), ((9051, 9092), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], 1)'], {'dtype': 'int'}), 
'((labels.shape[0], 1), dtype=int)\n', (9059, 9092), True, 'import numpy as np\n'), ((9141, 9164), 'numpy.squeeze', 'np.squeeze', (['val_mask', '(1)'], {}), '(val_mask, 1)\n', (9151, 9164), True, 'import numpy as np\n'), ((9181, 9222), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], 1)'], {'dtype': 'int'}), '((labels.shape[0], 1), dtype=int)\n', (9189, 9222), True, 'import numpy as np\n'), ((9274, 9298), 'numpy.squeeze', 'np.squeeze', (['test_mask', '(1)'], {}), '(test_mask, 1)\n', (9284, 9298), True, 'import numpy as np\n'), ((3164, 3201), 'numpy.load', 'np.load', (['file_name'], {'allow_pickle': '(True)'}), '(file_name, allow_pickle=True)\n', (3171, 3201), True, 'import numpy as np\n'), ((3305, 3417), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (["(loader['adj_data'], loader['adj_indices'], loader['adj_indptr'])"], {'shape': "loader['adj_shape']"}), "((loader['adj_data'], loader['adj_indices'], loader[\n 'adj_indptr']), shape=loader['adj_shape'])\n", (3318, 3417), True, 'import scipy.sparse as sp\n'), ((3453, 3569), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (["(loader['attr_data'], loader['attr_indices'], loader['attr_indptr'])"], {'shape': "loader['attr_shape']"}), "((loader['attr_data'], loader['attr_indices'], loader[\n 'attr_indptr']), shape=loader['attr_shape'])\n", (3466, 3569), True, 'import scipy.sparse as sp\n'), ((6056, 6102), 'numpy.setdiff1d', 'np.setdiff1d', (['remaining_indices', 'train_indices'], {}), '(remaining_indices, train_indices)\n', (6068, 6102), True, 'import numpy as np\n'), ((7634, 7662), 'numpy.sum', 'np.sum', (['train_labels'], {'axis': '(0)'}), '(train_labels, axis=0)\n', (7640, 7662), True, 'import numpy as np\n'), ((7864, 7890), 'numpy.sum', 'np.sum', (['val_labels'], {'axis': '(0)'}), '(val_labels, axis=0)\n', (7870, 7890), True, 'import numpy as np\n'), ((8094, 8121), 'numpy.sum', 'np.sum', (['test_labels'], {'axis': '(0)'}), '(test_labels, axis=0)\n', (8100, 8121), True, 'import numpy as np\n'), ((901, 926), 'torch.from_numpy', 
'torch.from_numpy', (['indices'], {}), '(indices)\n', (917, 926), False, 'import torch\n'), ((1434, 1459), 'torch.from_numpy', 'torch.from_numpy', (['indices'], {}), '(indices)\n', (1450, 1459), False, 'import torch\n'), ((2263, 2287), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (2279, 2287), False, 'import torch\n'), ((6540, 6590), 'numpy.setdiff1d', 'np.setdiff1d', (['remaining_indices', 'forbidden_indices'], {}), '(remaining_indices, forbidden_indices)\n', (6552, 6590), True, 'import numpy as np\n'), ((6725, 6775), 'numpy.setdiff1d', 'np.setdiff1d', (['remaining_indices', 'forbidden_indices'], {}), '(remaining_indices, forbidden_indices)\n', (6737, 6775), True, 'import numpy as np\n'), ((1914, 1945), 'torch.from_numpy', 'torch.from_numpy', (["mask['train']"], {}), "(mask['train'])\n", (1930, 1945), False, 'import torch\n'), ((1975, 2004), 'torch.from_numpy', 'torch.from_numpy', (["mask['val']"], {}), "(mask['val'])\n", (1991, 2004), False, 'import torch\n'), ((2035, 2065), 'torch.from_numpy', 'torch.from_numpy', (["mask['test']"], {}), "(mask['test'])\n", (2051, 2065), False, 'import torch\n'), ((7435, 7493), 'numpy.concatenate', 'np.concatenate', (['(train_indices, val_indices, test_indices)'], {}), '((train_indices, val_indices, test_indices))\n', (7449, 7493), True, 'import numpy as np\n'), ((7730, 7750), 'numpy.unique', 'np.unique', (['train_sum'], {}), '(train_sum)\n', (7739, 7750), True, 'import numpy as np\n'), ((7958, 7976), 'numpy.unique', 'np.unique', (['val_sum'], {}), '(val_sum)\n', (7967, 7976), True, 'import numpy as np\n'), ((8189, 8208), 'numpy.unique', 'np.unique', (['test_sum'], {}), '(test_sum)\n', (8198, 8208), True, 'import numpy as np\n')] |
import torch, time
import numpy as np
import joblib
import logging as log
from training_pipeline.topic_finder import TopicFinder
cuda = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.empty_cache()
class modelClass:
def __init__(self, config, statistical_similarity_matrix, vocabulary, hierarchical_matrix, hyperbolic_embeddings):
self.config = config
self.topic_finder_obj = TopicFinder(config, vocabulary, hyperbolic_embeddings)
self.model_output_hierarchy = {}
self.topic_finder_time = {}
self.statistical_similarity_matrix = statistical_similarity_matrix
self.hierarchical_matrix = hierarchical_matrix
self.vocabulary = vocabulary
self.precompute_hierarchy_aware_docTerm_matrix()
def p_onmf(self, X, rank, H_init=None, W_init=None):
alpha = self.config['orthogonality_alpha']
max_iter = self.config['iterations']
m, n = X.shape
W = torch.rand(m, rank).to(cuda) if isinstance(W_init, type(None)) else W_init
H = torch.rand(rank, n).to(cuda) if isinstance(H_init, type(None)) else H_init
for itr in range(max_iter):
enum = torch.mm(X, torch.transpose(H, 0, 1))
denom = torch.mm(W, torch.mm(H, torch.transpose(H, 0, 1)))
W = torch.nan_to_num(torch.mul(W, torch.div(enum, denom)))
HHTH = torch.mm(torch.mm(H, torch.transpose(H, 0, 1)), H)
enum = torch.mm(torch.transpose(W, 0, 1), X) + torch.mul(H, alpha)
denom = torch.mm(torch.mm(torch.transpose(W, 0, 1), W), H) + torch.mul(HHTH, 2.0 * alpha)
H = torch.nan_to_num(torch.mul(H, torch.div(enum, denom)))
return W, H
# Remove this -> Reduce and rerun
def reduceTopics(self, H):
topic_number, _ = H.shape
# count zero rows
sum_h = torch.sum(H, dim=1)
diff = len(sum_h[(sum_h == 0)])
h = H[(sum_h != 0)]
if (len(h) == 0):
return 0
# count redundant rows
i = torch.mm(h, torch.transpose(h, 0, 1))
# scale values between 0 and 1
den, _ = torch.max(i, axis=1)
scaled_i = torch.div(i, den)
curr = 0
inds = torch.tensor([]).to(cuda)
while (curr < len(h)):
# find indices where vectors are similar
similar = torch.flatten(((scaled_i[curr] > self.config['reduce_thresh']) & (scaled_i[curr] < 1)).nonzero())
s = similar.detach().cpu().tolist()
i = inds.detach().cpu().tolist()
common = set(s).intersection(i)
# print("Common",common)
if (len(common) != 0):
similar = torch.cat((similar, torch.tensor([curr]).to(cuda)))
else:
similar = similar[(similar != curr)]
# if there are vectors similar to current topic
if (len(similar)):
inds = torch.cat((inds, similar), 0) # add to list of indices to remove
inds = torch.unique(inds)
curr += 1
while (curr < len(h) and (curr in inds)):
# go to next topic that has not been added to list of indices to remove
curr += 1
diff1 = len(torch.unique(inds))
# if there are zero rows or redundant rows
updated_topic_number = -1
if (diff + diff1 > 0):
updated_topic_number = topic_number - diff - diff1
print("Reduced k from {} ---> {}".format(topic_number, updated_topic_number))
return updated_topic_number if updated_topic_number!=-1 else updated_topic_number
def get_document_for_topic(self, documents_split, topic_idx):
# Find documents belonging to this topic
return torch.flatten((documents_split == topic_idx).nonzero())
def get_topWords_for_topic(self, topic_word_distribution, top_words=20):
inds = torch.argsort(topic_word_distribution, descending=True)[:top_words] # get top 20 words and return the indices in bow representation.
inds_of_inds = torch.where(topic_word_distribution[inds] != 0)[0] # keep those indices where prob of word belonging to topic is not 0.
topWords = inds[inds_of_inds]
return topWords
def save_model(self, W, H, parent):
model_file_path = "{}/nmf_{}.pkl".format(self.config['result_folder'], parent)
joblib.dump((W.detach().cpu().numpy(), H.detach().cpu().numpy()), model_file_path)
def write_topics(self, current_topic, depth, print_words=20):
output = open('{}/hierarchical_struture.txt'.format(self.config['result_folder']), 'a', encoding="utf-8")
tabs = "\t" * depth
topic = " ".join(current_topic[:print_words])
output.write("{}{}\n".format(tabs, topic))
output.close()
def precompute_hierarchy_aware_docTerm_matrix(self):
hierarchical_matrix = self.hierarchical_matrix.clone().to(cuda)
statistical_similarity_matrix = self.statistical_similarity_matrix
# for i in range(len(self.vocabulary)):
# if i not in parent_topic_words:
# third_matrix_prime[i] = 0
self.hierarchy_aware_matrix = torch.mm(statistical_similarity_matrix.to(cuda), hierarchical_matrix.to(cuda))
del hierarchical_matrix
del statistical_similarity_matrix
def train(self, document_idx, current_hierarchy, parent_topic_words):
parent = current_hierarchy.split("_")[-1]
depth = int(current_hierarchy.split("_")[-2])
logging_text = "{depth}_{parent_topic}".format(depth=depth + 1, parent_topic=parent)
if not(len(document_idx) > 0 and depth + 1 < self.config['max_depth']):
log.info("Too few documents {prefix}".format(prefix=logging_text))
#STOP RECURSION FOR THIS BRANCH
return
log.info("Exploring {} {}".format(parent, depth))
if depth not in self.topic_finder_time.keys():
self.topic_finder_time[depth] = []
if depth==0:
statistical_similarity_matrix = self.statistical_similarity_matrix.to(cuda)
W, H = self.p_onmf(statistical_similarity_matrix, rank = self.config['k_max'])
if self.config['reduceK'] == True:
updated_topic_number = self.reduceTopics(H)
# check if redundant or zero rows were found
if (updated_topic_number != H.shape[0]):
W, H = self.p_onmf(statistical_similarity_matrix, rank=updated_topic_number)
del statistical_similarity_matrix
else:
time_start = time.time()
H_init = self.topic_finder_obj.topicfinder(parent_topic_words)
self.topic_finder_time[depth].append(time.time() - time_start)
W, H = self.p_onmf(self.hierarchy_aware_matrix[document_idx], rank=H_init.shape[0], H_init=H_init.to(cuda))
del H_init
self.save_model(W, H, parent)
_, topics_documents = torch.max(W, dim=1)
del W
for topic_idx, topic_word_distribution in enumerate(H):
document_idx = self.get_document_for_topic(topics_documents, topic_idx)
topic_word_idx = self.get_topWords_for_topic(topic_word_distribution, top_words=50)
current_topic_words = list(np.asanyarray(self.vocabulary)[topic_word_idx.detach().cpu().numpy()])
self.write_topics(current_topic_words, depth, print_words=10)
explore_hierarchy = current_hierarchy.split("_")[:-2] + "_" + str(depth+1) + "_" + parent + " " + str(topic_idx)
self.train(document_idx, explore_hierarchy, current_topic_words)
del H
| [
"torch.unique",
"torch.where",
"numpy.asanyarray",
"torch.argsort",
"torch.cat",
"time.time",
"torch.mul",
"torch.max",
"torch.cuda.is_available",
"torch.cuda.empty_cache",
"torch.rand",
"torch.tensor",
"torch.sum",
"torch.div",
"training_pipeline.topic_finder.TopicFinder",
"torch.tran... | [((198, 222), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (220, 222), False, 'import torch, time\n'), ((160, 185), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (183, 185), False, 'import torch, time\n'), ((423, 477), 'training_pipeline.topic_finder.TopicFinder', 'TopicFinder', (['config', 'vocabulary', 'hyperbolic_embeddings'], {}), '(config, vocabulary, hyperbolic_embeddings)\n', (434, 477), False, 'from training_pipeline.topic_finder import TopicFinder\n'), ((1862, 1881), 'torch.sum', 'torch.sum', (['H'], {'dim': '(1)'}), '(H, dim=1)\n', (1871, 1881), False, 'import torch, time\n'), ((2136, 2156), 'torch.max', 'torch.max', (['i'], {'axis': '(1)'}), '(i, axis=1)\n', (2145, 2156), False, 'import torch, time\n'), ((2176, 2193), 'torch.div', 'torch.div', (['i', 'den'], {}), '(i, den)\n', (2185, 2193), False, 'import torch, time\n'), ((7000, 7019), 'torch.max', 'torch.max', (['W'], {'dim': '(1)'}), '(W, dim=1)\n', (7009, 7019), False, 'import torch, time\n'), ((2053, 2077), 'torch.transpose', 'torch.transpose', (['h', '(0)', '(1)'], {}), '(h, 0, 1)\n', (2068, 2077), False, 'import torch, time\n'), ((3249, 3267), 'torch.unique', 'torch.unique', (['inds'], {}), '(inds)\n', (3261, 3267), False, 'import torch, time\n'), ((3911, 3966), 'torch.argsort', 'torch.argsort', (['topic_word_distribution'], {'descending': '(True)'}), '(topic_word_distribution, descending=True)\n', (3924, 3966), False, 'import torch, time\n'), ((4068, 4115), 'torch.where', 'torch.where', (['(topic_word_distribution[inds] != 0)'], {}), '(topic_word_distribution[inds] != 0)\n', (4079, 4115), False, 'import torch, time\n'), ((6623, 6634), 'time.time', 'time.time', ([], {}), '()\n', (6632, 6634), False, 'import torch, time\n'), ((1202, 1226), 'torch.transpose', 'torch.transpose', (['H', '(0)', '(1)'], {}), '(H, 0, 1)\n', (1217, 1226), False, 'import torch, time\n'), ((1501, 1520), 'torch.mul', 'torch.mul', (['H', 'alpha'], {}), '(H, 
alpha)\n', (1510, 1520), False, 'import torch, time\n'), ((1594, 1622), 'torch.mul', 'torch.mul', (['HHTH', '(2.0 * alpha)'], {}), '(HHTH, 2.0 * alpha)\n', (1603, 1622), False, 'import torch, time\n'), ((2226, 2242), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (2238, 2242), False, 'import torch, time\n'), ((2929, 2958), 'torch.cat', 'torch.cat', (['(inds, similar)', '(0)'], {}), '((inds, similar), 0)\n', (2938, 2958), False, 'import torch, time\n'), ((3018, 3036), 'torch.unique', 'torch.unique', (['inds'], {}), '(inds)\n', (3030, 3036), False, 'import torch, time\n'), ((972, 991), 'torch.rand', 'torch.rand', (['m', 'rank'], {}), '(m, rank)\n', (982, 991), False, 'import torch, time\n'), ((1059, 1078), 'torch.rand', 'torch.rand', (['rank', 'n'], {}), '(rank, n)\n', (1069, 1078), False, 'import torch, time\n'), ((1273, 1297), 'torch.transpose', 'torch.transpose', (['H', '(0)', '(1)'], {}), '(H, 0, 1)\n', (1288, 1297), False, 'import torch, time\n'), ((1346, 1368), 'torch.div', 'torch.div', (['enum', 'denom'], {}), '(enum, denom)\n', (1355, 1368), False, 'import torch, time\n'), ((1412, 1436), 'torch.transpose', 'torch.transpose', (['H', '(0)', '(1)'], {}), '(H, 0, 1)\n', (1427, 1436), False, 'import torch, time\n'), ((1470, 1494), 'torch.transpose', 'torch.transpose', (['W', '(0)', '(1)'], {}), '(W, 0, 1)\n', (1485, 1494), False, 'import torch, time\n'), ((1669, 1691), 'torch.div', 'torch.div', (['enum', 'denom'], {}), '(enum, denom)\n', (1678, 1691), False, 'import torch, time\n'), ((6759, 6770), 'time.time', 'time.time', ([], {}), '()\n', (6768, 6770), False, 'import torch, time\n'), ((7323, 7353), 'numpy.asanyarray', 'np.asanyarray', (['self.vocabulary'], {}), '(self.vocabulary)\n', (7336, 7353), True, 'import numpy as np\n'), ((1559, 1583), 'torch.transpose', 'torch.transpose', (['W', '(0)', '(1)'], {}), '(W, 0, 1)\n', (1574, 1583), False, 'import torch, time\n'), ((2711, 2731), 'torch.tensor', 'torch.tensor', (['[curr]'], {}), '([curr])\n', (2723, 
2731), False, 'import torch, time\n')] |
# -*- coding: utf-8 -*-
"""SHERIFS
Seismic Hazard and Earthquake Rates In Fault Systems
Version 1.0
This code open the interface to select the options explored in the logic tree
@author: <NAME>
"""
import numpy as np
#import tkinter as tk
#from tkinter import ttk, Label, Text, INSERT,END, StringVar,Listbox,Button,Entry,Checkbutton
#from tkinter.ttk import Combobox
#from tkinter import messagebox
class S_LT():
def __init__(self,File_geom,File_prop,Run_Name):
self.File_geom = File_geom
self.File_prop = File_prop
self.Run_Name = Run_Name
self.initialize()
def initialize(self):
self.get_available_scaling_laws()
self.windows_nd_ScL()
# structure of branch : [model,ScL,use_all_data,dimentio_used,µ,bmin_bmax,bg_hyp]
self.branches = []
for self.model_i in self.selected_Model:
self.nb_mfd_hyp()
self.bmin_hyp = []
self.bmax_hyp = []
for self.i in range(self.nb_of_mfd):
self.nb_b_hyp()
self.bmin_hyp.append(self.bmin_hyp_i)
self.bmax_hyp.append(self.bmax_hyp_i)
self.nb_sc_hyp()
self.nb_bg_hyp()
for ScL_i,use_all_i,dim_i in zip(self.selected_ScL,self.use_all_ScL_data,self.dimention_used) :
index_mfd = 0
for mfd_i in self.mfd_hyp:
for bmin_i,bmax_i in zip(self.bmin_hyp[index_mfd],self.bmax_hyp[index_mfd]):
for bg_hyp_i in self.bg_names:
for sc_name in self.sc_names:
branch_i = [self.model_i,ScL_i,use_all_i,dim_i,mfd_i,str(bmin_i)+'_'+str(bmax_i),bg_hyp_i,sc_name]
self.branches.append(branch_i)
index_mfd += 1
# file containing the branches names in the logic tree
LT_log_name = str(self.Run_Name)+'/LT_log.txt'
LT_log = open(LT_log_name, 'w')
LT_log.write('Models\n')
for self.model_i in self.selected_Model:
LT_log.write(self.model_i+'\t')
LT_log.write('\nSaling Laws\n')
for ScL_i,use_all_i,dim_i in zip(self.selected_ScL,self.use_all_ScL_data,self.dimention_used) :
if use_all_i == True :
str_all_data = 'a' # ' a ' is for 'all data is used'
else :
str_all_data = 'm' # ' m ' is for 'mechanic specific data only'
LT_log.write(ScL_i+' '+dim_i+' '+str_all_data+'\t')
LT_log.write('\nMFD\tb value\n')
index_mfd = 0
for mfd_i in self.mfd_hyp:
LT_log.write('MFD_'+mfd_i+'\t')
for bmin_i,bmax_i in zip(self.bmin_hyp[index_mfd],self.bmax_hyp[index_mfd]):
LT_log.write('bmin_'+str(bmin_i)+'_bmax_'+bmax_i+'\t')
LT_log.write('\n')
index_mfd += 1
LT_log.write('Background\n')
for bg_hyp_i in self.bg_names:
LT_log.write('bg_'+bg_hyp_i+'\t')
LT_log.write('\nscenario set\n')
for sc_name in self.sc_names:
LT_log.write('sc_'+sc_name+'\t')
LT_log.close()
'''#####################'''
def windows_nd_ScL(self):
self.w_ScL_nb = tk.Tk()
self.w_ScL_nb.title('Number of scaling laws')
label = Label(self.w_ScL_nb, text="\nHow many Scaling laws do you want to use?")
label.pack()
self.nb_of_scl = Entry(self.w_ScL_nb)
self.nb_of_scl.pack()
self.nb_of_scl.insert(INSERT,1)
bou_ok_ScL = Button(self.w_ScL_nb, text=u'OK', command = self.OK_nb_Scl)
bou_ok_ScL.pack()
self.w_ScL_nb.mainloop()
def OK_nb_Scl(self) :
self.nb_of_scl = int(self.nb_of_scl.get())
self.w_ScL_nb.destroy()
self.windows_ScL()
def windows_ScL(self):
self.var = {}
self.dimention_used_box = {}
self.ScLSelect_dim_used = {}
self.ScLname_used = {}
self.ScL_name = {}
self.w_ScL = tk.Tk()
self.w_ScL.title('Scaling laws')
self.w_ScL.grid()
row_i = 1
for i in range(self.nb_of_scl):
self.ScLname_used["ScLname{0}".format(i)] = StringVar()
self.ScL_name["ScLname{0}".format(i)] = Combobox(self.w_ScL, textvariable=self.ScLname_used["ScLname{0}".format(i)], values=self.available_scaling_laws,state = 'readonly', height = 5, width = 30)
self.ScL_name["ScLname{0}".format(i)].current(0)
self.ScL_name["ScLname{0}".format(i)].grid(column=0,row=row_i)
self.var["use_all_ScL_data_{0}".format(i)] = StringVar()
self.use_all_ScL_data_button = Checkbutton(self.w_ScL, text="Respect fault kinematic", variable=self.var["use_all_ScL_data_{0}".format(i)],onvalue="False", offvalue="True")
self.use_all_ScL_data_button.grid(column=4,row= row_i)
self.ScLSelect_dim_used["dimention_used_{0}".format(i)] = StringVar()
self.dimention_used_box["dimention_used_{0}".format(i)] = Combobox(self.w_ScL, textvariable=self.ScLSelect_dim_used["dimention_used_{0}".format(i)], values=['Area','Length'],state = 'readonly', height = 5, width = 30)
self.dimention_used_box["dimention_used_{0}".format(i)].current(0)
self.dimention_used_box["dimention_used_{0}".format(i)].grid(column=8,row= row_i)
row_i += 1
bou_ok_ScL = Button(self.w_ScL, text=u'OK', command = self.OK_Scl)
bou_ok_ScL.grid(column=8,row= row_i )
self.w_ScL.mainloop()
def OK_Scl(self) :
self.dimention_used = []
self.use_all_ScL_data = []
self.selected_ScL = []
for i in range(self.nb_of_scl):
self.selected_ScL.append(self.ScL_name["ScLname{0}".format(i)].get())
if self.var["use_all_ScL_data_{0}".format(i)].get() == 'False':
self.use_all_ScL_data.append(False)
else :
self.use_all_ScL_data.append(True)
self.dimention_used.append(self.dimention_used_box["dimention_used_{0}".format(i)].get())
self.w_ScL.destroy()
check_ScL = []
for selected_ScL_i,use_all_ScL_data,dimention_used in zip(self.selected_ScL,self.use_all_ScL_data,self.dimention_used):
line = str(selected_ScL_i)+str(use_all_ScL_data)+str(dimention_used)
if not line in check_ScL:
check_ScL.append(line)
else:
messagebox.showerror('Error','One scaling law has been selected twice\n please start again')
self.win_model()
'''#####################'''
def win_model(self):
self.get_available_models()
self.w_model = tk.Tk()
self.ModelSelect = StringVar()
label = Label(self.w_model, text="\nWhich model do you want to use?")
label.pack()
self.Box_Model = Combobox(self.w_model, values=self.available_models,state = 'readonly', height = 5, width = 30)
self.Box_Model.current(0)
self.Box_Model.pack()
bou_add_Model = Button(self.w_model, text=u'Add Model', command = self.Add_Model)
bou_add_Model.pack()
self.list_Model = Listbox(self.w_model, height = 5, width = 30)
self.list_Model.pack()
bou_del_Model = Button(self.w_model, text=u'Delete Selected Model', command = self.del_Model)
bou_del_Model.pack()
label = Label(self.w_model, text="\n\n\nWhen ready click \"OK\"")
label.pack()
bou_ok_Model = Button(self.w_model, text=u'OK', command = self.ok_Model)
bou_ok_Model.pack()
self.w_model.mainloop()
def Add_Model(self):
len_list = self.list_Model.size()
compteur = 0
for i in range(len_list):
if self.Box_Model.get() == self.list_Model.get(i):
compteur = compteur + 1
if compteur == 0:
self.list_Model.insert(END, self.Box_Model.get())
else:
messagebox.showerror('Error','Model already selected')
def del_Model(self):
items = self.list_Model.curselection()
pos=0
for i in items:
idx = int(i) - pos
self.list_Model.delete(idx,idx)
pos = pos + 1
def ok_Model(self):
self.selected_Model = []
longueur_liste = self.list_Model.size()
for i in range(longueur_liste):
if len(self.list_Model.get(i))!=0:
self.selected_Model.append(self.list_Model.get(i))
if len(self.selected_Model)==0:
messagebox.showerror('Error','Select at least one model')
self.w_model.destroy()
'''#####################'''
def nb_mfd_hyp(self):
self.w_mfd_nb = tk.Tk()
self.w_mfd_nb.title('Number of MFD hypothesis for model : '+str(self.model_i))
label = Label(self.w_mfd_nb, text='\nHow many MFD hypothesis for model : '+str(self.model_i)+' do you want to use?')
label.pack()
self.nb_of_mfd = Entry(self.w_mfd_nb)
self.nb_of_mfd.pack()
self.nb_of_mfd.insert(INSERT,1)
bou_ok = Button(self.w_mfd_nb, text=u'OK', command = self.OK_nb_mfd_hyp)
bou_ok.pack()
self.w_mfd_nb.mainloop()
def OK_nb_mfd_hyp(self) :
self.nb_of_mfd = int(self.nb_of_mfd.get())
self.w_mfd_nb.destroy()
self.mfd_hyp()
def mfd_hyp(self):
self.mfd = {}
self.w_mfd = tk.Tk()
self.w_mfd.grid()
self.w_mfd.title('Hypothesis on MFD for '+str(self.model_i))
row_i = 1
for i in range(self.nb_of_mfd):
label = Label(self.w_mfd, text="Hypothesis "+str(i+1))
label.grid(column=0,row=row_i)
self.mfd["nb_mfd_{0}".format(i)] = Entry(self.w_mfd)
self.mfd["nb_mfd_{0}".format(i)].grid(column=1,row= row_i)
if i == 0:
self.mfd["nb_mfd_{0}".format(i)].insert(INSERT,'GR')
elif i == 1:
self.mfd["nb_mfd_{0}".format(i)].insert(INSERT,'YC')
row_i +=1
bou_ok = Button(self.w_mfd, text=u'OK', command = self.OK_mfd_hyp)
bou_ok.grid(column=4,row=row_i+1)
#self.w_shear_mod.mainloop()
def OK_mfd_hyp(self) :
self.mfd_hyp = []
for i in range(self.nb_of_mfd):
self.mfd_hyp.append(self.mfd["nb_mfd_{0}".format(i)].get())
self.w_mfd.destroy()
'''#####################'''
def nb_b_hyp(self):
self.w_b_nb = tk.Tk()
self.w_b_nb.title('Number of b value distribution for model : '+str(self.model_i) +'\nand MFD : ' + str(self.mfd_hyp[self.i]))
label = Label(self.w_b_nb, text='\nHow many b value distribution hypothesis for model : '+str(self.model_i)+'\nand MFD : ' + str(self.mfd_hyp[self.i])+' do you want to use?')
label.pack()
self.nb_of_b = Entry(self.w_b_nb)
self.nb_of_b.pack()
self.nb_of_b.insert(INSERT,1)
bou_ok = Button(self.w_b_nb, text=u'OK', command = self.OK_nb_b_hyp)
bou_ok.pack()
self.w_b_nb.mainloop()
def OK_nb_b_hyp(self) :
self.nb_of_b = int(self.nb_of_b.get())
self.w_b_nb.destroy()
self.b_hyp()
def b_hyp(self):
self.bmin = {}
self.bmax = {}
self.w_b = tk.Tk()
self.w_b.grid()
self.w_b.title('Hypothesis on b value')
row_i = 0
label = Label(self.w_b, text='\nHypothesis on b value for model '+str(self.model_i)+' and MFD : ' + str(self.mfd_hyp[self.i]))
label.grid(column=0,row=row_i)
row_i +=1
for i in range(self.nb_of_b):
label = Label(self.w_b, text="Hypothesis "+str(i+1)+" for b min and b max")
label.grid(column=0,row=row_i)
self.bmin["nb_bmin_{0}".format(i)] = Entry(self.w_b)
self.bmin["nb_bmin_{0}".format(i)].grid(column=4,row= row_i)
self.bmin["nb_bmin_{0}".format(i)].insert(INSERT,0.9)
self.bmax["nb_bmax_{0}".format(i)] = Entry(self.w_b)
self.bmax["nb_bmax_{0}".format(i)].grid(column=6,row= row_i)
self.bmax["nb_bmax_{0}".format(i)].insert(INSERT,1.1)
row_i +=1
bou_ok = Button(self.w_b, text=u'OK', command = self.OK_b_hyp)
bou_ok.grid(column=4,row=row_i+1)
self.w_b.mainloop()
def OK_b_hyp(self) :
self.bmin_hyp_i = []
self.bmax_hyp_i = []
for i in range(self.nb_of_b):
self.bmin_hyp_i.append(self.bmin["nb_bmin_{0}".format(i)].get())
self.bmax_hyp_i.append(self.bmax["nb_bmax_{0}".format(i)].get())
self.w_b.destroy()
'''#####################'''
def nb_sc_hyp(self):
self.w_sc_nb = tk.Tk()
self.w_sc_nb.title('Number of scenario sets for model : '+str(self.model_i))
label = Label(self.w_sc_nb, text='\nHow many scenario sets for model : '+str(self.model_i)+' do you want to use?')
label.pack()
self.nb_of_sc = Entry(self.w_sc_nb)
self.nb_of_sc.pack()
self.nb_of_sc.insert(INSERT,1)
bou_ok = Button(self.w_sc_nb, text=u'OK', command = self.OK_nb_sc_hyp)
bou_ok.pack()
self.w_sc_nb.mainloop()
def OK_nb_sc_hyp(self) :
self.nb_of_sc = int(self.nb_of_sc.get())
self.w_sc_nb.destroy()
self.sc_hyp()
def sc_hyp(self):
self.sc = {}
self.w_sc = tk.Tk()
self.w_sc.grid()
self.w_sc.title('Hypothesis on scenario sets for '+str(self.model_i))
row_i = 0
for i in range(self.nb_of_sc):
label = Label(self.w_sc, text="Hypothesis "+str(i+1))
label.grid(column=0,row=row_i)
self.sc["sc_{0}".format(i)] = Entry(self.w_sc)
self.sc["sc_{0}".format(i)].grid(column=6,row= row_i)
self.sc["sc_{0}".format(i)].insert(INSERT,'Set_'+str(row_i+1))
row_i +=1
bou_ok = Button(self.w_sc, text=u'OK', command = self.OK_sc_hyp)
bou_ok.grid(column=4,row=row_i+1)
self.w_sc.mainloop()
def OK_sc_hyp(self) :
self.sc_names = []
for i in range(self.nb_of_sc):
self.sc_names.append(self.sc["sc_{0}".format(i)].get())
self.w_sc.destroy()
'''#####################'''
def nb_bg_hyp(self):
self.w_bg_nb = tk.Tk()
self.w_bg_nb.title('Number of background hypothesis for model '+str(self.model_i))
label = Label(self.w_bg_nb, text='\nHow many background hypothesis for model '+str(self.model_i)+' do you want to use?')
label.pack()
self.nb_of_bg = Entry(self.w_bg_nb)
self.nb_of_bg.pack()
self.nb_of_bg.insert(INSERT,1)
bou_ok = Button(self.w_bg_nb, text=u'OK', command = self.OK_nb_bg_hyp)
bou_ok.pack()
self.w_bg_nb.mainloop()
def OK_nb_bg_hyp(self) :
self.nb_of_bg = int(self.nb_of_bg.get())
self.w_bg_nb.destroy()
self.bg_hyp()
def bg_hyp(self):
self.bg = {}
self.w_bg = tk.Tk()
self.w_bg.grid()
self.w_bg.title('Name of the background hypotheses for '+str(self.model_i))
row_i = 0
label = Label(self.w_bg, text='\nBackground hypotheses for '+str(self.model_i))
label.grid(column=0,row=row_i)
row_i +=1
for i in range(self.nb_of_bg):
label = Label(self.w_bg, text="Hypothesis "+str(i+1))
label.grid(column=0,row=row_i)
self.bg["bg_{0}".format(i)] = Entry(self.w_bg)
self.bg["bg_{0}".format(i)].grid(column=6,row= row_i)
self.bg["bg_{0}".format(i)].insert(INSERT,'BG_'+str(row_i))
row_i +=1
bou_ok = Button(self.w_bg, text=u'OK', command = self.OK_bg_hyp)
bou_ok.grid(column=4,row=row_i+1)
self.w_bg.mainloop()
def OK_bg_hyp(self) :
self.bg_names = []
for i in range(self.nb_of_bg):
self.bg_names.append(self.bg["bg_{0}".format(i)].get())
self.w_bg.destroy()
'''#####################'''
def get_available_models(self):
NomFichier_InfosZonage = self.File_geom
InfosZonage = np.genfromtxt(NomFichier_InfosZonage,dtype=[('U100'),('U100'),('f8'),('f8')],skip_header = 1)
Column_model_name = list(map(lambda i : InfosZonage[i][0],range(len(InfosZonage))))
self.available_models = list(np.unique(np.array(Column_model_name)))
def get_available_scaling_laws(self):
self.available_scaling_laws = ['WC1994','Le2010','HB08','TMG2017']
if __name__=="__main__":
app = S_LT()
| [
"numpy.array",
"numpy.genfromtxt"
] | [((17321, 17413), 'numpy.genfromtxt', 'np.genfromtxt', (['NomFichier_InfosZonage'], {'dtype': "['U100', 'U100', 'f8', 'f8']", 'skip_header': '(1)'}), "(NomFichier_InfosZonage, dtype=['U100', 'U100', 'f8', 'f8'],\n skip_header=1)\n", (17334, 17413), True, 'import numpy as np\n'), ((17556, 17583), 'numpy.array', 'np.array', (['Column_model_name'], {}), '(Column_model_name)\n', (17564, 17583), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: vpsem
.. moduleauthor:: <NAME> <<EMAIL>>
Script to compute absorption of x-ray by the gas in a VP-SEM.
"""
###############################################################################
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
import numpy as np
# Local modules.
# Project modules.
from xray.mac.models.chantler2005 import Chantler2005
# Globals and constants variables.
def run_water():
density_g_cm3 = 1.0e-5
length_mm = 10.0
length_cm = length_mm*1.0e-1
cu_ka_eV = 8046.0
cu_la_eV = 930.0
chantler2005 = Chantler2005()
for energy_eV in [cu_ka_eV, cu_la_eV]:
total_mac_cm2_g = 0.0
for atomic_number, weight_fraction in [(1, 0.111894), (8, 0.888106)]:
atomic_number = 1
mac_cm2_g = chantler2005.compute_mac_cm2_g(energy_eV, atomic_number)
total_mac_cm2_g += mac_cm2_g*weight_fraction
absorption = np.exp(-total_mac_cm2_g*density_g_cm3*length_cm)
print(energy_eV, total_mac_cm2_g,absorption)
if __name__ == '__main__':
run_water()
| [
"numpy.exp",
"xray.mac.models.chantler2005.Chantler2005"
] | [((1276, 1290), 'xray.mac.models.chantler2005.Chantler2005', 'Chantler2005', ([], {}), '()\n', (1288, 1290), False, 'from xray.mac.models.chantler2005 import Chantler2005\n'), ((1633, 1685), 'numpy.exp', 'np.exp', (['(-total_mac_cm2_g * density_g_cm3 * length_cm)'], {}), '(-total_mac_cm2_g * density_g_cm3 * length_cm)\n', (1639, 1685), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from scipy.spatial import ConvexHull
from hysynth.utils.hybrid_system import HybridSystemConvexHull
from hysynth.utils.hybrid_system.library import construct_variable_name as get_var
def test_instantiation_check():
with pytest.raises(ValueError):
_ = HybridSystemConvexHull("Test", [get_var(1)])
@pytest.fixture()
def mock_hs():
hs = HybridSystemConvexHull("Test", [get_var(1), get_var(2)])
# add locations
hs.add_location("Q1")
hs.add_location("Q2")
hs.add_location("Q3")
# add edges
hs.add_edge("Q1", "Q1")
hs.add_edge("Q1", "Q2")
hs.add_edge("Q1", "Q3")
return hs
@pytest.fixture()
def mock_conv_hull():
# create the random cloud
n_points = 500
n_dim = 5
cloud = np.random.rand(n_points, n_dim)
# create the convex hull
hull = ConvexHull(cloud, "Qx")
return hull
def test_adding_convex_hull_invariant(mock_hs, mock_conv_hull):
mock_hs.set_invariant("Q1", mock_conv_hull)
def test_adding_invalid_invariant(mock_hs):
with pytest.raises(TypeError):
mock_hs.set_invariant("Random!")
def test_adding_convex_hull_guard(mock_hs, mock_conv_hull):
mock_hs.set_guard(("Q1", "Q1"), mock_conv_hull)
def test_adding_invalid_guard(mock_hs):
with pytest.raises(TypeError):
mock_hs.set_guard("Random!")
| [
"hysynth.utils.hybrid_system.library.construct_variable_name",
"pytest.fixture",
"pytest.raises",
"numpy.random.rand",
"scipy.spatial.ConvexHull"
] | [((347, 363), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (361, 363), False, 'import pytest\n'), ((663, 679), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (677, 679), False, 'import pytest\n'), ((777, 808), 'numpy.random.rand', 'np.random.rand', (['n_points', 'n_dim'], {}), '(n_points, n_dim)\n', (791, 808), True, 'import numpy as np\n'), ((850, 873), 'scipy.spatial.ConvexHull', 'ConvexHull', (['cloud', '"""Qx"""'], {}), "(cloud, 'Qx')\n", (860, 873), False, 'from scipy.spatial import ConvexHull\n'), ((260, 285), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (273, 285), False, 'import pytest\n'), ((1059, 1083), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1072, 1083), False, 'import pytest\n'), ((1291, 1315), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1304, 1315), False, 'import pytest\n'), ((420, 430), 'hysynth.utils.hybrid_system.library.construct_variable_name', 'get_var', (['(1)'], {}), '(1)\n', (427, 430), True, 'from hysynth.utils.hybrid_system.library import construct_variable_name as get_var\n'), ((432, 442), 'hysynth.utils.hybrid_system.library.construct_variable_name', 'get_var', (['(2)'], {}), '(2)\n', (439, 442), True, 'from hysynth.utils.hybrid_system.library import construct_variable_name as get_var\n'), ((331, 341), 'hysynth.utils.hybrid_system.library.construct_variable_name', 'get_var', (['(1)'], {}), '(1)\n', (338, 341), True, 'from hysynth.utils.hybrid_system.library import construct_variable_name as get_var\n')] |
import numpy as np
import bct
import sys
import mne
from nitime import TimeSeries
from nitime.analysis import CorrelationAnalyzer
from my_settings import (bands, source_folder, window_size, step_size)
subject = sys.argv[1]
cls = np.load(source_folder + "hilbert_data/%s_classic_ht-epo.npy" %
subject).item()
pln = np.load(source_folder + "hilbert_data/%s_plan_ht-epo.npy" %
subject).item()
times = np.arange(-4000, 1001, 1)
times = times / 1000.
selected_times = times[::step_size]
# Sliding-window graph analysis of band-limited power for one subject,
# comparing a "classic" (cls) and a "plan" (pln) condition.
# Per frequency band: baseline-correct the power envelopes, correlate the
# channel time series inside each window, binarise the correlation matrices
# at a data-driven threshold, and collect graph metrics per window
# (node degree, transitivity, characteristic path length).
# NOTE(review): assumes cls/pln map band name -> complex analytic signal
# (np.abs(...)**2 yields power) and that `times`, `selected_times`,
# `window_size`, `subject` and `source_folder` are defined earlier -- confirm.
results_cls = {}
results_pln = {}
for k, band in enumerate(bands.keys()):
    # baseline correct timeseries
    # (z-score relative to the (-3.8, -3.3) s baseline window)
    cls_bs = mne.baseline.rescale(
        np.abs(cls[band])**2, times, baseline=(-3.8, -3.3), mode="zscore")
    pln_bs = mne.baseline.rescale(
        np.abs(pln[band])**2, times, baseline=(-3.8, -3.3), mode="zscore")
    # per-window metric accumulators for this band
    deg_cls = []
    deg_pln = []
    trans_cls = []
    trans_pln = []
    ge_cls = []  # NOTE(review): never filled or saved -- presumably a planned global-efficiency measure
    ge_pln = []  # NOTE(review): unused, see ge_cls
    cp_cls = []
    cp_pln = []
    for st in selected_times:
        # only analyse windows that fit entirely inside the epoch
        if st + window_size < times[-1]:
            from_time = np.abs(times - st).argmin()
            to_time = np.abs(times - (st + window_size)).argmin()
            corr_cls = []
            corr_pln = []
            # make timeseries object
            for ts in cls_bs:
                nits = TimeSeries(
                    ts[:, from_time:to_time],
                    sampling_rate=1000)  # epochs_normal.info["sfreq"])
                corr_cls += [CorrelationAnalyzer(nits)]
            for ts in pln_bs:
                nits = TimeSeries(
                    ts[:, from_time:to_time],
                    sampling_rate=1000)  # epochs_normal.info["sfreq"])
                corr_pln += [CorrelationAnalyzer(nits)]
            corr_cls_coef = [d.corrcoef for d in corr_cls]
            corr_pln_coef = [d.corrcoef for d in corr_pln]
            # pooled threshold: median + 1 SD of the non-zero |r| values
            # across both conditions, so both are binarised identically
            full_matrix = np.concatenate(
                [np.abs(corr_cls_coef), np.abs(corr_pln_coef)], axis=0)
            threshold = np.median(full_matrix[np.nonzero(full_matrix)]) + \
                np.std(full_matrix[np.nonzero(full_matrix)])
            # binarised (undirected) adjacency matrices, one per epoch
            data_cls_bin = np.abs(corr_cls_coef) > threshold
            data_pln_bin = np.abs(corr_pln_coef) > threshold
            # graph metrics per epoch; charpath()[0] is the
            # characteristic path length component of bct's return tuple
            deg_cls_tmp = np.asarray(
                [bct.degrees_und(g) for g in data_cls_bin])
            deg_pln_tmp = np.asarray(
                [bct.degrees_und(g) for g in data_pln_bin])
            trans_cls_tmp = np.asarray(
                [bct.transitivity_bu(g) for g in data_cls_bin])
            trans_pln_tmp = np.asarray(
                [bct.transitivity_bu(g) for g in data_pln_bin])
            cp_cls_tmp = np.asarray(
                [bct.distance.charpath(g)[0] for g in data_cls_bin])
            cp_pln_tmp = np.asarray(
                [bct.distance.charpath(g)[0] for g in data_pln_bin])
            # Add measure to results list
            deg_cls.append(deg_cls_tmp)
            deg_pln.append(deg_pln_tmp)
            trans_cls.append(trans_cls_tmp)
            trans_pln.append(trans_pln_tmp)
            cp_cls.append(cp_cls_tmp)
            cp_pln.append(cp_pln_tmp)
    # collect the per-window metric arrays for this band
    results_cls["deg_%s" % band] = np.asarray(deg_cls)
    results_pln["deg_%s" % band] = np.asarray(deg_pln)
    results_cls["trans_%s" % band] = np.asarray(trans_cls)
    results_pln["trans_%s" % band] = np.asarray(trans_pln)
    results_cls["cp_%s" % band] = np.asarray(cp_cls)
    results_pln["cp_%s" % band] = np.asarray(cp_pln)
# persist one dict of metric arrays per condition
np.save(source_folder + "graph_data/%s_pln_pow_sliding_bin-epo.npy" %
        (subject), results_pln)
np.save(source_folder + "graph_data/%s_cls_pow_sliding_bin-epo.npy" %
        (subject), results_cls)
| [
"numpy.load",
"numpy.save",
"my_settings.bands.keys",
"numpy.abs",
"nitime.TimeSeries",
"numpy.asarray",
"bct.transitivity_bu",
"bct.distance.charpath",
"numpy.nonzero",
"bct.degrees_und",
"numpy.arange",
"nitime.analysis.CorrelationAnalyzer"
] | [((430, 455), 'numpy.arange', 'np.arange', (['(-4000)', '(1001)', '(1)'], {}), '(-4000, 1001, 1)\n', (439, 455), True, 'import numpy as np\n'), ((3481, 3576), 'numpy.save', 'np.save', (["(source_folder + 'graph_data/%s_pln_pow_sliding_bin-epo.npy' % subject)", 'results_pln'], {}), "(source_folder + 'graph_data/%s_pln_pow_sliding_bin-epo.npy' %\n subject, results_pln)\n", (3488, 3576), True, 'import numpy as np\n'), ((3583, 3678), 'numpy.save', 'np.save', (["(source_folder + 'graph_data/%s_cls_pow_sliding_bin-epo.npy' % subject)", 'results_cls'], {}), "(source_folder + 'graph_data/%s_cls_pow_sliding_bin-epo.npy' %\n subject, results_cls)\n", (3590, 3678), True, 'import numpy as np\n'), ((575, 587), 'my_settings.bands.keys', 'bands.keys', ([], {}), '()\n', (585, 587), False, 'from my_settings import bands, source_folder, window_size, step_size\n'), ((3181, 3200), 'numpy.asarray', 'np.asarray', (['deg_cls'], {}), '(deg_cls)\n', (3191, 3200), True, 'import numpy as np\n'), ((3236, 3255), 'numpy.asarray', 'np.asarray', (['deg_pln'], {}), '(deg_pln)\n', (3246, 3255), True, 'import numpy as np\n'), ((3293, 3314), 'numpy.asarray', 'np.asarray', (['trans_cls'], {}), '(trans_cls)\n', (3303, 3314), True, 'import numpy as np\n'), ((3352, 3373), 'numpy.asarray', 'np.asarray', (['trans_pln'], {}), '(trans_pln)\n', (3362, 3373), True, 'import numpy as np\n'), ((3408, 3426), 'numpy.asarray', 'np.asarray', (['cp_cls'], {}), '(cp_cls)\n', (3418, 3426), True, 'import numpy as np\n'), ((3461, 3479), 'numpy.asarray', 'np.asarray', (['cp_pln'], {}), '(cp_pln)\n', (3471, 3479), True, 'import numpy as np\n'), ((232, 303), 'numpy.load', 'np.load', (["(source_folder + 'hilbert_data/%s_classic_ht-epo.npy' % subject)"], {}), "(source_folder + 'hilbert_data/%s_classic_ht-epo.npy' % subject)\n", (239, 303), True, 'import numpy as np\n'), ((331, 399), 'numpy.load', 'np.load', (["(source_folder + 'hilbert_data/%s_plan_ht-epo.npy' % subject)"], {}), "(source_folder + 
'hilbert_data/%s_plan_ht-epo.npy' % subject)\n", (338, 399), True, 'import numpy as np\n'), ((667, 684), 'numpy.abs', 'np.abs', (['cls[band]'], {}), '(cls[band])\n', (673, 684), True, 'import numpy as np\n'), ((778, 795), 'numpy.abs', 'np.abs', (['pln[band]'], {}), '(pln[band])\n', (784, 795), True, 'import numpy as np\n'), ((1316, 1372), 'nitime.TimeSeries', 'TimeSeries', (['ts[:, from_time:to_time]'], {'sampling_rate': '(1000)'}), '(ts[:, from_time:to_time], sampling_rate=1000)\n', (1326, 1372), False, 'from nitime import TimeSeries\n'), ((1557, 1613), 'nitime.TimeSeries', 'TimeSeries', (['ts[:, from_time:to_time]'], {'sampling_rate': '(1000)'}), '(ts[:, from_time:to_time], sampling_rate=1000)\n', (1567, 1613), False, 'from nitime import TimeSeries\n'), ((2143, 2164), 'numpy.abs', 'np.abs', (['corr_cls_coef'], {}), '(corr_cls_coef)\n', (2149, 2164), True, 'import numpy as np\n'), ((2204, 2225), 'numpy.abs', 'np.abs', (['corr_pln_coef'], {}), '(corr_pln_coef)\n', (2210, 2225), True, 'import numpy as np\n'), ((1079, 1097), 'numpy.abs', 'np.abs', (['(times - st)'], {}), '(times - st)\n', (1085, 1097), True, 'import numpy as np\n'), ((1129, 1163), 'numpy.abs', 'np.abs', (['(times - (st + window_size))'], {}), '(times - (st + window_size))\n', (1135, 1163), True, 'import numpy as np\n'), ((1476, 1501), 'nitime.analysis.CorrelationAnalyzer', 'CorrelationAnalyzer', (['nits'], {}), '(nits)\n', (1495, 1501), False, 'from nitime.analysis import CorrelationAnalyzer\n'), ((1717, 1742), 'nitime.analysis.CorrelationAnalyzer', 'CorrelationAnalyzer', (['nits'], {}), '(nits)\n', (1736, 1742), False, 'from nitime.analysis import CorrelationAnalyzer\n'), ((1923, 1944), 'numpy.abs', 'np.abs', (['corr_cls_coef'], {}), '(corr_cls_coef)\n', (1929, 1944), True, 'import numpy as np\n'), ((1946, 1967), 'numpy.abs', 'np.abs', (['corr_pln_coef'], {}), '(corr_pln_coef)\n', (1952, 1967), True, 'import numpy as np\n'), ((2294, 2312), 'bct.degrees_und', 'bct.degrees_und', (['g'], {}), '(g)\n', 
(2309, 2312), False, 'import bct\n'), ((2393, 2411), 'bct.degrees_und', 'bct.degrees_und', (['g'], {}), '(g)\n', (2408, 2411), False, 'import bct\n'), ((2494, 2516), 'bct.transitivity_bu', 'bct.transitivity_bu', (['g'], {}), '(g)\n', (2513, 2516), False, 'import bct\n'), ((2598, 2620), 'bct.transitivity_bu', 'bct.transitivity_bu', (['g'], {}), '(g)\n', (2617, 2620), False, 'import bct\n'), ((2024, 2047), 'numpy.nonzero', 'np.nonzero', (['full_matrix'], {}), '(full_matrix)\n', (2034, 2047), True, 'import numpy as np\n'), ((2089, 2112), 'numpy.nonzero', 'np.nonzero', (['full_matrix'], {}), '(full_matrix)\n', (2099, 2112), True, 'import numpy as np\n'), ((2700, 2724), 'bct.distance.charpath', 'bct.distance.charpath', (['g'], {}), '(g)\n', (2721, 2724), False, 'import bct\n'), ((2806, 2830), 'bct.distance.charpath', 'bct.distance.charpath', (['g'], {}), '(g)\n', (2827, 2830), False, 'import bct\n')] |
from optparse import Values
import matplotlib.pyplot as plt
from floodsystem.analysis import polyfit
import matplotlib
import numpy as np
from datetime import datetime, timedelta
from floodsystem.datafetcher import fetch_measure_levels
def plot_water_levels(station, dates, levels):
    """Plot the water-level time series for a station.

    Draws the level data against dates; when level data is present the
    station's typical low/high range is overlaid as two horizontal lines,
    otherwise a warning is printed. Finishes by labelling and showing the
    figure.
    """
    # Draw whatever level data we have (an empty series plots nothing).
    plt.plot(dates, levels)
    if levels:
        # Overlay the typical-range bounds as flat lines across the x range.
        for bound in (station.typical_range[0], station.typical_range[1]):
            plt.plot(dates, [bound] * len(dates))
    else:
        print("Past Level Data Unavailable")
    # Axis labels, rotated date ticks and a tidy layout.
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title(station.name)
    plt.tight_layout()
    plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
    """Plot a station's water levels with a degree-``p`` polynomial fit.

    The raw levels are scattered against the (numeric) dates; if any dates
    are present, a polynomial fitted by ``polyfit`` is drawn over 30 points
    together with flat lines at the observed min and max levels, and the
    figure is shown. With no dates, returns None after the scatter plot.
    """
    datenums = matplotlib.dates.date2num(dates)
    plt.plot(datenums, levels, '.')
    if not dates:
        return None
    # polyfit returns the polynomial and the date offset d0 it was fitted about.
    poly, d0 = polyfit(dates, levels, p)
    fit_x = np.linspace(d0, datenums[-1], 30)
    plt.plot(fit_x, poly(fit_x - d0))
    # Flat reference lines at the observed extremes.
    plt.plot(dates, [max(levels)] * len(dates))
    plt.plot(dates, [min(levels)] * len(dates))
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title(station.name)
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"floodsystem.analysis.polyfit",
"matplotlib.pyplot.xticks",
"numpy.linspace",
"matplotlib.dates.date2num",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] | [((379, 402), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {}), '(dates, levels)\n', (387, 402), True, 'import matplotlib.pyplot as plt\n'), ((799, 817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (809, 817), True, 'import matplotlib.pyplot as plt\n'), ((822, 851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level (m)"""'], {}), "('water level (m)')\n", (832, 851), True, 'import matplotlib.pyplot as plt\n'), ((856, 879), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (866, 879), True, 'import matplotlib.pyplot as plt\n'), ((885, 908), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (894, 908), True, 'import matplotlib.pyplot as plt\n'), ((913, 931), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (929, 931), True, 'import matplotlib.pyplot as plt\n'), ((949, 959), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (957, 959), True, 'import matplotlib.pyplot as plt\n'), ((1028, 1060), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (1053, 1060), False, 'import matplotlib\n'), ((1080, 1099), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (1088, 1099), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1158), 'floodsystem.analysis.polyfit', 'polyfit', (['dates', 'levels', 'p'], {}), '(dates, levels, p)\n', (1140, 1158), False, 'from floodsystem.analysis import polyfit\n'), ((1172, 1198), 'numpy.linspace', 'np.linspace', (['d0', 'x[-1]', '(30)'], {}), '(d0, x[-1], 30)\n', (1183, 1198), True, 'import numpy as np\n'), ((1343, 1361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (1353, 1361), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1399), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level (m)"""'], {}), "('water level (m)')\n", (1380, 1399), True, 'import matplotlib.pyplot as 
plt\n'), ((1408, 1431), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (1418, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1441, 1464), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (1450, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1491), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1489, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1512), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1510, 1512), True, 'import matplotlib.pyplot as plt\n')] |
"""The point of Container.py is to provide a function Container which converts
any old thing A to thing B which looks and acts just like A, but it has a
'value' attribute. B.value looks and acts just like A but every variable
'inside' B has been replaced by its value. Examples:
class MyObject(object):
def __init__(self):
self.x = Uninformative('x',0)
self.y = 3
A = MyObject()
B = Container(A)
B.x
B.value.x
A = [Uninformative('x',0), 3]
B = Container(A)
B
B.value
Should work even with nested inputs:
class MyObject(object):
def __init__(self):
self.x = [Uninformative('x',0), 5]
self.y = 3
A = MyObject()
B = Container(A)
In addition, container objects file away the objects they contain into the
following sets: stochastics, deterministics, variables, nodes, containers, data, step methods.
These flattened representations are useful for things like cache checking.
"""
from .Node import Node, ContainerBase, Variable, StochasticBase, DeterministicBase, PotentialBase, ContainerRegistry
from copy import copy
from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf
from .Container_values import LCValue, DCValue, ACValue, OCValue
from types import ModuleType
import pdb
from . import six
xrange = six.moves.xrange
__all__ = [
'Container',
'DictContainer',
'TupleContainer',
'ListContainer',
'SetContainer',
'ObjectContainer',
'ArrayContainer']
def filter_dict(obj):
    """Return the subset of ``obj.__dict__`` whose values are PyMC nodes.

    Only entries whose value is a ``Node`` or a ``ContainerBase`` are kept.
    """
    return {name: member
            for name, member in six.iteritems(obj.__dict__)
            if isinstance(member, (Node, ContainerBase))}
def Container(*args):
    """
    C = Container(iterable)
    C = Container(module)
    C = Container(object)
    C = Container(obj_1, obj_2, obj_3, ...)

    Wrap an iterable (list, set, tuple, dictionary, object-dtype ndarray),
    a module, or another supported object in the matching subclass of
    ContainerBase and return it.

    The returned container emulates the object it wraps, but additionally
    exposes a ``value`` attribute: ``C.value`` behaves like the container
    itself with every PyMC variable it contains replaced by that variable's
    current value. This lets nodes take large collections of parents
    without naming each parent individually -- the collection's ``value``
    is what gets passed into log-probability functions, and the node caches
    and tracks every element.

    An argument that is already a ContainerBase instance is returned
    unchanged.

    :SeeAlso:
      ListContainer, TupleContainer, SetContainer, ArrayContainer,
      DictContainer, ObjectContainer
    """
    # One argument is wrapped directly; several arguments are treated as a
    # tuple of things to wrap.
    target = args[0] if len(args) == 1 else args

    # Already a container: nothing to do.
    if isinstance(target, ContainerBase):
        return target

    # Dispatch via the registry: the first registered container class whose
    # containing_classes match the target's type wins.
    for wrapper_class, handled_classes in ContainerRegistry:
        if any(isinstance(target, handled) for handled in handled_classes):
            return wrapper_class(target)

    # No registered class can hold this type.
    raise ValueError(
        'No container classes available for class ' +
        target.__class__.__name__ +
        ', see Container.py for examples on how to write one.')
# Capture the type of a class's __dict__ proxy (``mappingproxy`` on
# Python 3) so file_items can treat it like a plain dict; the throwaway
# class exists only to obtain an instance of that type.
class _A(object):
    pass
dict_proxy_type = type(_A.__dict__)
del _A
def file_items(container, iterable):
    """
    Files away objects into the appropriate attributes of the container.

    Walks ``iterable`` once and sorts each element into the container's
    bookkeeping sets (variables, stochastics, deterministics, potentials,
    observed_stochastics, containers). Nested iterables are recursively
    wrapped in containers and assimilated. For tuple containers a NEW
    tuple is returned (tuples are immutable), otherwise the return value
    is None and the container is mutated in place.
    """
    # container._value = copy(iterable)
    container.nodes = set()
    container.variables = set()
    container.deterministics = set()
    container.stochastics = set()
    container.potentials = set()
    container.observed_stochastics = set()
    # containers needs to be a list to hold unhashable items.
    container.containers = []
    i = -1
    for item in iterable:
        # If this is a dictionary, switch from key to item.
        if isinstance(iterable, (dict, dict_proxy_type)):
            key = item
            item = iterable[key]
        # Item counter
        else:
            i += 1
        # If the item isn't iterable, file it away.
        if isinstance(item, Variable):
            container.variables.add(item)
            if isinstance(item, StochasticBase):
                # A stochastic counts as observed if flagged so, or if it
                # carries a (partial-observation) mask.
                if item.observed or not getattr(item, 'mask', None) is None:
                    container.observed_stochastics.add(item)
                if not item.observed:
                    container.stochastics.add(item)
            elif isinstance(item, DeterministicBase):
                container.deterministics.add(item)
        elif isinstance(item, PotentialBase):
            container.potentials.add(item)
        elif isinstance(item, ContainerBase):
            container.assimilate(item)
            container.containers.append(item)
        # Wrap internal containers
        elif hasattr(item, '__iter__'):
            # If this is a non-object-valued ndarray, don't container-ize it.
            if isinstance(item, ndarray):
                if item.dtype != dtype('object'):
                    continue
            # If the item is iterable, wrap it in a container. Replace the item
            # with the wrapped version.
            # NOTE(review): the bare except silently skips anything
            # Container() cannot wrap -- deliberate best-effort behaviour.
            try:
                new_container = Container(item)
            except:
                continue
            # Update all of container's variables, potentials, etc. with the new wrapped
            # iterable's. This process recursively unpacks nested iterables.
            container.assimilate(new_container)
            if isinstance(container, dict):
                container.replace(key, new_container)
            elif isinstance(container, tuple):
                # Tuples are immutable: rebuild and return a new tuple.
                return container[:i] + (new_container,) + container[i + 1:]
            else:
                container.replace(item, new_container, i)
    container.nodes = container.potentials | container.variables
    # 'Freeze' markov blanket, moral neighbors, coparents of all constituent stochastics
    # for future use
    for attr in ['moral_neighbors', 'markov_blanket', 'coparents']:
        setattr(container, attr, {})
    for s in container.stochastics:
        for attr in ['moral_neighbors', 'markov_blanket', 'coparents']:
            getattr(container, attr)[s] = getattr(s, attr)
# Shared docstring for every container class's ``value`` property.
value_doc = 'A copy of self, with all variables replaced by their values.'
def sort_list(container, _value):
    """Split _value into variable/container entries vs. plain entries.

    Records the indices and objects of each group on the container (used
    by LCValue to update only the variable slots when ``value`` is read).
    """
    val_ind = []
    val_obj = []
    nonval_ind = []
    nonval_obj = []
    for i in xrange(len(_value)):
        obj = _value[i]
        if isinstance(obj, Variable) or isinstance(obj, ContainerBase):
            val_ind.append(i)
            val_obj.append(obj)
        else:
            nonval_ind.append(i)
            nonval_obj.append(obj)
    # In case val_obj is only a single array, avert confusion.
    # Leave this even though it's confusing!
    # (the trailing None stops numpy from broadcasting a lone array)
    val_obj.append(None)
    nonval_obj.append(None)
    container.n_val = len(val_ind)
    container.n_nonval = len(nonval_ind)
    container.val_ind = array(val_ind, dtype='int32')
    container.val_obj = val_obj
    container.nonval_ind = array(nonval_ind, dtype='int32')
    container.nonval_obj = array(nonval_obj, dtype=object)
    # Compiled helper that refreshes _value in place.
    container.LCValue = LCValue(container)
class SetContainer(ContainerBase, frozenset):
    """
    SetContainers are containers that wrap sets.
    :Parameters:
      iterable : set.
    :Attributes:
      value : set
        A copy of self, with all variables replaced with their values.
      nodes : set
        All the stochastics, deterministics and potentials self contains.
      deterministics : set
        All the deterministics self contains.
      stochastics : set
        All the stochastics self contains with observed=False.
      potentials : set
        All the potentials self contains.
      observed_stochastics : set
        All the stochastics self contains with observed=True.
      containers : list
        All the containers self contains.
    :Note:
      - nodes, deterministics, etc. include all the objects in nested
        containers.
      - value replaces objects in nested containers.
    :SeeAlso:
      Container, ListContainer, DictContainer, ArrayContainer, TupleContainer,
      ObjectContainer
    """
    register = True
    change_methods = []
    containing_classes = [set, frozenset]
    def __init__(self, iterable):
        # The frozenset itself was populated by __new__; new_iterable is a
        # mutable shadow copy in which nested iterables can be re-wrapped.
        self.new_iterable = set(iterable)
        file_items(self, self.new_iterable)
        ContainerBase.__init__(self, self.new_iterable)
        self._value = list(self)
        sort_list(self, self._value)
    def replace(self, item, new_container, i):
        # Swap item for its container wrapper in the mutable shadow set
        # (the index i is unused for sets).
        self.new_iterable.discard(item)
        self.new_iterable.add(new_container)
    def get_value(self):
        # Refresh the variable slots of _value, then view it as a set.
        self.LCValue.run()
        return set(self._value)
    value = property(fget=get_value, doc=value_doc)
class TupleContainer(ContainerBase, tuple):
    """
    TupleContainers are containers that wrap tuples.
    :Parameters:
      iterable : tuple.
    :Attributes:
      value : tuple
        A copy of self, with all variables replaced with their values.
      nodes : set
        All the stochastics, deterministics and potentials self contains.
      deterministics : set
        All the deterministics self contains.
      stochastics : set
        All the stochastics self contains with observed=False.
      potentials : set
        All the potentials self contains.
      observed_stochastics : set
        All the stochastics self contains with observed=True.
      containers : list
        All the containers self contains.
    :Note:
      - nodes, deterministics, etc. include all the objects in nested
        containers.
      - value replaces objects in nested containers.
    :SeeAlso:
      Container, ListContainer, DictContainer, ArrayContainer, SetContainer,
      ObjectContainer
    """
    register = True
    change_methods = []
    containing_classes = [tuple]
    def __init__(self, iterable):
        # First pass: detect nested containers (populates self.containers).
        # NOTE(review): file_items is called twice -- once here and once
        # below; the second call redoes the bookkeeping. Looks redundant,
        # confirm before touching.
        new_tup = file_items(self, iterable)
        if len(self.containers) > 0:
            raise NotImplementedError("""We have not figured out how to satisfactorily implement nested TupleContainers.
The reason is there is no way to change an element of a tuple after it has been created.
Even the Python-C API makes this impossible by checking that a tuple is new
before allowing you to change one of its elements.""")
        ContainerBase.__init__(self, iterable)
        file_items(self, iterable)
        self._value = list(self)
        sort_list(self, self._value)
    def replace(self, item, new_container, i):
        # NOTE(review): unreachable in practice -- nested containers raise
        # NotImplementedError above, and list.__setitem__ would fail on a
        # tuple subclass anyway.
        list.__setitem__(self, i, new_container)
    def get_value(self):
        # Refresh the variable slots of _value, then view it as a tuple.
        self.LCValue.run()
        return tuple(self._value)
    value = property(fget=get_value, doc=value_doc)
class ListContainer(ContainerBase, list):
    """
    ListContainers are containers that wrap lists.
    :Parameters:
      iterable : list.
    :Attributes:
      value : list
        A copy of self, with all variables replaced with their values.
      nodes : set
        All the stochastics, deterministics and potentials self contains.
      deterministics : set
        All the deterministics self contains.
      stochastics : set
        All the stochastics self contains with observed=False.
      potentials : set
        All the potentials self contains.
      observed_stochastics : set
        All the stochastics self contains with observed=True.
      containers : list
        All the containers self contains.
    :Note:
      - nodes, deterministics, etc. include all the objects in nested
        containers.
      - value replaces objects in nested containers.
    :SeeAlso:
      Container, TupleContainer, DictContainer, ArrayContainer, SetContainer,
      ObjectContainer
    """
    # Mutating list methods (kept for reference by the container machinery).
    change_methods = [
        '__setitem__',
        '__delitem__',
        '__setslice__',
        '__delslice__',
        '__iadd__',
        '__imul__',
        'append',
        'extend',
        'insert',
        'pop',
        'remove',
        'reverse',
        'sort']
    containing_classes = [list]
    register = True
    def __init__(self, iterable):
        list.__init__(self, iterable)
        ContainerBase.__init__(self, iterable)
        # Sort elements into variable/non-variable slots and wrap nested
        # iterables in containers.
        file_items(self, iterable)
        self._value = list(self)
        sort_list(self, self._value)
    def replace(self, item, new_container, i):
        # Swap element i for its container wrapper in place.
        list.__setitem__(self, i, new_container)
    def get_value(self):
        # Refresh the variable slots of _value in place.
        self.LCValue.run()
        return self._value
    value = property(fget=get_value, doc=value_doc)
class DictContainer(ContainerBase, dict):
    """
    DictContainers are containers that wrap dictionaries.
    Modules are converted into DictContainers, and variables' and potentials'
    Parents objects are DictContainers also.
    :Parameters:
      iterable : dictionary or object with a __dict__.
    :Attributes:
      value : dictionary
        A copy of self, with all variables replaced with their values.
      nodes : set
        All the stochastics, deterministics and potentials self contains.
      deterministics : set
        All the deterministics self contains.
      stochastics : set
        All the stochastics self contains with observed=False.
      potentials : set
        All the potentials self contains.
      observed_stochastics : set
        All the stochastics self contains with observed=True.
      containers : list
        All the containers self contains.
    :Note:
      - nodes, deterministics, etc. include all the objects in nested
        containers.
      - value replaces objects in nested containers.
    :SeeAlso:
      Container, ListContainer, TupleContainer, ArrayContainer, SetContainer,
      ObjectContainer
    """
    # Mutating dict methods (kept for reference by the container machinery).
    change_methods = [
        '__setitem__',
        '__delitem__',
        'clear',
        'pop',
        'popitem',
        'update']
    containing_classes = [dict]
    register = True
    def __init__(self, iterable):
        dict.__init__(self, iterable)
        ContainerBase.__init__(self, iterable)
        self._value = copy(iterable)
        file_items(self, iterable)
        # Keyed analogue of sort_list: partition entries into variable
        # slots (refreshed on each value read) and constant slots.
        self.val_keys = []
        self.val_obj = []
        self.nonval_keys = []
        self.nonval_obj = []
        self._value = {}
        for key, obj in six.iteritems(self):
            if isinstance(obj, Variable) or isinstance(obj, ContainerBase):
                self.val_keys.append(key)
                self.val_obj.append(obj)
            else:
                self.nonval_keys.append(key)
                self.nonval_obj.append(obj)
        # In case val_obj is only a single array, avert confusion.
        # Leave this even though it's confusing!
        # (the trailing None stops numpy from broadcasting a lone array)
        self.val_obj.append(None)
        self.nonval_obj.append(None)
        self.n_val = len(self.val_keys)
        self.val_keys = array(self.val_keys, dtype=object)
        # self.val_obj = array(self.val_obj, dtype=object)
        self.n_nonval = len(self.nonval_keys)
        self.nonval_keys = array(self.nonval_keys, dtype=object)
        self.nonval_obj = array(self.nonval_obj, dtype=object)
        # Compiled helper that refreshes _value in place.
        self.DCValue = DCValue(self)
    def replace(self, key, new_container):
        # Swap the entry at key for its container wrapper.
        dict.__setitem__(self, key, new_container)
    def get_value(self):
        # DCValue(self)
        self.DCValue.run()
        return self._value
    value = property(fget=get_value, doc=value_doc)
def conservative_update(obj, dict):
    """Copy the items of *dict* onto *obj* as attributes, non-destructively.

    Attributes that *obj* already has are left untouched, and attributes
    that cannot be set (read-only slots, properties without setters, ...)
    are skipped silently, so the update is best-effort by design.

    :Parameters:
      obj : object
        Target whose missing attributes are filled in.
      dict : mapping
        Source of attribute names and values.
    """
    for k in dict:
        if not hasattr(obj, k):
            try:
                setattr(obj, k, dict[k])
            except Exception:
                # Best-effort: skip unsettable attributes, but — unlike the
                # old bare `except:` — let KeyboardInterrupt/SystemExit
                # propagate.
                pass
class ObjectContainer(ContainerBase):
    """
    ObjectContainers wrap non-iterable objects.
    Contents of the input iterable, or attributes of the input object,
    are exposed as attributes of the object.
    :Parameters:
      iterable : dictionary or object with a __dict__.
    :Attributes:
      value : object
        A copy of self, with all variables replaced with their values.
      nodes : set
        All the stochastics, deterministics and potentials self contains.
      deterministics : set
        All the deterministics self contains.
      stochastics : set
        All the stochastics self contains with observed=False.
      potentials : set
        All the potentials self contains.
      observed_stochastics : set
        All the stochastics self contains with observed=True.
      containers : list
        All the containers self contains.
    :Note:
      - nodes, deterministics, etc. include all the objects in nested
        containers.
      - value replaces objects in nested containers.
    :SeeAlso:
      Container, ListContainer, DictContainer, ArrayContainer, SetContainer,
      TupleContainer
    """
    register = False
    def __init__(self, input):
        # Expose the input's entries/attributes on self without clobbering
        # anything self already defines.
        if isinstance(input, dict):
            input_to_file = input
            conservative_update(self, input_to_file)
            # self.__dict__.update(input_to_file)
        elif hasattr(input, '__iter__'):
            input_to_file = input
        else:  # Modules, objects, etc.
            input_to_file = input.__dict__
            conservative_update(self, input_to_file)
            # self.__dict__.update(input_to_file)
        # Track self's own attributes through a DictContainer (dropping any
        # stray 'self' entry picked up from a frame's locals).
        dictpop = copy(self.__dict__)
        if 'self' in dictpop:
            dictpop.pop('self')
        self._dict_container = DictContainer(dictpop)
        file_items(self, input_to_file)
        self._value = copy(self)
        ContainerBase.__init__(self, input)
        # Compiled helper that refreshes _value in place.
        self.OCValue = OCValue(self)
    def replace(self, item, new_container, key):
        # Swap the attribute at key for its container wrapper.
        dict.__setitem__(self.__dict__, key, new_container)
    def _get_value(self):
        self.OCValue.run()
        return self._value
    value = property(fget=_get_value, doc=value_doc)
class ArrayContainer(ContainerBase, ndarray):
    """
    ArrayContainers wrap Numerical Python ndarrays. These are full
    ndarray subclasses, and should support all of ndarrays'
    functionality.
    :Parameters:
      iterable : array.
    :Attributes:
      value : array.
        A copy of self, with all variables replaced with their values.
      nodes : set
        All the stochastics, deterministics and potentials self contains.
      deterministics : set
        All the deterministics self contains.
      stochastics : set
        All the stochastics self contains with observed=False.
      potentials : set
        All the potentials self contains.
      observed_stochastics : set
        All the stochastics self contains with observed=True.
      containers : list
        All the containers self contains.
    :Note:
      - nodes, deterministics, etc. include all the objects in nested
        containers.
      - value replaces objects in nested containers.
    :SeeAlso:
      Container, ListContainer, DictContainer, ObjectContainer, SetContainer,
      TupleContainer
    """
    register = True
    change_methods = []
    containing_classes = [ndarray]
    def __new__(subtype, array_in):
        # ndarray subclasses are built in __new__, not __init__; only
        # object-dtype arrays can hold PyMC variables.
        if not array_in.dtype == dtype('object'):
            raise ValueError(
                'Cannot create container from array whose dtype is not object.')
        C = array(array_in, copy=True).view(subtype)
        C_ravel = C.ravel()
        ContainerBase.__init__(C, array_in)
        # Sort out contents and wrap internal containers.
        file_items(C, C_ravel)
        C._value = C.copy()
        C._ravelledvalue = C._value.ravel()
        # An array range to keep around.
        C.iterrange = arange(len(C_ravel))
        # Flat-index analogue of sort_list: partition the ravelled elements
        # into variable slots and constant slots.
        val_ind = []
        val_obj = []
        nonval_ind = []
        nonval_obj = []
        for i in xrange(len(C_ravel)):
            obj = C_ravel[i]
            if isinstance(obj, Variable) or isinstance(obj, ContainerBase):
                val_ind.append(i)
                val_obj.append(obj)
            else:
                nonval_ind.append(i)
                nonval_obj.append(obj)
        # Trailing None stops numpy from broadcasting a lone array element.
        val_obj.append(None)
        C.val_ind = array(val_ind, dtype='int32')
        C.val_obj = val_obj
        C.n_val = len(val_ind)
        nonval_obj.append(None)
        C.nonval_ind = array(nonval_ind, dtype='int32')
        C.nonval_obj = array(nonval_obj, dtype=object)
        C.n_nonval = len(nonval_ind)
        # Freeze the array: elements are only changed via replace().
        C.flags['W'] = False
        # Compiled helper that refreshes _value in place.
        C.ACValue = ACValue(C)
        return C
    def replace(self, item, new_container, i):
        # Write through the ravelled view despite the writeable=False flag.
        ndarray.__setitem__(self.ravel(), i, new_container)
    # This method converts self to self.value.
    def get_value(self):
        self.ACValue.run()
        return self._value
    value = property(fget=get_value, doc=value_doc)
| [
"numpy.dtype",
"numpy.array",
"copy.copy"
] | [((8334, 8363), 'numpy.array', 'array', (['val_ind'], {'dtype': '"""int32"""'}), "(val_ind, dtype='int32')\n", (8339, 8363), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((8423, 8455), 'numpy.array', 'array', (['nonval_ind'], {'dtype': '"""int32"""'}), "(nonval_ind, dtype='int32')\n", (8428, 8455), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((8483, 8514), 'numpy.array', 'array', (['nonval_obj'], {'dtype': 'object'}), '(nonval_obj, dtype=object)\n', (8488, 8514), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((15414, 15428), 'copy.copy', 'copy', (['iterable'], {}), '(iterable)\n', (15418, 15428), False, 'from copy import copy\n'), ((16165, 16199), 'numpy.array', 'array', (['self.val_keys'], {'dtype': 'object'}), '(self.val_keys, dtype=object)\n', (16170, 16199), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((16332, 16369), 'numpy.array', 'array', (['self.nonval_keys'], {'dtype': 'object'}), '(self.nonval_keys, dtype=object)\n', (16337, 16369), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((16396, 16432), 'numpy.array', 'array', (['self.nonval_obj'], {'dtype': 'object'}), '(self.nonval_obj, dtype=object)\n', (16401, 16432), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((18570, 18589), 'copy.copy', 'copy', (['self.__dict__'], {}), '(self.__dict__)\n', (18574, 18589), False, 'from copy import copy\n'), ((18770, 18780), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (18774, 18780), False, 'from copy import copy\n'), ((21315, 21344), 'numpy.array', 'array', (['val_ind'], {'dtype': '"""int32"""'}), "(val_ind, dtype='int32')\n", (21320, 21344), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((21459, 21491), 'numpy.array', 'array', (['nonval_ind'], {'dtype': '"""int32"""'}), 
"(nonval_ind, dtype='int32')\n", (21464, 21491), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((21515, 21546), 'numpy.array', 'array', (['nonval_obj'], {'dtype': 'object'}), '(nonval_obj, dtype=object)\n', (21520, 21546), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((20366, 20381), 'numpy.dtype', 'dtype', (['"""object"""'], {}), "('object')\n", (20371, 20381), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((20507, 20533), 'numpy.array', 'array', (['array_in'], {'copy': '(True)'}), '(array_in, copy=True)\n', (20512, 20533), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n'), ((6374, 6389), 'numpy.dtype', 'dtype', (['"""object"""'], {}), "('object')\n", (6379, 6389), False, 'from numpy import ndarray, array, zeros, shape, arange, where, dtype, Inf\n')] |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from config import Config
#
# DEVICE = torch.device('cpu')
# NOISY_LAYER_STD = 0.1
# From shandong
def layer_init(layer, w_scale=1.0):
    """Initialise a layer in place: orthogonal weights (rescaled by
    ``w_scale``) and an all-zero bias.

    Returns the same layer object so the call can be used inline.
    """
    weight = layer.weight.data
    nn.init.orthogonal_(weight)
    weight.mul_(w_scale)
    nn.init.constant_(layer.bias.data, 0)
    return layer
def tensor(x):
    """Coerce ``x`` into a float32 torch.Tensor on ``Config.DEVICE``.

    Tensors are passed through unchanged; anything else is routed through
    a float32 numpy array first.
    """
    if isinstance(x, torch.Tensor):
        return x
    as_array = np.asarray(x, dtype=np.float32)
    return torch.from_numpy(as_array).to(Config.DEVICE)
class DummyBody(nn.Module):
    """Identity feature extractor: passes its input through unchanged and
    reports the raw state dimension as its feature size."""

    def __init__(self, state_dim):
        super(DummyBody, self).__init__()
        # Downstream heads size themselves off this attribute.
        self.feature_dim = state_dim

    def forward(self, x):
        # No transformation at all.
        return x
class FCBody(nn.Module):
    """Fully-connected feature extractor.

    Stacks linear layers of the given hidden sizes on top of the state
    input, applying ``gate`` after each layer. With ``noisy_linear=True``
    the layers are NoisyLinear modules (for noisy-net exploration) instead
    of orthogonally-initialised nn.Linear layers.
    """

    def __init__(self, state_dim, hidden_units=(64, 64), gate=F.relu, noisy_linear=False):
        super(FCBody, self).__init__()
        sizes = (state_dim,) + hidden_units
        pairs = list(zip(sizes[:-1], sizes[1:]))
        if noisy_linear:
            modules = [NoisyLinear(n_in, n_out) for n_in, n_out in pairs]
        else:
            modules = [layer_init(nn.Linear(n_in, n_out)) for n_in, n_out in pairs]
        self.layers = nn.ModuleList(modules)
        self.gate = gate
        self.feature_dim = sizes[-1]
        self.noisy_linear = noisy_linear

    def reset_noise(self):
        """Resample the noise of every NoisyLinear layer (no-op otherwise)."""
        if not self.noisy_linear:
            return
        for layer in self.layers:
            layer.reset_noise()

    def forward(self, x):
        """Run x through each layer followed by the gate activation."""
        out = x
        for layer in self.layers:
            out = self.gate(layer(out))
        return out
#GaussianActorCriticNet(
# config.state_dim, config.action_dim,
# actor_body=FCBody(config.state_dim), critic_body=FCBody(config.state_dim))
class GaussianActorCriticNet(nn.Module):
    """Actor-critic network with a Gaussian (Normal) policy.

    Separate FC bodies feed a tanh-squashed action mean (actor) and a scalar
    state value (critic). A single learned, state-independent ``std`` parameter
    (passed through softplus) gives the policy's standard deviation.
    """
    def __init__(self,
                 state_dim,
                 action_dim,
                 hidden_dim
                 # phi_body=None,
                 # actor_body=None,
                 # critic_body=None
                 ):
        super(GaussianActorCriticNet, self).__init__()
        # if phi_body is None: phi_body = DummyBody(state_dim)
        # if actor_body is None: actor_body = DummyBody(phi_body.feature_dim)
        # if critic_body is None: critic_body = DummyBody(phi_body.feature_dim)
        # self.phi_body = phi_body
        # Independent feature extractors for actor and critic (no shared phi body).
        self.actor_body = FCBody(state_dim, hidden_dim)
        self.critic_body = FCBody(state_dim, hidden_dim)
        # Small 1e-3 init keeps initial action means / values near zero.
        self.fc_action = layer_init(nn.Linear(self.actor_body.feature_dim, action_dim), 1e-3)
        self.fc_critic = layer_init(nn.Linear(self.critic_body.feature_dim, 1), 1e-3)
        self.std = nn.Parameter(torch.zeros(action_dim))
        # self.phi_params = list(self.phi_body.parameters())
        # Separate parameter groups, e.g. for distinct actor/critic optimisers.
        self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action.parameters()) #+ self.phi_params
        self.actor_params.append(self.std)
        self.critic_params = list(self.critic_body.parameters()) + list(self.fc_critic.parameters()) #+ self.phi_params
        self.to(Config.DEVICE)
    def forward(self, obs, action=None):
        """Run actor and critic on ``obs``; sample an action when none is given.

        Returns a dict with 'action', 'log_pi_a', 'entropy', 'mean' and 'v'.
        The trailing shape comments assume a batch of 20 and action_dim of 4.
        """
        obs = tensor(obs)
        # phi = self.phi_body(obs)
        phi_a = self.actor_body(obs)
        phi_v = self.critic_body(obs)
        mean = torch.tanh(self.fc_action(phi_a))
        v = self.fc_critic(phi_v) #20,1
        dist = torch.distributions.Normal(mean, F.softplus(self.std)) #batch shape 20,4
        if action is None:
            action = dist.sample() #20,4
        # Per-dimension log-probs/entropies are summed into one column per sample.
        log_prob = dist.log_prob(action).sum(-1).unsqueeze(-1) #20,1
        entropy = dist.entropy().sum(-1).unsqueeze(-1) #20,1
        return {'action': action, #20,4
                'log_pi_a': log_prob, #20,1
                'entropy': entropy, #20,1
                'mean': mean, #20,4
                'v': v} #20,1
# NOTE(review): this is a byte-identical duplicate of the `layer_init` defined
# earlier in this module; the redefinition harmlessly shadows it, but one of
# the two copies should be removed.
def layer_init(layer, w_scale=1.0):
    """Orthogonally initialise ``layer``'s weights (scaled by ``w_scale``) and zero its bias."""
    nn.init.orthogonal_(layer.weight.data)
    layer.weight.data.mul_(w_scale)
    nn.init.constant_(layer.bias.data, 0)
    return layer
# Adapted from https://github.com/saj1919/RL-Adventure/blob/master/5.noisy%20dqn.ipynb
class NoisyLinear(nn.Module):
    """Linear layer with factorised Gaussian parameter noise (NoisyNet-style).

    During training the effective parameters are ``mu + sigma * epsilon`` where
    ``epsilon`` is refreshed by ``reset_noise``; in eval mode only the ``mu``
    parameters are used.
    """
    def __init__(self, in_features, out_features, std_init=0.4):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        # Learnable mean/sigma; the epsilon samples are non-learnable buffers.
        self.weight_mu = nn.Parameter(torch.zeros((out_features, in_features)), requires_grad=True)
        self.weight_sigma = nn.Parameter(torch.zeros((out_features, in_features)), requires_grad=True)
        self.register_buffer('weight_epsilon', torch.zeros((out_features, in_features)))
        self.bias_mu = nn.Parameter(torch.zeros(out_features), requires_grad=True)
        self.bias_sigma = nn.Parameter(torch.zeros(out_features), requires_grad=True)
        self.register_buffer('bias_epsilon', torch.zeros(out_features))
        # Factorised noise sources: one input vector, two output vectors.
        self.register_buffer('noise_in', torch.zeros(in_features))
        self.register_buffer('noise_out_weight', torch.zeros(out_features))
        self.register_buffer('noise_out_bias', torch.zeros(out_features))
        self.reset_parameters()
        self.reset_noise()
    def forward(self, x):
        # Noisy parameters while training; deterministic means at eval time.
        if self.training:
            weight = self.weight_mu + self.weight_sigma.mul(self.weight_epsilon)
            bias = self.bias_mu + self.bias_sigma.mul(self.bias_epsilon)
        else:
            weight = self.weight_mu
            bias = self.bias_mu
        return F.linear(x, weight, bias)
    def reset_parameters(self):
        """Initialise mu uniformly in +/-1/sqrt(fan_in) and sigma to std_init/sqrt(fan)."""
        mu_range = 1 / math.sqrt(self.weight_mu.size(1))
        self.weight_mu.data.uniform_(-mu_range, mu_range)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))
        self.bias_mu.data.uniform_(-mu_range, mu_range)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))
    def reset_noise(self):
        """Draw fresh factorised noise: eps_w = f(out) outer f(in), eps_b = f(out)."""
        self.noise_in.normal_(std=Config.NOISY_LAYER_STD)
        self.noise_out_weight.normal_(std=Config.NOISY_LAYER_STD)
        self.noise_out_bias.normal_(std=Config.NOISY_LAYER_STD)
        # ger() is the outer product of the two transformed noise vectors.
        self.weight_epsilon.copy_(self.transform_noise(self.noise_out_weight).ger(
            self.transform_noise(self.noise_in)))
        self.bias_epsilon.copy_(self.transform_noise(self.noise_out_bias))
    def transform_noise(self, x):
        # f(x) = sign(x) * sqrt(|x|), the standard factorised-noise transform.
        return x.sign().mul(x.abs().sqrt())
| [
"numpy.asarray",
"torch.nn.functional.linear",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.softplus",
"torch.nn.init.orthogonal_",
"torch.from_numpy"
] | [((240, 278), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['layer.weight.data'], {}), '(layer.weight.data)\n', (259, 278), True, 'import torch.nn as nn\n'), ((319, 356), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias.data', '(0)'], {}), '(layer.bias.data, 0)\n', (336, 356), True, 'import torch.nn as nn\n'), ((452, 483), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (462, 483), True, 'import numpy as np\n'), ((3860, 3898), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['layer.weight.data'], {}), '(layer.weight.data)\n', (3879, 3898), True, 'import torch.nn as nn\n'), ((3947, 3984), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias.data', '(0)'], {}), '(layer.bias.data, 0)\n', (3964, 3984), True, 'import torch.nn as nn\n'), ((5466, 5491), 'torch.nn.functional.linear', 'F.linear', (['x', 'weight', 'bias'], {}), '(x, weight, bias)\n', (5474, 5491), True, 'import torch.nn.functional as F\n'), ((492, 511), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (508, 511), False, 'import torch\n'), ((2489, 2539), 'torch.nn.Linear', 'nn.Linear', (['self.actor_body.feature_dim', 'action_dim'], {}), '(self.actor_body.feature_dim, action_dim)\n', (2498, 2539), True, 'import torch.nn as nn\n'), ((2583, 2625), 'torch.nn.Linear', 'nn.Linear', (['self.critic_body.feature_dim', '(1)'], {}), '(self.critic_body.feature_dim, 1)\n', (2592, 2625), True, 'import torch.nn as nn\n'), ((2665, 2688), 'torch.zeros', 'torch.zeros', (['action_dim'], {}), '(action_dim)\n', (2676, 2688), False, 'import torch\n'), ((3380, 3400), 'torch.nn.functional.softplus', 'F.softplus', (['self.std'], {}), '(self.std)\n', (3390, 3400), True, 'import torch.nn.functional as F\n'), ((4387, 4427), 'torch.zeros', 'torch.zeros', (['(out_features, in_features)'], {}), '((out_features, in_features))\n', (4398, 4427), False, 'import torch\n'), ((4490, 4530), 'torch.zeros', 'torch.zeros', (['(out_features, 
in_features)'], {}), '((out_features, in_features))\n', (4501, 4530), False, 'import torch\n'), ((4599, 4639), 'torch.zeros', 'torch.zeros', (['(out_features, in_features)'], {}), '((out_features, in_features))\n', (4610, 4639), False, 'import torch\n'), ((4678, 4703), 'torch.zeros', 'torch.zeros', (['out_features'], {}), '(out_features)\n', (4689, 4703), False, 'import torch\n'), ((4764, 4789), 'torch.zeros', 'torch.zeros', (['out_features'], {}), '(out_features)\n', (4775, 4789), False, 'import torch\n'), ((4856, 4881), 'torch.zeros', 'torch.zeros', (['out_features'], {}), '(out_features)\n', (4867, 4881), False, 'import torch\n'), ((4925, 4949), 'torch.zeros', 'torch.zeros', (['in_features'], {}), '(in_features)\n', (4936, 4949), False, 'import torch\n'), ((5000, 5025), 'torch.zeros', 'torch.zeros', (['out_features'], {}), '(out_features)\n', (5011, 5025), False, 'import torch\n'), ((5074, 5099), 'torch.zeros', 'torch.zeros', (['out_features'], {}), '(out_features)\n', (5085, 5099), False, 'import torch\n'), ((1176, 1202), 'torch.nn.Linear', 'nn.Linear', (['dim_in', 'dim_out'], {}), '(dim_in, dim_out)\n', (1185, 1202), True, 'import torch.nn as nn\n')] |
from typing import ClassVar, Sequence, Tuple, Union
import numpy as np
from ..utils.events.dataclass import Property, evented_dataclass
def only_2D_3D(ndisplay):
    """Validate that ``ndisplay`` is 2 or 3 and return it unchanged."""
    if ndisplay in (2, 3):
        return ndisplay
    raise ValueError(
        f"Invalid number of dimensions to be displayed {ndisplay}"
        f" must be either 2 or 3."
    )
def reorder_after_dim_reduction(order):
    """Ensure current dimension order is preserved after dims are dropped.

    Parameters
    ----------
    order : tuple
        The data to reorder.

    Returns
    -------
    arr : tuple
        The original array with the unneeded dimension
        thrown away.
    """
    # Rank of each entry == argsort of the argsort ("double argsort" trick).
    ranks = np.argsort(np.argsort(np.array(order)))
    return tuple(int(rank) for rank in ranks)
def assert_axis_in_bounds(axis: int, ndim: int) -> int:
    """Assert a given value is inside the existing axes of the image.

    Returns
    -------
    axis : int
        The positive (normalised) axis index.

    Raises
    ------
    ValueError
        The given axis index is out of bounds.
    """
    # `in range(...)` also rejects non-integer inputs, unlike a <= comparison.
    if axis in range(-ndim, ndim):
        return axis % ndim
    raise ValueError(
        f'Axis {axis} not defined for dimensionality {ndim}. '
        f'Must be in [{-ndim}, {ndim}).'
    )
@evented_dataclass
class Dims:
    """Dimensions object modeling slicing and displaying.

    Parameters
    ----------
    ndim : int
        Number of dimensions.
    ndisplay : int
        Number of displayed dimensions.
    last_used : int
        Dimension which was last used.
    range : tuple of 3-tuple of float
        List of tuples (min, max, step), one for each dimension. In a world
        coordinates space.
    current_step : tuple of int
        Tuple the slider position for each dims slider, in slider coordinates.
    order : tuple of int
        Tuple of ordering the dimensions, where the last dimensions are rendered.
    axis_labels : tuple of str
        Tuple of labels for each dimension.

    Attributes
    ----------
    ndim : int
        Number of dimensions.
    ndisplay : int
        Number of displayed dimensions.
    last_used : int
        Dimension which was last used.
    range : tuple of 3-tuple of float
        List of tuples (min, max, step), one for each dimension. In a world
        coordinates space.
    current_step : tuple of int
        Tuple the slider position for each dims slider, in slider coordinates.
    order : tuple of int
        Tuple of ordering the dimensions, where the last dimensions are rendered.
    axis_labels : tuple of str
        Tuple of labels for each dimension.
    nsteps : tuple of int
        Number of steps available to each slider. These are calculated from
        the ``range``.
    point : tuple of float
        List of floats setting the current value of the range slider when in
        POINT mode, one for each dimension. In a world coordinates space. These
        are calculated from the ``current_step`` and ``range``.
    displayed : tuple of int
        List of dimensions that are displayed. These are calculated from the
        ``order`` and ``ndisplay``.
    not_displayed : tuple of int
        List of dimensions that are not displayed. These are calculated from the
        ``order`` and ``ndisplay``.
    displayed_order : tuple of int
        Order of only displayed dimensions. These are calculated from the
        ``displayed`` dimensions.
    """

    # NOTE(review): Property[...] annotations presumably install converter /
    # validator callables (e.g. only_2D_3D for ndisplay) via evented_dataclass —
    # confirm against ..utils.events.dataclass. The field named `range` shadows
    # the builtin only at class scope; methods still see the builtin.
    ndim: int = 2
    ndisplay: Property[int, None, only_2D_3D] = 2
    last_used: int = 0
    range: Property[Tuple, None, tuple] = ()
    current_step: Property[Tuple, None, tuple] = ()
    order: Property[Tuple, None, tuple] = ()
    axis_labels: Property[Tuple, None, tuple] = ()
    _scroll_progress: ClassVar[int] = 0

    def __post_init__(self):
        # Grow ndim so every per-dimension attribute supplied by the caller fits.
        max_ndim = max(
            self.ndim,
            self.ndisplay,
            len(self.axis_labels),
            len(self.order),
            len(self.range),
            len(self.current_step),
        )
        self._on_ndim_set(max_ndim)

    def _on_ndim_set(self, ndim):
        """Adjust lengths of other attributes based on number of dimensions."""
        # Gets called after the ndim attribute is set.
        # NOTE(review): the underscore-prefixed assignments below write the
        # private backing fields directly — presumably to avoid re-triggering
        # the evented setters; confirm against evented_dataclass semantics.
        if len(self.range) < ndim:
            # Range value is (min, max, step) for the entire slider
            self._range = ((0, 2, 1),) * (ndim - len(self.range)) + self.range
        elif len(self.range) > ndim:
            self._range = self.range[-ndim:]
        if len(self.current_step) < ndim:
            self._current_step = (0,) * (
                ndim - len(self.current_step)
            ) + self.current_step
        elif len(self.current_step) > ndim:
            self._current_step = self.current_step[-ndim:]
        if len(self.order) < ndim:
            # Prepend new leading axes; existing order entries are shifted up.
            self._order = tuple(range(ndim - len(self.order))) + tuple(
                o + ndim - len(self.order) for o in self.order
            )
        elif len(self.order) > ndim:
            self._order = reorder_after_dim_reduction(self.order[-ndim:])
        if len(self.axis_labels) < ndim:
            # Append new "default" labels to existing ones
            if self.axis_labels == tuple(
                map(str, range(len(self.axis_labels)))
            ):
                self._axis_labels = tuple(map(str, range(ndim)))
            else:
                self._axis_labels = (
                    tuple(map(str, range(ndim - len(self.axis_labels))))
                    + self.axis_labels
                )
        elif len(self.axis_labels) > ndim:
            self._axis_labels = self.axis_labels[-ndim:]
        # Normally we wouldn't need to set the `ndim` here too
        # but this lets us use the method in the post-init too
        self._ndim = ndim

    def _on_order_set(self, order):
        """Check the values of the order attribute."""
        # `order` must be a permutation of 0..ndim-1.
        if not set(order) == set(range(self.ndim)):
            raise ValueError(
                f"Invalid ordering {order} for {self.ndim} dimensions"
            )

    def _on_axis_labels_set(self, axis_labels):
        """Check the length of the axis_labels attribute."""
        if not len(axis_labels) == self.ndim:
            raise ValueError(
                f"Invalid number of axis labels {len(axis_labels)} for {self.ndim} dimensions"
            )

    def _on_range_set(self, range_var):
        """Check the length of the range attribute."""
        if not len(range_var) == self.ndim:
            raise ValueError(
                f"Invalid length range {len(range_var)} for {self.ndim} dimensions"
            )

    @property
    def nsteps(self):
        """Tuple of int: Number of slider steps for each dimension."""
        return tuple(
            int((max_val - min_val) // step_size) + 1
            for min_val, max_val, step_size in self.range
        )

    @property
    def point(self):
        """Tuple of float: Value of each dimension."""
        # The point value is computed from the range and current_step
        point = tuple(
            min_val + step_size * value
            for (min_val, max_val, step_size), value in zip(
                self.range, self.current_step
            )
        )
        return point

    @property
    def displayed(self):
        """Tuple: Dimensions that are displayed."""
        return self.order[-self.ndisplay :]

    @property
    def not_displayed(self):
        """Tuple: Dimensions that are not displayed."""
        return self.order[: -self.ndisplay]

    @property
    def displayed_order(self):
        """Tuple: Order of only displayed dimensions."""
        # Re-rank the displayed dims into a dense 0..ndisplay-1 ordering.
        order = np.array(self.displayed)
        order[np.argsort(order)] = list(range(len(order)))
        return tuple(order)

    def set_range(self, axis: int, _range: Sequence[Union[int, float]]):
        """Sets the range (min, max, step) for a given dimension.

        Parameters
        ----------
        axis : int
            Dimension index.
        _range : tuple
            Range specified as (min, max, step).
        """
        axis = assert_axis_in_bounds(axis, self.ndim)
        if self.range[axis] != _range:
            full_range = list(self.range)
            full_range[axis] = _range
            self.range = full_range
            self.last_used = axis

    def set_point(self, axis: int, value: Union[int, float]):
        """Sets point to slice dimension in world coordinates.

        The desired point gets transformed into an integer step
        of the slider and stored in the current_step.

        Parameters
        ----------
        axis : int
            Dimension index.
        value : int or float
            Value of the point.
        """
        axis = assert_axis_in_bounds(axis, self.ndim)
        (min_val, max_val, step_size) = self._range[axis]
        raw_step = (value - min_val) / step_size
        self.set_current_step(axis, raw_step)

    def set_current_step(self, axis: int, value: int):
        """Sets the slider step at which to slice this dimension.

        The position of the slider in world coordinates gets
        calculated from the current_step of the slider.

        Parameters
        ----------
        axis : int
            Dimension index.
        value : int or float
            Value of the point.
        """
        axis = assert_axis_in_bounds(axis, self.ndim)
        # Round to the nearest valid step, clamped into [0, nsteps-1].
        step = np.round(np.clip(value, 0, self.nsteps[axis] - 1)).astype(int)
        if self._current_step[axis] != step:
            full_current_step = list(self.current_step)
            full_current_step[axis] = step
            self.current_step = full_current_step
            self.last_used = axis

    def set_axis_label(self, axis: int, label: str):
        """Sets a new axis label for the given axis.

        Parameters
        ----------
        axis : int
            Dimension index
        label : str
            Given label
        """
        axis = assert_axis_in_bounds(axis, self.ndim)
        if self.axis_labels[axis] != str(label):
            full_axis_labels = list(self.axis_labels)
            full_axis_labels[axis] = str(label)
            self.axis_labels = full_axis_labels
            self.last_used = axis

    def reset(self):
        """Reset dims values to initial states."""
        # Don't reset axis labels
        self.range = ((0, 2, 1),) * self.ndim
        self.current_step = (0,) * self.ndim
        self.order = tuple(range(self.ndim))

    def _increment_dims_right(self, axis: int = None):
        """Increment dimensions to the right along given axis, or last used axis if None

        Parameters
        ----------
        axis : int, optional
            Axis along which to increment dims, by default None
        """
        if axis is None:
            axis = self.last_used
        self.set_current_step(axis, self.current_step[axis] + 1)

    def _increment_dims_left(self, axis: int = None):
        """Increment dimensions to the left along given axis, or last used axis if None

        Parameters
        ----------
        axis : int, optional
            Axis along which to increment dims, by default None
        """
        if axis is None:
            axis = self.last_used
        self.set_current_step(axis, self.current_step[axis] - 1)

    def _focus_up(self):
        """Shift focused dimension slider to be the next slider above."""
        # Only sliders with more than one step can take focus.
        sliders = [d for d in self.not_displayed if self.nsteps[d] > 1]
        if len(sliders) == 0:
            return
        index = (sliders.index(self.last_used) + 1) % len(sliders)
        self.last_used = sliders[index]

    def _focus_down(self):
        """Shift focused dimension slider to be the next slider below."""
        sliders = [d for d in self.not_displayed if self.nsteps[d] > 1]
        if len(sliders) == 0:
            return
        index = (sliders.index(self.last_used) - 1) % len(sliders)
        self.last_used = sliders[index]

    def _roll(self):
        """Roll order of dimensions for display."""
        # Only roll the dims that actually have multiple steps.
        order = np.array(self.order)
        nsteps = np.array(self.nsteps)
        order[nsteps > 1] = np.roll(order[nsteps > 1], 1)
        self.order = order

    def _transpose(self):
        """Transpose displayed dimensions."""
        order = list(self.order)
        order[-2], order[-1] = order[-1], order[-2]
        self.order = order
| [
"numpy.argsort",
"numpy.array",
"numpy.clip",
"numpy.roll"
] | [((707, 722), 'numpy.array', 'np.array', (['order'], {}), '(order)\n', (715, 722), True, 'import numpy as np\n'), ((731, 746), 'numpy.argsort', 'np.argsort', (['arr'], {}), '(arr)\n', (741, 746), True, 'import numpy as np\n'), ((7727, 7751), 'numpy.array', 'np.array', (['self.displayed'], {}), '(self.displayed)\n', (7735, 7751), True, 'import numpy as np\n'), ((12108, 12128), 'numpy.array', 'np.array', (['self.order'], {}), '(self.order)\n', (12116, 12128), True, 'import numpy as np\n'), ((12146, 12167), 'numpy.array', 'np.array', (['self.nsteps'], {}), '(self.nsteps)\n', (12154, 12167), True, 'import numpy as np\n'), ((12196, 12225), 'numpy.roll', 'np.roll', (['order[nsteps > 1]', '(1)'], {}), '(order[nsteps > 1], 1)\n', (12203, 12225), True, 'import numpy as np\n'), ((7766, 7783), 'numpy.argsort', 'np.argsort', (['order'], {}), '(order)\n', (7776, 7783), True, 'import numpy as np\n'), ((9479, 9519), 'numpy.clip', 'np.clip', (['value', '(0)', '(self.nsteps[axis] - 1)'], {}), '(value, 0, self.nsteps[axis] - 1)\n', (9486, 9519), True, 'import numpy as np\n')] |
""" Definition of power supply interfacing commands. """
import time
import numpy as np
from mqlab.connections import Instrument
class PowerSupply(Instrument):
    """Base class for MQ-lab power supplies, with slow current-ramping helpers."""

    def __init__(self, max_current_A, **kwargs):
        """ Init for power supply including a safety routine to limit accidental setting of erroneously high currents. """
        # Remember the safety limit, then hand remaining kwargs to Instrument.
        self.max_current_A = max_current_A
        super().__init__(**kwargs)

    def ramp_down(self):
        """ Slowly (over a few seconds) ramp the power down, e.g. to protect diodes. """
        self._set_current_before_ramp = self.get_current()
        ramp_targets = np.linspace(self._set_current_before_ramp, 0, 6)
        for target in ramp_targets:
            self.set_current(target)
            time.sleep(0.25)

    def ramp_back_up(self):
        """ Ramp back up to current before ramp_down was fired. """
        if not hasattr(self, '_set_current_before_ramp'):
            raise ValueError('This command only works after a ramp_down command execution.')
        ramp_targets = np.linspace(0, self._set_current_before_ramp, 6)
        for target in ramp_targets:
            self.set_current(target)
            time.sleep(0.25)
class HP6653A(PowerSupply):
    """ Interfacing code for HP6653A power supply. """

    def set_current(self, current):
        """ Set the current limit to the user specified current (A) maintaining all other settings. """
        # Guard clause: refuse anything above the configured safety limit.
        if current > self.max_current_A:
            raise ValueError('Entered current value [{} A] is above the device max current limit [{} A]. Check input and raise the current limit when initialising the device connection if needed.'.format(current, self.max_current_A))
        self.send('CURR {:.3f}'.format(current))

    def set_voltage(self, voltage):
        """ Set the voltage limit to the user specified voltage (V) maintaining all other settings. """
        self.send('VOLT {:.3f}'.format(voltage))

    def get_current(self):
        """ Return current [A]. """
        return self.query('CURR?', dtype=float)

    def get_voltage(self):
        """ Return voltage [V]. """
        return self.query('VOLT?', dtype=float)

    def set_output_off(self):
        """ Disable output. """
        self.send('OUTP OFF')

    def set_output_on(self):
        """ Enable output. """
        self.send('OUTP ON')

    # Virtual attributes so current/voltage can be read and assigned directly
    # (e.g. self.current = 1) instead of calling the getters/setters.
    current = property(get_current, set_current)
    voltage = property(get_voltage, set_voltage)
class Newport5600(PowerSupply):
    """ Interfacing code for Newport 5600 diode driver. """

    def set_current(self, current):
        """ Set the current limit to the user specified current (A) maintaining all other settings. """
        # Guard clause: refuse anything above the configured safety limit.
        if current > self.max_current_A:
            raise ValueError('Entered current value [{} A] is above the device max current limit [{} A]. Check input and raise the current limit when initialising the device connection if needed.'.format(current, self.max_current_A))
        self.send('LASer:LDI {:.3f}'.format(current))

    def set_voltage(self, voltage):
        """ Set the voltage limit to the user specified voltage (V) maintaining all other settings. """
        self.send('LASer:LDV {:.3f}'.format(voltage))

    def get_current(self):
        """ Return current [A]. """
        return self.query('LASer:LDI?', dtype=float)

    def get_voltage(self):
        """ Return voltage [V]. """
        return self.query('LASer:LDV?', dtype=float)

    def set_output_off(self):
        """ Disable output. """
        self.send('LASer:OUTput 0')

    def set_output_on(self):
        """ Enable output. """
        self.send('LASer:OUTput 1')
        # Record the turn-on time (used for ZBLAN fibre laser monitoring).
        self._turn_on_time = time.time()

    def print_on_time(self):
        """ Print the elapsed time since the output was last enabled. """
        elapsed = time.time() - self._turn_on_time
        minutes, seconds = divmod(elapsed, 60)
        formatted_time = '{:d}m {:0.1f}s'.format(int(minutes), seconds)
        print('Elapsed time since turn on: {}'.format(formatted_time))

    def current_kick(self, low_value, high_value, delay=1):
        """ Set current to low value for a given delay, then jump back up to high value - useful for kick-starting mode-locking! """
        self.set_current(low_value)
        time.sleep(delay)
        self.set_current(high_value)

    def multiple_current_kicks(self, low_value, high_values, delay=1, delay_between_kicks=5):
        """ Apply a sequence of current kicks, pausing between successive kicks. """
        for high_value in high_values:
            self.set_current(low_value)
            time.sleep(delay)
            print('{:.2f} A'.format(high_value))
            self.set_current(high_value)
            time.sleep(delay_between_kicks)

    # Virtual attributes so current/voltage can be read and assigned directly
    # (e.g. self.current = 1) instead of calling the getters/setters.
    current = property(get_current, set_current)
    voltage = property(get_voltage, set_voltage)
| [
"time.sleep",
"numpy.linspace",
"time.time"
] | [((702, 750), 'numpy.linspace', 'np.linspace', (['self._set_current_before_ramp', '(0)', '(6)'], {}), '(self._set_current_before_ramp, 0, 6)\n', (713, 750), True, 'import numpy as np\n'), ((1100, 1148), 'numpy.linspace', 'np.linspace', (['(0)', 'self._set_current_before_ramp', '(6)'], {}), '(0, self._set_current_before_ramp, 6)\n', (1111, 1148), True, 'import numpy as np\n'), ((3942, 3953), 'time.time', 'time.time', ([], {}), '()\n', (3951, 3953), False, 'import time\n'), ((4540, 4557), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (4550, 4557), False, 'import time\n'), ((804, 820), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (814, 820), False, 'import time\n'), ((1202, 1218), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (1212, 1218), False, 'import time\n'), ((4090, 4101), 'time.time', 'time.time', ([], {}), '()\n', (4099, 4101), False, 'import time\n'), ((4787, 4804), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (4797, 4804), False, 'import time\n'), ((4910, 4941), 'time.sleep', 'time.sleep', (['delay_between_kicks'], {}), '(delay_between_kicks)\n', (4920, 4941), False, 'import time\n')] |
from helper_tool import ConfigSemanticKITTI as cfg
from RandLANet import Network, compute_loss, compute_acc, IoUCalculator
from semantic_kitti_dataset import SemanticKITTI
import numpy as np
import os, argparse
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from datetime import datetime
# Command-line configuration for training.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', default='output/checkpoint.tar', help='Model checkpoint path [default: None]')
parser.add_argument('--log_dir', default='output', help='Dump dir to save model checkpoint [default: log]')
parser.add_argument('--max_epoch', type=int, default=400, help='Epoch to run [default: 180]')
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 8]')
FLAGS = parser.parse_args()
################################################# log #################################################
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)
# Opened in append mode so a restarted run keeps earlier log contents.
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')
def log_string(out_str):
    """Append ``out_str`` to the training log file and echo it to stdout."""
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()  # flush immediately so the log survives a crash
    print(out_str)
################################################# dataset #################################################
# Init datasets and dataloaders
def my_worker_init_fn(worker_id):
    """Seed numpy differently in each DataLoader worker (base seed + worker id)."""
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
# Create Dataset and Dataloader
TRAIN_DATASET = SemanticKITTI('training')
TEST_DATASET = SemanticKITTI('validation')
print(len(TRAIN_DATASET), len(TEST_DATASET))
TRAIN_DATALOADER = DataLoader(TRAIN_DATASET, batch_size=FLAGS.batch_size, shuffle=True, num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=TRAIN_DATASET.collate_fn)
TEST_DATALOADER = DataLoader(TEST_DATASET, batch_size=FLAGS.batch_size, shuffle=True, num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=TEST_DATASET.collate_fn)
print(len(TRAIN_DATALOADER), len(TEST_DATALOADER))
################################################# network #################################################
# NOTE(review): this assigns a plain Python variable and does NOT restrict GPU
# visibility; the intent was probably os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# set before any CUDA initialisation — confirm and fix.
CUDA_VISIBLE_DEVICES=2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = Network(cfg)
net.to(device)
# Load the Adam optimizer
optimizer = optim.Adam(net.parameters(), lr=cfg.learning_rate)
# Load checkpoint if there is any
it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler`
start_epoch = 0
CHECKPOINT_PATH = FLAGS.checkpoint_path
if CHECKPOINT_PATH is not None and os.path.isfile(CHECKPOINT_PATH):
    checkpoint = torch.load(CHECKPOINT_PATH)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch']
    log_string("-> loaded checkpoint %s (epoch: %d)"%(CHECKPOINT_PATH, start_epoch))
# Multi GPU
if torch.cuda.device_count() > 1:
    log_string("Let's use %d GPUs!" % (torch.cuda.device_count()))
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    net = nn.DataParallel(net)
################################################# training functions ###########################################
def adjust_learning_rate(optimizer, epoch):
    """Multiply every param group's learning rate by this epoch's decay factor from cfg."""
    decayed_lr = optimizer.param_groups[0]['lr'] * cfg.lr_decays[epoch]
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
def train_one_epoch():
    """Run one training pass over TRAIN_DATALOADER, logging loss/acc/IoU statistics."""
    stat_dict = {} # collect statistics
    adjust_learning_rate(optimizer, EPOCH_CNT)
    net.train() # set model to training mode
    iou_calc = IoUCalculator(cfg)
    for batch_idx, batch_data in enumerate(TRAIN_DATALOADER):
        # Move every tensor (including tensors nested inside lists) to the GPU.
        for key in batch_data:
            if type(batch_data[key]) is list:
                for i in range(len(batch_data[key])):
                    batch_data[key][i] = batch_data[key][i].cuda()
            else:
                batch_data[key] = batch_data[key].cuda()
        # Forward pass
        optimizer.zero_grad()
        end_points = net(batch_data)
        loss, end_points = compute_loss(end_points, cfg)
        loss.backward()
        optimizer.step()
        acc, end_points = compute_acc(end_points)
        iou_calc.add_data(end_points)
        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'iou' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()
        batch_interval = 10
        if (batch_idx + 1) % batch_interval == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1))
            # TRAIN_VISUALIZER.log_scalars({key:stat_dict[key]/batch_interval for key in stat_dict},
            # (EPOCH_CNT*len(TRAIN_DATALOADER)+batch_idx)*BATCH_SIZE)
            for key in sorted(stat_dict.keys()):
                log_string('mean %s: %f' % (key, stat_dict[key] / batch_interval))
                # Reset running sums at every reporting interval.
                stat_dict[key] = 0
    mean_iou, iou_list = iou_calc.compute_iou()
    log_string('mean IoU:{:.1f}'.format(mean_iou * 100))
    s = 'IoU:'
    for iou_tmp in iou_list:
        s += '{:5.2f} '.format(100 * iou_tmp)
    log_string(s)
def evaluate_one_epoch():
    """Run one validation pass over TEST_DATALOADER, logging loss/acc/IoU statistics."""
    stat_dict = {} # collect statistics
    net.eval() # set model to eval mode (for bn and dp)
    iou_calc = IoUCalculator(cfg)
    for batch_idx, batch_data in enumerate(TEST_DATALOADER):
        # Move every tensor (including tensors nested inside lists) to the GPU.
        for key in batch_data:
            if type(batch_data[key]) is list:
                for i in range(len(batch_data[key])):
                    batch_data[key][i] = batch_data[key][i].cuda()
            else:
                batch_data[key] = batch_data[key].cuda()
        # Forward pass
        # no_grad: inference only, so skip building the autograd graph.
        with torch.no_grad():
            end_points = net(batch_data)
        loss, end_points = compute_loss(end_points, cfg)
        acc, end_points = compute_acc(end_points)
        iou_calc.add_data(end_points)
        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'iou' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()
        batch_interval = 10
        if (batch_idx + 1) % batch_interval == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1))
    # Report the epoch-wide running means, then the per-class IoU.
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))
    mean_iou, iou_list = iou_calc.compute_iou()
    log_string('mean IoU:{:.1f}'.format(mean_iou * 100))
    s = 'IoU:'
    for iou_tmp in iou_list:
        s += '{:5.2f} '.format(100 * iou_tmp)
    log_string(s)
def train(start_epoch):
    """Main loop: train each epoch, evaluate every 10th, checkpoint after every epoch."""
    global EPOCH_CNT
    loss = 0
    for epoch in range(start_epoch, FLAGS.max_epoch):
        EPOCH_CNT = epoch
        log_string('**** EPOCH %03d ****' % (epoch))
        log_string(str(datetime.now()))
        # Re-seed from OS entropy so each epoch shuffles/augments differently.
        np.random.seed()
        train_one_epoch()
        if EPOCH_CNT == 0 or EPOCH_CNT % 10 == 9: # Eval every 10 epochs
            log_string('**** EVAL EPOCH %03d START****' % (epoch))
            evaluate_one_epoch()
            log_string('**** EVAL EPOCH %03d END****' % (epoch))
        # Save checkpoint
        save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1
                     'optimizer_state_dict': optimizer.state_dict(),
                     'loss': loss,
                     }
        try: # with nn.DataParallel() the net is added as a submodule of DataParallel
            save_dict['model_state_dict'] = net.module.state_dict()
        except:
            # NOTE(review): bare except; catching AttributeError would be more precise.
            save_dict['model_state_dict'] = net.state_dict()
        torch.save(save_dict, os.path.join(LOG_DIR, 'checkpoint.tar'))
if __name__ == '__main__':
    # start_epoch is 0, or the epoch restored from the checkpoint above.
    train(start_epoch)
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.cuda.device_count",
"os.path.isfile",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"RandLANet.IoUCalculator",
"datetime.datetime.now",
"RandLANet.compute_acc",
"torch.cuda.... | [((356, 381), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (379, 381), False, 'import os, argparse\n'), ((1475, 1500), 'semantic_kitti_dataset.SemanticKITTI', 'SemanticKITTI', (['"""training"""'], {}), "('training')\n", (1488, 1500), False, 'from semantic_kitti_dataset import SemanticKITTI\n'), ((1516, 1543), 'semantic_kitti_dataset.SemanticKITTI', 'SemanticKITTI', (['"""validation"""'], {}), "('validation')\n", (1529, 1543), False, 'from semantic_kitti_dataset import SemanticKITTI\n'), ((1608, 1772), 'torch.utils.data.DataLoader', 'DataLoader', (['TRAIN_DATASET'], {'batch_size': 'FLAGS.batch_size', 'shuffle': '(True)', 'num_workers': '(20)', 'worker_init_fn': 'my_worker_init_fn', 'collate_fn': 'TRAIN_DATASET.collate_fn'}), '(TRAIN_DATASET, batch_size=FLAGS.batch_size, shuffle=True,\n num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=\n TRAIN_DATASET.collate_fn)\n', (1618, 1772), False, 'from torch.utils.data import DataLoader\n'), ((1782, 1944), 'torch.utils.data.DataLoader', 'DataLoader', (['TEST_DATASET'], {'batch_size': 'FLAGS.batch_size', 'shuffle': '(True)', 'num_workers': '(20)', 'worker_init_fn': 'my_worker_init_fn', 'collate_fn': 'TEST_DATASET.collate_fn'}), '(TEST_DATASET, batch_size=FLAGS.batch_size, shuffle=True,\n num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=\n TEST_DATASET.collate_fn)\n', (1792, 1944), False, 'from torch.utils.data import DataLoader\n'), ((2202, 2214), 'RandLANet.Network', 'Network', (['cfg'], {}), '(cfg)\n', (2209, 2214), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((977, 1000), 'os.path.exists', 'os.path.exists', (['LOG_DIR'], {}), '(LOG_DIR)\n', (991, 1000), False, 'import os, argparse\n'), ((1006, 1023), 'os.mkdir', 'os.mkdir', (['LOG_DIR'], {}), '(LOG_DIR)\n', (1014, 1023), False, 'import os, argparse\n'), ((1040, 1078), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""log_train.txt"""'], {}), "(LOG_DIR, 
'log_train.txt')\n", (1052, 1078), False, 'import os, argparse\n'), ((2521, 2552), 'os.path.isfile', 'os.path.isfile', (['CHECKPOINT_PATH'], {}), '(CHECKPOINT_PATH)\n', (2535, 2552), False, 'import os, argparse\n'), ((2571, 2598), 'torch.load', 'torch.load', (['CHECKPOINT_PATH'], {}), '(CHECKPOINT_PATH)\n', (2581, 2598), False, 'import torch\n'), ((2860, 2885), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2883, 2885), False, 'import torch\n'), ((3037, 3057), 'torch.nn.DataParallel', 'nn.DataParallel', (['net'], {}), '(net)\n', (3052, 3057), True, 'import torch.nn as nn\n'), ((3553, 3571), 'RandLANet.IoUCalculator', 'IoUCalculator', (['cfg'], {}), '(cfg)\n', (3566, 3571), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((5291, 5309), 'RandLANet.IoUCalculator', 'IoUCalculator', (['cfg'], {}), '(cfg)\n', (5304, 5309), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((2158, 2183), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2181, 2183), False, 'import torch\n'), ((4026, 4055), 'RandLANet.compute_loss', 'compute_loss', (['end_points', 'cfg'], {}), '(end_points, cfg)\n', (4038, 4055), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((4132, 4155), 'RandLANet.compute_acc', 'compute_acc', (['end_points'], {}), '(end_points)\n', (4143, 4155), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((5767, 5796), 'RandLANet.compute_loss', 'compute_loss', (['end_points', 'cfg'], {}), '(end_points, cfg)\n', (5779, 5796), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((5824, 5847), 'RandLANet.compute_acc', 'compute_acc', (['end_points'], {}), '(end_points)\n', (5835, 5847), False, 'from RandLANet import Network, compute_loss, compute_acc, IoUCalculator\n'), ((6869, 6885), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (6883, 6885), True, 
'import numpy as np\n'), ((2930, 2955), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2953, 2955), False, 'import torch\n'), ((5681, 5696), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5694, 5696), False, 'import torch\n'), ((7663, 7702), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""checkpoint.tar"""'], {}), "(LOG_DIR, 'checkpoint.tar')\n", (7675, 7702), False, 'import os, argparse\n'), ((6843, 6857), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6855, 6857), False, 'from datetime import datetime\n'), ((1385, 1406), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1404, 1406), True, 'import numpy as np\n')] |
import functools
import os
import random
from typing import List
import numpy
# Transition weights for a 3-state Markov chain over states A, B, C.
i = 7
j = 7
k = 5
ij = i + j
ijk = i + j + k
# A - 0, B - 1, C - 2
# Row-stochastic transition matrix P (each row sums to 1).
P = numpy.array(
    [[0, i / ij, j / ij],
     [i / ijk, k / ijk, j / ijk],
     [j / ij, i / ij, 0]]
)
print("P")
print(P)
# Left eigenvectors of P = right eigenvectors of P^T; the eigenvector for
# eigenvalue 1 is the stationary distribution (up to scaling).
w, v = numpy.linalg.eig(P.transpose())
print(v)
# NOTE(review): this assumes the eigenvalue-1 eigenvector is in column 0
# (numpy.linalg.eig does not sort eigenvalues) and that its components come
# out negative (hence the -1 sign flip) -- fragile, confirm for other P.
v = -1 * v[:, 0]
print("v:", w[0], v)
# Normalize so the components sum to 1, giving the stationary distribution.
s = functools.reduce(lambda x, y: x + y, v)
v = v / s
print("v:", functools.reduce(lambda x, y: x + y, v), v)
# T: matrix with the stationary distribution in every row.
T = numpy.array([v, v, v])
# Fundamental matrix Z = (I - (P - T))^-1 (Kemeny-Snell construction).
Z = numpy.linalg.inv(numpy.identity(3) - (P - T))
print("Z", Z)
I = numpy.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
# D: diagonal matrix of reciprocal stationary probabilities.
D = numpy.diag([1 / v[0], 1 / v[1], 1 / v[2]])
ZDiag = numpy.diag(Z.diagonal())
# M[i, j]: mean first passage time from state i to state j,
# M = (I - Z + E * Z_dg) * D per the Kemeny-Snell formula.
M = (numpy.identity(3) - Z + I.dot(ZDiag)).dot(D)
print(M)
# simulation
def interval_choose(x: float, chanses) -> int:
    """Sample an index from a discrete distribution via inverse-CDF lookup.

    Returns the index ``i`` of the first prefix sum of ``chanses`` that
    exceeds ``x`` (so ``x`` drawn uniformly from [0, 1) picks index ``i``
    with probability ``chanses[i]``).

    :param x: a number in [0, 1), typically from ``random.random()``.
    :param chanses: iterable of probabilities summing to (at least) 1.
        (Parameter name kept -- misspelling of "chances" -- for
        backward compatibility with keyword callers.)
    :raises ValueError: if the probabilities sum to less than or equal to
        ``x`` (i.e. the distribution is not stochastic).
    """
    accum = 0
    for i, chance in enumerate(chanses):
        accum += chance
        if x < accum:
            return i
    # Fixed the misspelled message ("stohastic"); ValueError is a subclass of
    # Exception, so existing `except Exception` handlers still catch it.
    raise ValueError("chances must be stochastic")
# Monte-Carlo estimate of the mean first passage time matrix M computed above.
simulations: int = 1_000
iterations: int = 1_000
# V[s, i]: accumulated number of steps taken to reach state s since last
# leaving state i; N[s, i]: number of such arrivals. A = V / N estimates M^T.
V = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
N = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
# Pre-generate all random draws so each simulation consumes its own slice.
randoms: List[float] = list()
for _ in range(simulations * iterations):
    randoms.append(random.random())
for sim_num in range(simulations):
    if sim_num % 1000 == 0:
        print(sim_num)
    state: int = 0
    # steps[i]: steps elapsed since the chain last left state i (0 = inactive).
    steps: List[int] = [0, 0, 0]
    # NOTE(review): the literal 1000 stride only matches the slice of
    # `randoms` because iterations == 1000; should be `iterations * sim_num`.
    for iter_num in range(1000 * sim_num, 1000 * sim_num + iterations):
        for i in range(3):
            if state == i or steps[i] != 0:
                steps[i] += 1
        state = interval_choose(randoms[iter_num], P[state])
        for i in range(3):
            if steps[i] != 0:
                V[state, i] += steps[i]
                N[state, i] += 1
        # Arrival at `state` resets its passage-time counter.
        steps[state] = 0
A = numpy.empty((3, 3))
for i in range(3):
    for j in range(3):
        A[i, j] = V[i, j] / N[i, j]
print(A)
def matrix_as_table(m: numpy.ndarray, left: List[str], top: List[str]) -> str:
    """Render a 2-D array as a Markdown table.

    :param m: 2-D array of values, formatted with at most 4 significant digits.
    :param left: row labels (one per row of ``m``).
    :param top: column headers (one per column of ``m``).
    :return: Markdown table string with a header row, a separator row, and
        one row per row of ``m``.
    """
    # Fix: numpy ``shape`` is (rows, cols); the original unpacked it as
    # ``w, h`` and then used them swapped, which raised IndexError (or
    # mislabeled the table) for non-square matrices. Square matrices, the
    # only case exercised by this script, are unaffected.
    rows, cols = m.shape
    output = "| \\ |"
    for j in range(cols):
        output += f" {top[j]} |"
    output += "\n|" + ("---|" * (cols + 1)) + "\n"
    for i in range(rows):
        output += f"| {left[i]} |"
        for j in range(cols):
            output += f" {m[i,j]:5.4} |"
        output += "\n"
    return output
def vector_as_table(v: numpy.ndarray, top: List[str]) -> str:
    """Render a 1-D array as a single-row Markdown table.

    :param v: 1-D array of values, formatted with at most 4 significant digits.
    :param top: column headers (one per element of ``v``).
    :return: Markdown table string: header row, separator row, value row.
    """
    [n] = v.shape
    header = "|" + "".join(f" {top[k]} |" for k in range(n))
    separator = "|" + "---|" * n
    values = "|" + "".join(f" {v[k]:5.4} |" for k in range(n))
    return header + "\n" + separator + "\n" + values + "\n"
c = ["A", "B", "C"]
try:
os.remove("./report.md")
except:
pass
finally:
with open("./schema.md", "r") as income, open("./report.md", "w") as outcome:
content = income.read()
outcome.write(content.format(simulations, iterations, matrix_as_table(P, c, c), vector_as_table(v, c), matrix_as_table(M, c, c), matrix_as_table(A, c,c))) | [
"os.remove",
"numpy.empty",
"numpy.identity",
"random.random",
"numpy.array",
"functools.reduce",
"numpy.diag"
] | [((152, 240), 'numpy.array', 'numpy.array', (['[[0, i / ij, j / ij], [i / ijk, k / ijk, j / ijk], [j / ij, i / ij, 0]]'], {}), '([[0, i / ij, j / ij], [i / ijk, k / ijk, j / ijk], [j / ij, i /\n ij, 0]])\n', (163, 240), False, 'import numpy\n'), ((364, 403), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + y)', 'v'], {}), '(lambda x, y: x + y, v)\n', (380, 403), False, 'import functools\n'), ((475, 497), 'numpy.array', 'numpy.array', (['[v, v, v]'], {}), '([v, v, v])\n', (486, 497), False, 'import numpy\n'), ((567, 613), 'numpy.array', 'numpy.array', (['[[1, 1, 1], [1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n', (578, 613), False, 'import numpy\n'), ((618, 660), 'numpy.diag', 'numpy.diag', (['[1 / v[0], 1 / v[1], 1 / v[2]]'], {}), '([1 / v[0], 1 / v[1], 1 / v[2]])\n', (628, 660), False, 'import numpy\n'), ((1044, 1090), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (1055, 1090), False, 'import numpy\n'), ((1095, 1141), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (1106, 1141), False, 'import numpy\n'), ((1784, 1803), 'numpy.empty', 'numpy.empty', (['(3, 3)'], {}), '((3, 3))\n', (1795, 1803), False, 'import numpy\n'), ((426, 465), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + y)', 'v'], {}), '(lambda x, y: x + y, v)\n', (442, 465), False, 'import functools\n'), ((2608, 2632), 'os.remove', 'os.remove', (['"""./report.md"""'], {}), "('./report.md')\n", (2617, 2632), False, 'import os\n'), ((519, 536), 'numpy.identity', 'numpy.identity', (['(3)'], {}), '(3)\n', (533, 536), False, 'import numpy\n'), ((1233, 1248), 'random.random', 'random.random', ([], {}), '()\n', (1246, 1248), False, 'import random\n'), ((699, 716), 'numpy.identity', 'numpy.identity', (['(3)'], {}), '(3)\n', (713, 716), False, 'import numpy\n')] |
import numpy as np
from badgr.utils.np_utils import imresize
from badgr.utils.python_utils import AttrDict
class EnvSpec(object):
    """Specification of an environment's observation/action names, shapes,
    value limits and dtypes, stored as nested :class:`AttrDict` lookups.
    """

    def __init__(self, names_shapes_limits_dtypes):
        """
        :param names_shapes_limits_dtypes: iterable of
            ``(name, shape, (lower, upper), dtype)`` tuples.
        """
        names_shapes_limits_dtypes = list(names_shapes_limits_dtypes)
        # The 'done' flag is always part of the spec.
        # Fix: ``np.bool`` was a deprecated alias for the builtin ``bool``
        # and was removed in NumPy >= 1.24; use the builtin directly.
        names_shapes_limits_dtypes += [('done', (1,), (0, 1), bool)]

        self._names_to_shapes = AttrDict()
        self._names_to_limits = AttrDict()
        self._names_to_dtypes = AttrDict()
        for name, shape, limit, dtype in names_shapes_limits_dtypes:
            self._names_to_shapes.add_recursive(name, shape)
            self._names_to_limits.add_recursive(name, limit)
            self._names_to_dtypes.add_recursive(name, dtype)

    @property
    def observation_names(self):
        raise NotImplementedError

    @property
    def output_observation_names(self):
        return self.observation_names

    @property
    def action_names(self):
        raise NotImplementedError

    @property
    def names(self):
        return self.observation_names + self.action_names

    @property
    def names_to_shapes(self):
        return self._names_to_shapes

    @property
    def names_to_limits(self):
        return self._names_to_limits

    @property
    def names_to_dtypes(self):
        return self._names_to_dtypes

    def dims(self, names):
        """Return an array with the flattened dimensionality of each name."""
        return np.array([np.sum(self.names_to_shapes.get_recursive(name)) for name in names])

    def dim(self, names):
        """Return the total flattened dimensionality of all names combined."""
        return np.sum(self.dims(names))

    def _limits_mean_std(self, key):
        """Return ``(mean, half_range)`` of the limits registered for *key*."""
        lower, upper = self.names_to_limits.get_recursive(key)
        lower, upper = np.array(lower), np.array(upper)
        return 0.5 * (lower + upper), 0.5 * (upper - lower)

    def normalize(self, inputs):
        """Map each leaf value from its ``[lower, upper]`` limits to ``[-1, 1]``.

        :param inputs (AttrDict):
        :return: AttrDict
        """
        inputs_normalized = AttrDict()
        for key, value in inputs.get_leaf_items():
            mean, std = self._limits_mean_std(key)
            inputs_normalized.add_recursive(key, (value - mean) / std)
        return inputs_normalized

    def denormalize(self, inputs):
        """Inverse of :meth:`normalize`: map ``[-1, 1]`` back to ``[lower, upper]``.

        :param inputs (AttrDict):
        :return: AttrDict
        """
        inputs_denormalized = AttrDict()
        for key, value in inputs.get_leaf_items():
            mean, std = self._limits_mean_std(key)
            inputs_denormalized.add_recursive(key, value * std + mean)
        return inputs_denormalized

    def process_image(self, name, image):
        """
        Default behavior: resize the image to the shape registered for *name*.
        A batch of images (4-D input) is resized image by image.
        """
        if len(image.shape) == 4:
            return np.array([self.process_image(name, im_i) for im_i in image])
        return imresize(image, self.names_to_shapes.get_recursive(name))
class Env(object):
    """Abstract base class for environments described by an :class:`EnvSpec`."""

    def __init__(self, env_spec, params):
        """
        :param env_spec: the :class:`EnvSpec` describing observations/actions.
        :param params: configuration object (unused by the base class).
        """
        self.spec = env_spec

    def step(self, get_action):
        """Advance the environment one step.

        Subclasses must override and return ``(obs, goal, done)``.
        (The original had an unreachable ``return obs, goal, done`` after the
        raise, referencing undefined names; removed.)
        """
        raise NotImplementedError

    def reset(self):
        """Reset the environment.

        Subclasses must override and return ``(obs, goal)``.
        """
        raise NotImplementedError
| [
"badgr.utils.python_utils.AttrDict",
"numpy.array"
] | [((361, 371), 'badgr.utils.python_utils.AttrDict', 'AttrDict', ([], {}), '()\n', (369, 371), False, 'from badgr.utils.python_utils import AttrDict\n'), ((404, 414), 'badgr.utils.python_utils.AttrDict', 'AttrDict', ([], {}), '()\n', (412, 414), False, 'from badgr.utils.python_utils import AttrDict\n'), ((447, 457), 'badgr.utils.python_utils.AttrDict', 'AttrDict', ([], {}), '()\n', (455, 457), False, 'from badgr.utils.python_utils import AttrDict\n'), ((1640, 1650), 'badgr.utils.python_utils.AttrDict', 'AttrDict', ([], {}), '()\n', (1648, 1650), False, 'from badgr.utils.python_utils import AttrDict\n'), ((2216, 2226), 'badgr.utils.python_utils.AttrDict', 'AttrDict', ([], {}), '()\n', (2224, 2226), False, 'from badgr.utils.python_utils import AttrDict\n'), ((1797, 1812), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (1805, 1812), True, 'import numpy as np\n'), ((1814, 1829), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (1822, 1829), True, 'import numpy as np\n'), ((2373, 2388), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (2381, 2388), True, 'import numpy as np\n'), ((2390, 2405), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (2398, 2405), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import itertools
from pprint import pprint
import numpy as np
from IPython.core.display import display
from ipywidgets import HTML, HBox, widgets, Layout
import matplotlib.pyplot as plt
from pymor.core.config import config
from pymor.discretizers.builtin.gui.matplotlib import MatplotlibPatchAxes, Matplotlib1DAxes
from pymor.vectorarrays.interface import VectorArray
class MPLPlotBase:
    """Base class for matplotlib-backed visualizers of |VectorArray| data.

    Subclasses must provide ``_set_limits(np_U)`` setting ``self.vmins`` /
    ``self.vmaxs`` before plotting. Depending on the data, the instance
    exposes either ``_repr_html_`` (animation) or ``_ipython_display_``
    (static output widget) for the IPython display system.
    """
    def __init__(self, U, grid, codim, legend, bounding_box=None, separate_colorbars=False, columns=2,
                 separate_plots=False, separate_axes=False):
        """
        :param U: |VectorArray| (or tuple of |VectorArrays| of equal length)
            holding the data to plot.
        :param grid: the grid on which the data lives (1-D or 2-D).
        :param codim: codimension of the entities the data is attached to.
        :param legend: title string or tuple of titles, one per array.
        :param bounding_box: bounding box of the grid (2-D plots only).
        :param separate_colorbars: use one colorbar per subplot.
        :param columns: number of subplot columns.
        :param separate_plots: one figure per array instead of a shared one.
        :param separate_axes: one axes per time step instead of an animation.
        """
        # U must be a VectorArray or a tuple of VectorArrays of equal length.
        assert isinstance(U, VectorArray) \
               or (isinstance(U, tuple)
                   and all(isinstance(u, VectorArray) for u in U)
                   and all(len(u) == len(U[0]) for u in U))
        if separate_plots:
            self.fig_ids = (U.uid,) if isinstance(U, VectorArray) else tuple(u.uid for u in U)
        else:
            # using the same id multiple times lets us automagically re-use the same figure
            self.fig_ids = (U.uid,) if isinstance(U, VectorArray) else [U[0].uid] * len(U)
        # Convert to a tuple of float64 numpy arrays for plotting.
        self.U = U = (U.to_numpy().astype(np.float64, copy=False),) if isinstance(U, VectorArray) else \
            tuple(u.to_numpy().astype(np.float64, copy=False) for u in U)
        if grid.dim == 1 and len(U[0]) > 1 and not separate_plots:
            raise NotImplementedError('Plotting of VectorArrays with length > 1 is only available with '
                                      '`separate_plots=True`')
        if not config.HAVE_MATPLOTLIB:
            raise ImportError('cannot visualize: import of matplotlib failed')
        # ipywidgets is only needed for time series (length > 1).
        if not config.HAVE_IPYWIDGETS and len(U[0]) > 1:
            raise ImportError('cannot visualize: import of ipywidgets failed')
        self.legend = (legend,) if isinstance(legend, str) else legend
        assert self.legend is None or isinstance(self.legend, tuple) and len(self.legend) == len(U)
        # Subclass hook: sets self.vmins / self.vmaxs used below.
        self._set_limits(U)
        self.plots = []
        # this _supposed_ to let animations run in sync
        sync_timer = None
        # Animate when there are multiple time steps shown in shared axes.
        do_animation = not separate_axes and len(U[0]) > 1
        if separate_plots:
            # One figure per array; reuse the first figure's timer for sync.
            for i, (vmin, vmax, u) in enumerate(zip(self.vmins, self.vmaxs, U)):
                figure = plt.figure(self.fig_ids[i])
                sync_timer = sync_timer or figure.canvas.new_timer()
                if grid.dim == 2:
                    plot = MatplotlibPatchAxes(U=u, figure=figure, sync_timer=sync_timer, grid=grid, vmin=vmin, vmax=vmax,
                                               bounding_box=bounding_box, codim=codim, columns=columns,
                                               colorbar=separate_colorbars or i == len(U) - 1)
                else:
                    plot = Matplotlib1DAxes(U=u, figure=figure, sync_timer=sync_timer, grid=grid, vmin=vmin, vmax=vmax,
                                            columns=columns, codim=codim, separate_axes=separate_axes)
                if self.legend:
                    plot.ax[0].set_title(self.legend[i])
                self.plots.append(plot)
            # plt.tight_layout()
        else:
            # Single shared figure for all arrays.
            figure = plt.figure(self.fig_ids[0])
            sync_timer = sync_timer or figure.canvas.new_timer()
            if grid.dim == 2:
                plot = MatplotlibPatchAxes(U=U, figure=figure, sync_timer=sync_timer, grid=grid, vmin=self.vmins,
                                           vmax=self.vmaxs, bounding_box=bounding_box, codim=codim, columns=columns,
                                           colorbar=True)
            else:
                plot = Matplotlib1DAxes(U=U, figure=figure, sync_timer=sync_timer, grid=grid, vmin=self.vmins,
                                        vmax=self.vmaxs, columns=columns, codim=codim, separate_axes=separate_axes)
            if self.legend:
                plot.ax[0].set_title(self.legend[0])
            self.plots.append(plot)
        if do_animation:
            for fig_id in self.fig_ids:
                # avoids figure double display
                plt.close(fig_id)
            html = [p.html for p in self.plots]
            template = """<div style="float: left; padding: 10px;">{0}</div>"""
            # IPython display system checks for presence and calls this func
            self._repr_html_ = lambda : '\n'.join(template.format(a._repr_html_()) for a in html)
        else:
            self._out = widgets.Output()
            with self._out:
                plt.show()
            # IPython display system checks for presence and calls this func
            self._ipython_display_ = self._out._ipython_display_
def visualize_patch(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,
                    separate_colorbars=False, rescale_colorbars=False, columns=2):
    """Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.

    The grid's |ReferenceElement| must be the triangle or square. The data can either
    be attached to the faces or vertices of the grid.

    Parameters
    ----------
    grid
        The underlying |Grid|.
    U
        |VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
        as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
        provided, in which case a subplot is created for each entry of the tuple. The
        lengths of all arrays have to agree.
    bounding_box
        A bounding box in which the grid is contained.
    codim
        The codimension of the entities the data in `U` is attached to (either 0 or 2).
    title
        Title of the plot.
    legend
        Description of the data that is plotted. Most useful if `U` is a tuple in which
        case `legend` has to be a tuple of strings of the same length.
    separate_colorbars
        If `True`, use separate colorbars for each subplot.
    rescale_colorbars
        If `True`, rescale colorbars to data in each frame.
    columns
        The number of columns in the visualizer GUI in case multiple plots are displayed
        at the same time.
    """

    class Plot(MPLPlotBase):

        def _set_limits(self, np_U):
            # When rescaling per frame, limits are derived from the first
            # frame only (rescaling itself is not set up yet).
            frames = [u[0] if rescale_colorbars else u for u in np_U]
            lower = [np.min(f) for f in frames]
            upper = [np.max(f) for f in frames]
            if separate_colorbars:
                # individual color range per subplot
                self.vmins = tuple(lower)
                self.vmaxs = tuple(upper)
            else:
                # one common color range, repeated for every subplot
                self.vmins = (min(lower),) * len(np_U)
                self.vmaxs = (max(upper),) * len(np_U)

        def __init__(self):
            super().__init__(U, grid, codim, legend, bounding_box=bounding_box, columns=columns,
                             separate_colorbars=separate_colorbars, separate_plots=True,
                             separate_axes=False)

    return Plot()
def visualize_matplotlib_1d(grid, U, codim=1, title=None, legend=None, separate_plots=True, separate_axes=False,
                            columns=2):
    """Visualize scalar data associated to a one-dimensional |Grid| as a plot.

    The grid's |ReferenceElement| must be the line. The data can either
    be attached to the subintervals or vertices of the grid.

    Parameters
    ----------
    grid
        The underlying |Grid|.
    U
        |VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
        as an animation in a single axes object or a series of axes, depending on the
        `separate_axes` switch. It is also possible to provide a tuple of |VectorArrays|,
        in which case several plots are made into one or multiple figures,
        depending on the `separate_plots` switch. The lengths of all arrays have to agree.
    codim
        The codimension of the entities the data in `U` is attached to (either 0 or 1).
    title
        Title of the plot.
    legend
        Description of the data that is plotted. Most useful if `U` is a tuple in which
        case `legend` has to be a tuple of strings of the same length.
    separate_plots
        If `True`, use multiple figures to visualize multiple |VectorArrays|.
    separate_axes
        If `True`, use separate axes for each figure instead of an Animation.
    columns
        Number of columns the subplots are organized in.
    """

    class Plot(MPLPlotBase):

        def _set_limits(self, np_U):
            lower = [np.min(u) for u in np_U]
            upper = [np.max(u) for u in np_U]
            if not separate_plots:
                # single shared figure: scalar limits spanning all arrays
                self.vmins = min(lower)
                self.vmaxs = max(upper)
            elif separate_axes:
                # one axes per array: individual limits per array
                self.vmins = tuple(lower)
                self.vmaxs = tuple(upper)
            else:
                # one figure per array, animated: common limits repeated
                self.vmins = (min(lower),) * len(np_U)
                self.vmaxs = (max(upper),) * len(np_U)

        def __init__(self):
            super().__init__(U, grid, codim, legend, separate_plots=separate_plots, columns=columns,
                             separate_axes=separate_axes)

    return Plot()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"pymor.discretizers.builtin.gui.matplotlib.MatplotlibPatchAxes",
"matplotlib.pyplot.figure",
"numpy.min",
"ipywidgets.widgets.Output",
"numpy.max",
"pymor.discretizers.builtin.gui.matplotlib.Matplotlib1DAxes"
] | [((3378, 3405), 'matplotlib.pyplot.figure', 'plt.figure', (['self.fig_ids[0]'], {}), '(self.fig_ids[0])\n', (3388, 3405), True, 'import matplotlib.pyplot as plt\n'), ((4640, 4656), 'ipywidgets.widgets.Output', 'widgets.Output', ([], {}), '()\n', (4654, 4656), False, 'from ipywidgets import HTML, HBox, widgets, Layout\n'), ((2475, 2502), 'matplotlib.pyplot.figure', 'plt.figure', (['self.fig_ids[i]'], {}), '(self.fig_ids[i])\n', (2485, 2502), True, 'import matplotlib.pyplot as plt\n'), ((3524, 3712), 'pymor.discretizers.builtin.gui.matplotlib.MatplotlibPatchAxes', 'MatplotlibPatchAxes', ([], {'U': 'U', 'figure': 'figure', 'sync_timer': 'sync_timer', 'grid': 'grid', 'vmin': 'self.vmins', 'vmax': 'self.vmaxs', 'bounding_box': 'bounding_box', 'codim': 'codim', 'columns': 'columns', 'colorbar': '(True)'}), '(U=U, figure=figure, sync_timer=sync_timer, grid=grid,\n vmin=self.vmins, vmax=self.vmaxs, bounding_box=bounding_box, codim=\n codim, columns=columns, colorbar=True)\n', (3543, 3712), False, 'from pymor.discretizers.builtin.gui.matplotlib import MatplotlibPatchAxes, Matplotlib1DAxes\n'), ((3831, 4003), 'pymor.discretizers.builtin.gui.matplotlib.Matplotlib1DAxes', 'Matplotlib1DAxes', ([], {'U': 'U', 'figure': 'figure', 'sync_timer': 'sync_timer', 'grid': 'grid', 'vmin': 'self.vmins', 'vmax': 'self.vmaxs', 'columns': 'columns', 'codim': 'codim', 'separate_axes': 'separate_axes'}), '(U=U, figure=figure, sync_timer=sync_timer, grid=grid, vmin\n =self.vmins, vmax=self.vmaxs, columns=columns, codim=codim,\n separate_axes=separate_axes)\n', (3847, 4003), False, 'from pymor.discretizers.builtin.gui.matplotlib import MatplotlibPatchAxes, Matplotlib1DAxes\n'), ((4281, 4298), 'matplotlib.pyplot.close', 'plt.close', (['fig_id'], {}), '(fig_id)\n', (4290, 4298), True, 'import matplotlib.pyplot as plt\n'), ((4701, 4711), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4709, 4711), True, 'import matplotlib.pyplot as plt\n'), ((2977, 3138), 
'pymor.discretizers.builtin.gui.matplotlib.Matplotlib1DAxes', 'Matplotlib1DAxes', ([], {'U': 'u', 'figure': 'figure', 'sync_timer': 'sync_timer', 'grid': 'grid', 'vmin': 'vmin', 'vmax': 'vmax', 'columns': 'columns', 'codim': 'codim', 'separate_axes': 'separate_axes'}), '(U=u, figure=figure, sync_timer=sync_timer, grid=grid, vmin\n =vmin, vmax=vmax, columns=columns, codim=codim, separate_axes=separate_axes\n )\n', (2993, 3138), False, 'from pymor.discretizers.builtin.gui.matplotlib import MatplotlibPatchAxes, Matplotlib1DAxes\n'), ((9430, 9439), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (9436, 9439), True, 'import numpy as np\n'), ((9488, 9497), 'numpy.max', 'np.max', (['u'], {}), '(u)\n', (9494, 9497), True, 'import numpy as np\n'), ((6541, 6553), 'numpy.min', 'np.min', (['u[0]'], {}), '(u[0])\n', (6547, 6553), True, 'import numpy as np\n'), ((6608, 6620), 'numpy.max', 'np.max', (['u[0]'], {}), '(u[0])\n', (6614, 6620), True, 'import numpy as np\n'), ((6697, 6706), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (6703, 6706), True, 'import numpy as np\n'), ((6761, 6770), 'numpy.max', 'np.max', (['u'], {}), '(u)\n', (6767, 6770), True, 'import numpy as np\n'), ((9114, 9123), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (9120, 9123), True, 'import numpy as np\n'), ((9178, 9187), 'numpy.max', 'np.max', (['u'], {}), '(u)\n', (9184, 9187), True, 'import numpy as np\n'), ((6880, 6892), 'numpy.min', 'np.min', (['u[0]'], {}), '(u[0])\n', (6886, 6892), True, 'import numpy as np\n'), ((6960, 6972), 'numpy.max', 'np.max', (['u[0]'], {}), '(u[0])\n', (6966, 6972), True, 'import numpy as np\n'), ((7062, 7071), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (7068, 7071), True, 'import numpy as np\n'), ((7139, 7148), 'numpy.max', 'np.max', (['u'], {}), '(u)\n', (7145, 7148), True, 'import numpy as np\n'), ((9263, 9272), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (9269, 9272), True, 'import numpy as np\n'), ((9340, 9349), 'numpy.max', 'np.max', (['u'], {}), '(u)\n', (9346, 9349), 
True, 'import numpy as np\n')] |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
IEC unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
:param aquatic_body_def: type of endpoint of concern (e.g., pond, wetland); implies whether
: endpoint of concern parameters (e.g.,, pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
:description assigns column names (except distaqnce column) from sql database to internal scenario names
:param column_name: short name for pesiticide application scenario for which distance vs deposition data is provided
:param scenario_name: internal variable for holding scenario names
:param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
    def test_get_scenario_deposition_data(self):
        """
        :description retrieves deposition data for all scenarios from sql database
        :            and checks that for each the first, last, and total number of values
        :            are correct
        :param scenario: name of scenario for which data is to be retrieved
        :param num_values: number of values included in scenario datasets
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        #scenario_data = pd.Series([[]], dtype='float')
        # 'result' is initialized here so the finally block below can safely
        # reference it even if the try block fails before reassigning it
        result = pd.Series([], dtype='float')
        #changing expected values to the 161st
        # each scenario contributes a triplet: first value, last (161st) value,
        # and the number of values retrieved for that scenario
        expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
                           0.49997,0.011741,161.0, #aerial_f2m
                           0.4999,0.0053241,161.0, #aerial_m2c
                           0.49988,0.0031189,161.0, #aerial_c2vc
                           1.019339,9.66E-04,161.0, #ground_low_vf
                           1.007885,6.13E-04,161.0, #ground_low_fmc
                           1.055205,1.41E-03,161.0, #ground_high_vf
                           1.012828,7.72E-04,161.0, #ground_high_fmc
                           8.91E-03,3.87E-05,161.0, #airblast_normal
                           0.1155276,4.66E-04,161.0, #airblast_dense
                           0.4762651,5.14E-05,161.0, #airblast_sparse
                           3.76E-02,3.10E-05,161.0, #airblast_vineyard
                           0.2223051,3.58E-04,161.0] #airblast_orchard
        try:
            agdrift_empty.num_db_values = 161 #set number of data values in sql db
            location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
            agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
            agdrift_empty.db_table = 'output'
            #agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
            #this is the list of scenario names (column names) in sql db (the order here is important because
            #the expected values are ordered in this manner
            agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
                                           'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
                                           'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
                                           'airblast_orchard']
            #cycle through reading scenarios and building result list
            for i in range(len(agdrift_empty.scenario_name)):
                #get scenario data
                scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
                                                                           agdrift_empty.num_db_values)
                print(scenario_data)
                #extract 1st and last values of scenario data and build result list (including how many values are
                #retrieved for each scenario
                if i == 0:
                    #fix this
                    # NOTE(review): on the first iteration 'result' is rebound from a
                    # pandas Series to a plain list; subsequent iterations rely on
                    # list.extend (Series has no extend method)
                    result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
                              float(len(scenario_data))]
                else:
                    result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
                                   float(len(scenario_data))])
            npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            tab = [result, expected_result]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print(tabulate(tab, headers='keys', tablefmt='rst'))
        return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result_x, expected_result_x]
tab = [result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
user specified areas the length is calculated and not specified by the user)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
Deposition calculation.
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:parem area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
:description average deposition over width of water body in grams per acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:parem area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
x_dist = 6.56
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
# write output arrays to excel file -- just for debugging
agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "output_array_generate.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
    def test_generate_running_avg1(self):
        """
        :description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
        :param x_array_in: array of x-axis values
        :param y_array_in: array of y-axis values
        :param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
        :param y_array_out: array of y-axis values in output array
        :param npts_out: number of points in the output array
        :param x_dist: width in x_axis units of running weighted average
        :NOTE This test uses a uniformly spaced x_array and monotonically increasing y_array
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # pre-seed all result holders as empty Series so the debug prints in the
        # finally block can reference them even if an assertion fires early
        expected_result_x = pd.Series([], dtype='float')
        expected_result_y = pd.Series([], dtype='float')
        expected_result_npts = pd.Series([], dtype='object')
        x_array_in = pd.Series([], dtype='float')
        y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
        y_array_out = pd.Series([], dtype='float')
        try:
            # with unit x spacing and slope-1 y, a width-5 forward running
            # average of y = x over [x, x+5] is x + 2.5 (matches values below)
            expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                                 11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                                 21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                                 31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                                 41.,42.,43.,44.]
            expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,
                                 12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
                                 22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
                                 32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
                                 42.5,43.5,44.5,45.5, 46.5]
            expected_result_npts = 45
            x_dist = 5.
            num_db_values = 51
            x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                          11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                          21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
            y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                          11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                          21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
            x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
                                                                                    y_array_in, x_dist)
            npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
            npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            pass
            # print actual vs expected regardless of assertion outcome
            # NOTE(review): if generate_running_avg itself raised, npts_out is
            # unbound here and these prints will NameError — confirm acceptable
            tab1 = [x_array_out, expected_result_x]
            tab2 = [y_array_out, expected_result_y]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
            print(tabulate(tab1, headers='keys', tablefmt='rst'))
            print(tabulate(tab2, headers='keys', tablefmt='rst'))
        return
    def test_generate_running_avg2(self):
        """
        :description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
        :param x_array_in: array of x-axis values
        :param y_array_in: array of y-axis values
        :param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
        :param y_array_out: array of y-axis values in output array
        :param npts_out: number of points in the output array
        :param x_dist: width in x_axis units of running weighted average
        :NOTE This test uses a non-uniformly spaced x_array and monotonically increasing y_array
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # pre-seed result holders so the finally-block prints have something to
        # show even when an assertion fails before assignment
        expected_result_x = pd.Series([], dtype='float')
        expected_result_y = pd.Series([], dtype='float')
        expected_result_npts = pd.Series([], dtype='object')
        x_array_in = pd.Series([], dtype='float')
        y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
        y_array_out = pd.Series([], dtype='float')
        try:
            # x grid has a half-unit irregularity every 10 points (11.5, 21.5, ...);
            # expected y values reflect the weighted average over the uneven spacing
            expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                                 11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                                 21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                                 31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                                 41.5,42.,43.,44.]
            expected_result_y = [2.5,3.5,4.5,5.5,6.5,7.5,8.4666667,9.4,10.4,11.4,
                                 12.4,13.975,14.5,15.5,16.5,17.5,18.466666667,19.4,20.4,21.4,
                                 22.4,23.975,24.5,25.5,26.5,27.5,28.46666667,29.4,30.4,31.4,
                                 32.4,33.975,34.5,35.5,36.5,37.5,38.466666667,39.4,40.4,41.4,
                                 42.4,43.975,44.5,45.5, 46.5]
            expected_result_npts = 45
            x_dist = 5.
            agdrift_empty.num_db_values = 51
            x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                          11.5,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                          21.5,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.5,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.5,42.,43.,44.,45.,46.,47.,48.,49.,50.]
            y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                          11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                          21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
            x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
                                                                                    x_array_in, y_array_in, x_dist)
            npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
            npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            pass
            # print actual vs expected regardless of assertion outcome
            tab1 = [x_array_out, expected_result_x]
            tab2 = [y_array_out, expected_result_y]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
            print(tabulate(tab1, headers='keys', tablefmt='rst'))
            print(tabulate(tab2, headers='keys', tablefmt='rst'))
        return
    def test_generate_running_avg3(self):
        """
        :description creates a running average for a specified x axis width (e.g., 7-day average values of an array);
                     averages reflect weighted average assuming linearity between x points;
                     average is calculated as the area under the y-curve beginning at each x point and extending out x_dist
                     divided by x_dist (which yields the weighted average y between the relevant x points)
        :param x_array_in: array of x-axis values
        :param y_array_in: array of y-axis values
        :param num_db_values: number of points in the input arrays
        :param x_array_out: array of x-axis values in output array
        :param y_array_out: array of y-axis values in output array
        :param npts_out: number of points in the output array
        :param x_dist: width in x_axis units of running weighted average
        :NOTE This test uses a monotonically increasing y_array and inserts a gap in the x values
              that is greater than x_dist
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # pre-seed result holders so the finally-block prints work even if an
        # assertion fails before the arrays are assigned
        expected_result_x = pd.Series([], dtype='float')
        expected_result_y = pd.Series([], dtype='float')
        expected_result_npts = pd.Series([], dtype='object')
        x_array_in = pd.Series([], dtype='float')
        y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
        y_array_out = pd.Series([], dtype='float')
        try:
            # the x grid jumps from 7 to 16 (gap of 9 > x_dist of 5); expected y
            # values across the gap come from the trapezoidal weighting of the
            # interpolated segment
            expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
                                 21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                                 31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                                 41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,51.,52.]
            expected_result_y = [2.5,3.5,4.5,5.4111111,6.14444444,6.7,7.07777777,7.277777777,10.5,11.5,
                                 12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,21.5,
                                 22.5,23.5,24.5,25.5,26.5,27.5,28.5,29.5,30.5,31.5,
                                 32.5,33.5,34.5,35.5,36.5,37.5,38.5,39.5,40.5,41.5,
                                 42.5,43.5,44.5,45.5, 46.5]
            expected_result_npts = 45
            x_dist = 5.
            num_db_values = 51
            x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,16.,17.,18.,19.,20.,
                          21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
                          51.,52.,53.,54.,55.,56.,57.,58.]
            y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                          11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                          21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
            x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(num_db_values, x_array_in,
                                                                                    y_array_in, x_dist)
            npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
            npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            pass
            # print actual vs expected regardless of assertion outcome
            tab1 = [x_array_out, expected_result_x]
            tab2 = [y_array_out, expected_result_y]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
            print(tabulate(tab1, headers='keys', tablefmt='rst'))
            print(tabulate(tab2, headers='keys', tablefmt='rst'))
        return
    def test_locate_integrated_avg(self):
        """
        :description retrieves values for distance and the first deposition scenario from the sql database
                     and generates running weighted averages from the first x,y value until it locates the user
                     specified integrated average of interest
        :param num_db_values: number of distance values to be retrieved
        :param distance_name: name of column in sql database that contains the distance values
        :NOTE any blank fields are filled with 'nan'
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # point the object at the sqlite database that ships alongside this test module
        location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
        agdrift_empty.db_table = 'output'
        # pre-seed result holders so the finally-block prints work even on early failure
        expected_result_x = pd.Series([], dtype='float')
        expected_result_y = pd.Series([], dtype='float')
        expected_result_npts = pd.Series([], dtype='object')
        x_array_in = pd.Series([], dtype='float')
        y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
        y_array_out = pd.Series([], dtype='float')
        try:
            # expected 160-point running-average curve for the 'ground_low_vf'
            # scenario; values were generated externally (OPP AGDRIFT model output)
            expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
                                 32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
                                 111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
                                 183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
                                 255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
                                 328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
                                 400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
                                 472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
                                 544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
                                 616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
                                 688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
                                 761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
                                 833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
                                 905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
                                 977.6784,984.24,990.8016]
            expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
                                 0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
                                 0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
                                 0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
                                 0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
                                 0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
                                 0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
                                 0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
                                 0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
                                 0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
                                 0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
                                 0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
                                 0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
                                 0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
                                 0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
                                 0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
                                 0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
                                 0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
                                 0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
                                 0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
                                 0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
                                 0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
                                 0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
            expected_result_npts = 160
            expected_x_dist_of_interest = 990.8016
            x_dist = 6.56
            weighted_avg = 0.0009697 #this is the running average value we're looking for
            agdrift_empty.distance_name = 'distance_ft'
            agdrift_empty.scenario_name = 'ground_low_vf'
            agdrift_empty.num_db_values = 161
            agdrift_empty.find_nearest_x = True
            # pull the raw distance/deposition curves from the sqlite database
            x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
            y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
            x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
                agdrift_empty.locate_integrated_avg(agdrift_empty.num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
            npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
            npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
            npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            pass
            # print actual vs expected regardless of assertion outcome
            tab1 = [x_array_out, expected_result_x]
            tab2 = [y_array_out, expected_result_y]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
            print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
            print("x_array result/x_array_expected")
            print(tabulate(tab1, headers='keys', tablefmt='rst'))
            print("y_array result/y_array_expected")
            print(tabulate(tab2, headers='keys', tablefmt='rst'))
        return
    def test_locate_integrated_avg1(self):
        """
        :description retrieves values for distance and the first deposition scenario from the sql database
        :param num_db_values: number of distance values to be retrieved
        :param distance_name: name of column in sql database that contains the distance values
        :NOTE this test is for a monotonically increasing function with some irregularity in x-axis points
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # pre-seed result holders so the finally-block prints work even on early failure
        expected_result_x = pd.Series([], dtype='float')
        expected_result_y = pd.Series([], dtype='float')
        x_array_in = pd.Series([], dtype='float')
        y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
        y_array_out = pd.Series([], dtype='float')
        try:
            # search should stop once the running average reaches weighted_avg (12.)
            # at an interpolated x of 30.5; output arrays end at the enclosing point
            expected_result_x = [0.,7.0,16.0,17.0,18.0,19.0,20.0,28.0,29.0,30.0,31.]
            expected_result_y = [0.357143,1.27778,4.4125,5.15,5.7125,6.1,6.3125,9.5,10.5,11.5,12.5]
            expected_result_npts = 11
            expected_x_dist_of_interest = 30.5
            x_dist = 5.
            weighted_avg = 12.
            num_db_values = 51
            x_array_in = [0.,7.,16.,17.,18.,19.,20.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
                          51.,52.,53.,54.,55.,56.,57.,58.,59.,60.,
                          61.,62.,63.,64.,65.,66.,67.,68.,69.,70.,
                          71.]
            y_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,
                          11.,12.,13.,14.,15.,16.,17.,18.,19.,20.,
                          21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,
                          31.,32.,33.,34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.]
            agdrift_empty.find_nearest_x = True
            x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
                agdrift_empty.locate_integrated_avg(num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
            npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
            npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
            npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            pass
            # print actual vs expected regardless of assertion outcome
            tab1 = [x_array_out, expected_result_x]
            tab2 = [y_array_out, expected_result_y]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
            print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
            print("x_array result/x_array_expected")
            print(tabulate(tab1, headers='keys', tablefmt='rst'))
            print("y_array result/y_array_expected")
            print(tabulate(tab2, headers='keys', tablefmt='rst'))
        return
    def test_locate_integrated_avg2(self):
        """
        :description retrieves values for distance and the first deposition scenario from the sql database
        :param num_db_values: number of distance values to be retrieved
        :param distance_name: name of column in sql database that contains the distance values
        :NOTE This test is for a monotonically decreasing function with irregular x-axis spacing
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # pre-seed result holders so the finally-block prints work even on early failure
        expected_result_x = pd.Series([], dtype='float')
        expected_result_y = pd.Series([], dtype='float')
        x_array_in = pd.Series([], dtype='float')
        y_array_in = pd.Series([], dtype='float')
        x_array_out = pd.Series([], dtype='float')
        y_array_out = pd.Series([], dtype='float')
        try:
            # for a decreasing curve the running average crosses weighted_avg (12.)
            # exactly at x = 60.; note the x grid skips 31-33 and 7-16 style gaps
            expected_result_x = [0.,7.,16.,17.,18.,19.,20.,28.,29.,30.,
                                 34.,35.,36.,37.,38.,39.,40.,
                                 41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
                                 51.,52.,53.,54.,55.,56.,57.,58.,59.,60.]
            expected_result_y = [49.6429,48.7222,45.5875,44.85,44.2875,43.9,43.6875,41.175,40.7,40.3,
                                 37.5,36.5,35.5,34.5,33.5,32.5,31.5,30.5,29.5,28.5,
                                 27.5,26.5,25.5,24.5,23.5,22.5,21.5,20.5,19.5,18.5,
                                 17.5,16.5,15.5,14.5,13.5,12.5,11.5]
            expected_result_npts = 37
            expected_x_dist_of_interest = 60.
            x_dist = 5.
            weighted_avg = 12.
            num_db_values = 51
            agdrift_empty.find_nearest_x = True
            x_array_in = [0.,7.,16.,17.,18.,19.,20.,28.,29.,30.,
                          34.,35.,36.,37.,38.,39.,40.,
                          41.,42.,43.,44.,45.,46.,47.,48.,49.,50.,
                          51.,52.,53.,54.,55.,56.,57.,58.,59.,60.,
                          61.,62.,63.,64.,65.,66.,67.,68.,69.,70.,
                          71.,72.,73.,74. ]
            y_array_in = [50.,49.,48.,47.,46.,45.,44.,43.,42.,41.,
                          40.,39.,38.,37.,36.,35.,34.,33.,32.,31.,
                          30.,29.,28.,27.,26.,25.,24.,23.,22.,21.,
                          20.,19.,18.,17.,16.,15.,14.,13.,12.,11.,
                          10.,9.,8.,7.,6.,5.,4.,3.,2.,1.,0.]
            x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
                agdrift_empty.locate_integrated_avg(num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
            npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True)
            npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
            npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            pass
            # print actual vs expected regardless of assertion outcome
            tab1 = [x_array_out, expected_result_x]
            tab2 = [y_array_out, expected_result_y]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
            print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
            print("x_array result/x_array_expected")
            print(tabulate(tab1, headers='keys', tablefmt='rst'))
            print("y_array result/y_array_expected")
            print(tabulate(tab2, headers='keys', tablefmt='rst'))
        return
def test_locate_integrated_avg3(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE this test is for a monotonically decreasing function with regular x-axis spacing
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
expected_result_x_dist = pd.Series([], dtype='float')
try:
expected_result_x = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,
10.,11.,12.,13.,14.,15.,16.,17.,18.,19.,
20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,
30.,31.,32.,33.,34.,35.,36.]
expected_result_y = [47.5,46.5,45.5,44.5,43.5,42.5,41.5,40.5,39.5,38.5,
37.5,36.5,35.5,34.5,33.5,32.5,31.5,30.5,29.5,28.5,
27.5,26.5,25.5,24.5,23.5,22.5,21.5,20.5,19.5,18.5,
17.5,16.5,15.5,14.5,13.5,12.5,11.5]
expected_result_npts = 37
expected_x_dist_of_interest = 36.
x_dist = 5.
weighted_avg = 12.
num_db_values = 51
agdrift_empty.find_nearest_x = True
x_array_in = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,
10.,11.,12.,13.,14.,15.,16.,17.,18.,19.,
20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,
30.,31.,32.,33.,34.,35.,36.,37.,38.,39.,
40.,41.,42.,43.,44.,45.,46.,47.,48.,49.,
50.]
y_array_in = [50.,49.,48.,47.,46.,45.,44.,43.,42.,41.,
40.,39.,38.,37.,36.,35.,34.,33.,32.,31.,
30.,29.,28.,27.,26.,25.,24.,23.,22.,21.,
20.,19.,18.,17.,16.,15.,14.,13.,12.,11.,
10.,9.,8.,7.,6.,5.,4.,3.,2.,1.,0.]
x_array_out, y_array_out, npts_out, x_dist_of_interest, range_chk = \
agdrift_empty.locate_integrated_avg(num_db_values, x_array_in, y_array_in, x_dist, weighted_avg)
npt.assert_array_equal(expected_x_dist_of_interest, x_dist_of_interest, verbose=True )
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True )
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} x-units to area and got {1} '.format(expected_x_dist_of_interest, x_dist_of_interest))
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
    def test_round_model_outputs(self):
        """
        :description round output variable values (and place in output variable series) so that they can be directly
                     compared to expected results (which were limited in terms of their output format from the OPP AGDRIFT
                     model (V2.1.1) interface (we don't have the AGDRIFT code so we cannot change the output format to
                     agree with this model
        :param avg_dep_foa:
        :param avg_dep_lbac:
        :param avg_dep_gha:
        :param avg_waterconc_ngl:
        :param avg_field_dep_mgcm2:
        :param num_sims: number of simulations
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        num_sims = 3
        # num_args = number of output variables collected per simulation row below
        num_args = 5
        # output series are pre-filled with nan; round_model_outputs writes into them by index
        agdrift_empty.out_avg_dep_foa = pd.Series(num_sims * [np.nan], dtype='float')
        agdrift_empty.out_avg_dep_lbac = pd.Series(num_sims * [np.nan], dtype='float')
        agdrift_empty.out_avg_dep_gha = pd.Series(num_sims * [np.nan], dtype='float')
        agdrift_empty.out_avg_waterconc_ngl = pd.Series(num_sims * [np.nan], dtype='float')
        agdrift_empty.out_avg_field_dep_mgcm2 = pd.Series(num_sims * [np.nan], dtype='float')
        result = pd.Series(num_sims * [num_args*[np.nan]], dtype='float')
        expected_result = pd.Series(num_sims * [num_args*[np.nan]], dtype='float')
        # each row holds the same rounded value for all five output variables
        expected_result[0] = [1.26,1.26,1.26,1.26,1.26]
        expected_result[1] = [0.0004,0.0004,0.0004,0.0004,0.0004]
        expected_result[2] = [3.45e-05,3.45e-05,3.45e-05,3.45e-05,3.45e-05]
        try:
            #setting each variable to same values, each value tests a separate pathway through rounding method
            avg_dep_lbac = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
            avg_dep_foa = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
            avg_dep_gha = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
            avg_waterconc_ngl = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
            avg_field_dep_mgcm2 = pd.Series([1.2567,3.55e-4,3.454e-5], dtype='float')
            for i in range(num_sims):
                lbac = avg_dep_lbac[i]
                foa = avg_dep_foa[i]
                gha = avg_dep_gha[i]
                ngl = avg_waterconc_ngl[i]
                mgcm2 = avg_field_dep_mgcm2[i]
                # rounds and stores into agdrift_empty.out_* series at index i
                agdrift_empty.round_model_outputs(foa, lbac, gha, ngl, mgcm2, i)
                result[i] = [agdrift_empty.out_avg_dep_foa[i], agdrift_empty.out_avg_dep_lbac[i],
                             agdrift_empty.out_avg_dep_gha[i], agdrift_empty.out_avg_waterconc_ngl[i],
                             agdrift_empty.out_avg_field_dep_mgcm2[i]]
            npt.assert_allclose(result[0], expected_result[0], rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(result[1], expected_result[1], rtol=1e-5, atol=0, err_msg='', verbose=True)
            npt.assert_allclose(result[2], expected_result[2], rtol=1e-5, atol=0, err_msg='', verbose=True)
        finally:
            # print actual vs expected regardless of assertion outcome
            tab = [result, expected_result]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print(tabulate(tab, headers='keys', tablefmt='rst'))
        return
    def test_find_dep_pt_location(self):
        """
        :description this method locates the downwind distance associated with a specific deposition rate
        :param x_array: array of distance values
        :param y_array: array of deposition values
        :param npts: number of values in x/y arrays
        :param foa: value of deposition (y value) of interest
        :return:
        """
        # create empty pandas dataframes to create empty object for this unittest
        agdrift_empty = self.create_agdrift_object()
        # four probe depositions: above the curve max, mid-curve, at the curve
        # minimum, and below the minimum (expected to report 'out of range')
        result = [[],[],[],[]]
        expected_result = [(0.0, 'in range'), (259.1832, 'in range'), (997.3632, 'in range'), (np.nan, 'out of range')]
        try:
            # fixed distance/deposition curve (161 points) taken from the
            # 'ground_low_vf' scenario used elsewhere in this module
            x_array = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
                       32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
                       111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
                       183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
                       255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
                       328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
                       400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
                       472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
                       544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
                       616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
                       688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
                       761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
                       833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
                       905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
                       977.6784,984.24,990.8016, 997.3632]
            y_array = [0.364706389,0.351133211,0.338484161,0.315606383,0.277604029,0.222810736,0.159943507,
                       0.121479708,0.099778741,0.068653,0.05635,0.0386,0.0296,0.02415,0.02055,0.01795,
                       0.0159675,0.0144675,0.0132,0.01215,0.0113,0.01055,0.009905,0.009345,0.008845,0.0084,
                       0.008,0.007635,0.0073,0.007,0.006725,0.006465,0.00623,0.00601,0.005805,0.005615,
                       0.005435,0.00527,0.00511,0.00496,0.00482,0.004685,0.00456,0.00444,0.004325,0.00422,
                       0.00412,0.00402,0.003925,0.003835,0.00375,0.00367,0.00359,0.00351,0.003435,0.003365,
                       0.0033,0.003235,0.00317,0.00311,0.003055,0.003,0.002945,0.002895,0.002845,0.002795,
                       0.002745,0.002695,0.00265,0.00261,0.00257,0.002525,0.002485,0.00245,0.00241,0.00237,
                       0.002335,0.0023,0.002265,0.002235,0.002205,0.002175,0.002145,0.002115,0.002085,
                       0.002055,0.002025,0.002,0.001975,0.001945,0.00192,0.0019,0.001875,0.00185,0.00183,
                       0.001805,0.00178,0.00176,0.00174,0.00172,0.0017,0.00168,0.00166,0.00164,0.00162,
                       0.001605,0.00159,0.00157,0.00155,0.001535,0.00152,0.0015,0.001485,0.00147,0.001455,
                       0.00144,0.001425,0.00141,0.001395,0.001385,0.00137,0.001355,0.00134,0.001325,0.001315,
                       0.001305,0.00129,0.001275,0.001265,0.001255,0.001245,0.00123,0.001215,0.001205,
                       0.001195,0.001185,0.001175,0.001165,0.001155,0.001145,0.001135,0.001125,0.001115,
                       0.001105,0.001095,0.001085,0.001075,0.001065,0.00106,0.001055,0.001045,0.001035,
                       0.001025,0.001015,0.001005,0.0009985,0.000993,0.000985,0.000977,0.0009695,0.0009612]
            npts = len(x_array)
            num_sims = 4
            foa = [0.37, 0.004, 0.0009613, 0.0008]
            for i in range(num_sims):
                # each call returns a (distance, range-status) tuple
                result[i] = agdrift_empty.find_dep_pt_location(x_array, y_array, npts, foa[i])
            npt.assert_equal(expected_result, result, verbose=True)
        finally:
            # print actual vs expected regardless of assertion outcome
            tab = [result, expected_result]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print(tabulate(tab, headers='keys', tablefmt='rst'))
        return
def test_extend_curve_opp(self):
"""
:description extends/extrapolates an x,y array of data points that reflect a ln ln relationship by selecting
a number of points near the end of the x,y arrays and fitting a line to the points
ln ln transforms (two ln ln transforms can by applied; on using the straight natural log of
each selected x,y point and one using a 'relative' value of each of the selected points --
the relative values are calculated by establishing a zero point closest to the selected
points
For AGDRIFT: extends distance vs deposition (fraction of applied) curve to enable model calculations
when area of interest (pond, wetland, terrestrial field) lie partially outside the original
curve (whose extent is 997 feet). The extension is achieved by fitting a line of best fit
to the last 16 points of the original curve. The x,y values representing the last 16 points
are natural log transforms of the distance and deposition values at the 16 points. Two long
transforms are coded here, reflecting the fact that the AGDRIFT model (v2.1.1) uses each of them
under different circumstandes (which I believe is not the intention but is the way the model
functions -- my guess is that one of the transforms was used and then a second one was coded
to increase the degree of conservativeness -- but the code was changed in only one of the two
places where the transformation occurs.
Finally, the AGDRIFT model extends the curve only when necessary (i.e., when it determines that
the area of intereest lies partially beyond the last point of the origanal curve (997 ft). In
this code all the curves are extended out to 1994 ft, which represents the furthest distance that
the downwind edge of an area of concern can be specified. All scenario curves are extended here
because we are running multiple simulations (e.g., monte carlo) and instead of extending the
curves each time a simulation requires it (which may be multiple time for the same scenario
curve) we just do it for all curves up front. There is a case to be made that the
curves should be extended external to this code and simply provide the full curve in the SQLite
database containing the original curve.
:param x_array: array of x values to be extended (must be at least 17 data points in original array)
:param y_array: array of y values to be extended
:param max_dist: maximum distance (ft) associated with unextended x values
:param dist_inc: increment (ft) for each extended data point
:param num_pts_ext: number of points at end of original x,y arrays to be used for extending the curve
:param ln_ln_trans: form of transformation to perform (True: straight ln ln, False: relative ln ln)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
# x_array_in = pd.Series([], dtype='float')
# y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,6.5616,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632,
1003.9232,1010.4832,1017.0432,1023.6032,1030.1632,1036.7232,1043.2832,1049.8432,1056.4032,
1062.9632,1069.5232,1076.0832,1082.6432,1089.2032,1095.7632,1102.3232,1108.8832,1115.4432,
1122.0032,1128.5632,1135.1232,1141.6832,1148.2432,1154.8032,1161.3632,1167.9232,1174.4832,
1181.0432,1187.6032,1194.1632,1200.7232,1207.2832,1213.8432,1220.4032,1226.9632,1233.5232,
1240.0832,1246.6432,1253.2032,1259.7632,1266.3232,1272.8832,1279.4432,1286.0032,1292.5632,
1299.1232,1305.6832,1312.2432,1318.8032,1325.3632,1331.9232,1338.4832,1345.0432,1351.6032,
1358.1632,1364.7232,1371.2832,1377.8432,1384.4032,1390.9632,1397.5232,1404.0832,1410.6432,
1417.2032,1423.7632,1430.3232,1436.8832,1443.4432,1450.0032,1456.5632,1463.1232,1469.6832,
1476.2432,1482.8032,1489.3632,1495.9232,1502.4832,1509.0432,1515.6032,1522.1632,1528.7232,
1535.2832,1541.8432,1548.4032,1554.9632,1561.5232,1568.0832,1574.6432,1581.2032,1587.7632,
1594.3232,1600.8832,1607.4432,1614.0032,1620.5632,1627.1232,1633.6832,1640.2432,1646.8032,
1653.3632,1659.9232,1666.4832,1673.0432,1679.6032,1686.1632,1692.7232,1699.2832,1705.8432,
1712.4032,1718.9632,1725.5232,1732.0832,1738.6432,1745.2032,1751.7632,1758.3232,1764.8832,
1771.4432,1778.0032,1784.5632,1791.1232,1797.6832,1804.2432,1810.8032,1817.3632,1823.9232,
1830.4832,1837.0432,1843.6032,1850.1632,1856.7232,1863.2832,1869.8432,1876.4032,1882.9632,
1889.5232,1896.0832,1902.6432,1909.2032,1915.7632,1922.3232,1928.8832,1935.4432,1942.0032,
1948.5632,1955.1232,1961.6832,1968.2432,1974.8032,1981.3632,1987.9232,1994.4832]
expected_result_y = [0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741,1.1826345E-02,1.1812256E-02,
1.1798945E-02,1.1786331E-02,1.1774344E-02,1.1762927E-02,1.1752028E-02,1.1741602E-02,
1.1731610E-02,1.1722019E-02,1.1712796E-02,1.1703917E-02,1.1695355E-02,1.1687089E-02,
1.1679100E-02,1.1671370E-02,1.1663883E-02,1.1656623E-02,1.1649579E-02,1.1642737E-02,
1.1636087E-02,1.1629617E-02,1.1623319E-02,1.1617184E-02,1.1611203E-02,1.1605369E-02,
1.1599676E-02,1.1594116E-02,1.1588684E-02,1.1583373E-02,1.1578179E-02,1.1573097E-02,
1.1568122E-02,1.1563249E-02,1.1558475E-02,1.1553795E-02,1.1549206E-02,1.1544705E-02,
1.1540288E-02,1.1535953E-02,1.1531695E-02,1.1527514E-02,1.1523405E-02,1.1519367E-02,
1.1515397E-02,1.1511493E-02,1.1507652E-02,1.1503873E-02,1.1500154E-02,1.1496493E-02,
1.1492889E-02,1.1489338E-02,1.1485841E-02,1.1482395E-02,1.1478999E-02,1.1475651E-02,
1.1472351E-02,1.1469096E-02,1.1465886E-02,1.1462720E-02,1.1459595E-02,1.1456512E-02,
1.1453469E-02,1.1450465E-02,1.1447499E-02,1.1444570E-02,1.1441677E-02,1.1438820E-02,
1.1435997E-02,1.1433208E-02,1.1430452E-02,1.1427728E-02,1.1425036E-02,1.1422374E-02,
1.1419742E-02,1.1417139E-02,1.1414566E-02,1.1412020E-02,1.1409502E-02,1.1407011E-02,
1.1404546E-02,1.1402107E-02,1.1399693E-02,1.1397304E-02,1.1394939E-02,1.1392598E-02,
1.1390281E-02,1.1387986E-02,1.1385713E-02,1.1383463E-02,1.1381234E-02,1.1379026E-02,
1.1376840E-02,1.1374673E-02,1.1372527E-02,1.1370400E-02,1.1368292E-02,1.1366204E-02,
1.1364134E-02,1.1362082E-02,1.1360048E-02,1.1358032E-02,1.1356033E-02,1.1354052E-02,
1.1352087E-02,1.1350139E-02,1.1348207E-02,1.1346291E-02,1.1344390E-02,1.1342505E-02,
1.1340635E-02,1.1338781E-02,1.1336941E-02,1.1335115E-02,1.1333304E-02,1.1331507E-02,
1.1329723E-02,1.1327954E-02,1.1326197E-02,1.1324454E-02,1.1322724E-02,1.1321007E-02,
1.1319303E-02,1.1317611E-02,1.1315931E-02,1.1314263E-02,1.1312608E-02,1.1310964E-02,
1.1309332E-02,1.1307711E-02,1.1306101E-02,1.1304503E-02,1.1302915E-02,1.1301339E-02,
1.1299773E-02,1.1298218E-02,1.1296673E-02,1.1295138E-02,1.1293614E-02,1.1292099E-02,
1.1290594E-02,1.1289100E-02,1.1287614E-02,1.1286139E-02,1.1284672E-02,1.1283215E-02,
1.1281767E-02,1.1280328E-02,1.1278898E-02,1.1277477E-02,1.1276065E-02,1.1274661E-02]
expected_result_npts = [305]
max_dist = 997.3632
dist_inc = 6.56
num_pts_ext = 16
ln_ln_trans = False #using the relative ln ln transformation in this test
agdrift_empty.meters_per_ft = 0.3048
x_array_in = pd.Series([0.,6.5616,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632])
y_array_in = pd.Series([0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
0.023457 ,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741])
x_array_out, y_array_out = agdrift_empty.extend_curve_opp(x_array_in, y_array_in, max_dist, dist_inc, num_pts_ext,
ln_ln_trans)
npts_out = [len(y_array_out)]
#
#agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "extend_data.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts[0], npts_out[0]))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_extend_curve_opp1(self):
"""
:description extends/extrapolates an x,y array of data points that reflect a ln ln relationship by selecting
a number of points near the end of the x,y arrays and fitting a line to the points
ln ln transforms (two ln ln transforms can by applied; on using the straight natural log of
each selected x,y point and one using a 'relative' value of each of the selected points --
the relative values are calculated by establishing a zero point closest to the selected
points
For AGDRIFT: extends distance vs deposition (fraction of applied) curve to enable model calculations
when area of interest (pond, wetland, terrestrial field) lie partially outside the original
curve (whose extent is 997 feet). The extension is achieved by fitting a line of best fit
to the last 16 points of the original curve. The x,y values representing the last 16 points
are natural log transforms of the distance and deposition values at the 16 points. Two long
transforms are coded here, reflecting the fact that the AGDRIFT model (v2.1.1) uses each of them
under different circumstandes (which I believe is not the intention but is the way the model
functions -- my guess is that one of the transforms was used and then a second one was coded
to increase the degree of conservativeness -- but the code was changed in only one of the two
places where the transformation occurs.
Finally, the AGDRIFT model extends the curve only when necessary (i.e., when it determines that
the area of intereest lies partially beyond the last point of the origanal curve (997 ft). In
this code all the curves are extended out to 1994 ft, which represents the furthest distance that
the downwind edge of an area of concern can be specified. All scenario curves are extended here
because we are running multiple simulations (e.g., monte carlo) and instead of extending the
curves each time a simulation requires it (which may be multiple time for the same scenario
curve) we just do it for all curves up front. There is a case to be made that the
curves should be extended external to this code and simply provide the full curve in the SQLite
database containing the original curve.
:param x_array: array of x values to be extended (must be at least 17 data points in original array)
:param y_array: array of y values to be extended
:param max_dist: maximum distance (ft) associated with unextended x values
:param dist_inc: increment (ft) for each extended data point
:param num_pts_ext: number of points at end of original x,y arrays to be used for extending the curve
:param ln_ln_trans: form of transformation to perform (True: straight ln ln, False: relative ln ln)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
# x_array_in = pd.Series([], dtype='float')
# y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,6.5616,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632,
1003.9232,1010.4832,1017.0432,1023.6032,1030.1632,1036.7232,1043.2832,1049.8432,1056.4032,
1062.9632,1069.5232,1076.0832,1082.6432,1089.2032,1095.7632,1102.3232,1108.8832,1115.4432,
1122.0032,1128.5632,1135.1232,1141.6832,1148.2432,1154.8032,1161.3632,1167.9232,1174.4832,
1181.0432,1187.6032,1194.1632,1200.7232,1207.2832,1213.8432,1220.4032,1226.9632,1233.5232,
1240.0832,1246.6432,1253.2032,1259.7632,1266.3232,1272.8832,1279.4432,1286.0032,1292.5632,
1299.1232,1305.6832,1312.2432,1318.8032,1325.3632,1331.9232,1338.4832,1345.0432,1351.6032,
1358.1632,1364.7232,1371.2832,1377.8432,1384.4032,1390.9632,1397.5232,1404.0832,1410.6432,
1417.2032,1423.7632,1430.3232,1436.8832,1443.4432,1450.0032,1456.5632,1463.1232,1469.6832,
1476.2432,1482.8032,1489.3632,1495.9232,1502.4832,1509.0432,1515.6032,1522.1632,1528.7232,
1535.2832,1541.8432,1548.4032,1554.9632,1561.5232,1568.0832,1574.6432,1581.2032,1587.7632,
1594.3232,1600.8832,1607.4432,1614.0032,1620.5632,1627.1232,1633.6832,1640.2432,1646.8032,
1653.3632,1659.9232,1666.4832,1673.0432,1679.6032,1686.1632,1692.7232,1699.2832,1705.8432,
1712.4032,1718.9632,1725.5232,1732.0832,1738.6432,1745.2032,1751.7632,1758.3232,1764.8832,
1771.4432,1778.0032,1784.5632,1791.1232,1797.6832,1804.2432,1810.8032,1817.3632,1823.9232,
1830.4832,1837.0432,1843.6032,1850.1632,1856.7232,1863.2832,1869.8432,1876.4032,1882.9632,
1889.5232,1896.0832,1902.6432,1909.2032,1915.7632,1922.3232,1928.8832,1935.4432,1942.0032,
1948.5632,1955.1232,1961.6832,1968.2432,1974.8032,1981.3632,1987.9232,1994.4832]
expected_result_y = [0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741,1.16941E-02,1.16540E-02,
1.16144E-02,1.15752E-02,1.15363E-02,1.14978E-02,1.14597E-02,1.14219E-02,1.13845E-02,
1.13475E-02,1.13108E-02,1.12744E-02,1.12384E-02,1.12027E-02,1.11674E-02,1.11323E-02,
1.10976E-02,1.10632E-02,1.10291E-02,1.09953E-02,1.09618E-02,1.09286E-02,1.08957E-02,
1.08630E-02,1.08307E-02,1.07986E-02,1.07668E-02,1.07353E-02,1.07040E-02,1.06730E-02,
1.06423E-02,1.06118E-02,1.05816E-02,1.05516E-02,1.05218E-02,1.04923E-02,1.04631E-02,
1.04341E-02,1.04053E-02,1.03767E-02,1.03484E-02,1.03203E-02,1.02924E-02,1.02647E-02,
1.02372E-02,1.02100E-02,1.01829E-02,1.01561E-02,1.01295E-02,1.01031E-02,1.00768E-02,
1.00508E-02,1.00250E-02,9.99932E-03,9.97386E-03,9.94860E-03,9.92351E-03,9.89861E-03,
9.87389E-03,9.84934E-03,9.82498E-03,9.80078E-03,9.77676E-03,9.75291E-03,9.72923E-03,
9.70571E-03,9.68236E-03,9.65916E-03,9.63613E-03,9.61326E-03,9.59055E-03,9.56799E-03,
9.54558E-03,9.52332E-03,9.50122E-03,9.47926E-03,9.45745E-03,9.43578E-03,9.41426E-03,
9.39287E-03,9.37163E-03,9.35053E-03,9.32957E-03,9.30874E-03,9.28804E-03,9.26748E-03,
9.24705E-03,9.22675E-03,9.20657E-03,9.18653E-03,9.16661E-03,9.14682E-03,9.12714E-03,
9.10760E-03,9.08817E-03,9.06886E-03,9.04967E-03,9.03060E-03,9.01164E-03,8.99280E-03,
8.97407E-03,8.95546E-03,8.93696E-03,8.91856E-03,8.90028E-03,8.88210E-03,8.86404E-03,
8.84608E-03,8.82822E-03,8.81047E-03,8.79282E-03,8.77527E-03,8.75782E-03,8.74048E-03,
8.72323E-03,8.70608E-03,8.68903E-03,8.67208E-03,8.65522E-03,8.63845E-03,8.62178E-03,
8.60521E-03,8.58872E-03,8.57233E-03,8.55602E-03,8.53981E-03,8.52368E-03,8.50765E-03,
8.49170E-03,8.47583E-03,8.46005E-03,8.44436E-03,8.42875E-03,8.41323E-03,8.39778E-03,
8.38242E-03,8.36714E-03,8.35194E-03,8.33682E-03,8.32178E-03,8.30682E-03,8.29193E-03,
8.27713E-03,8.26240E-03,8.24774E-03,8.23316E-03,8.21866E-03,8.20422E-03,8.18987E-03,
8.17558E-03,8.16137E-03,8.14722E-03]
expected_result_npts = [305]
max_dist = 997.3632
dist_inc = 6.56
num_pts_ext = 16
ln_ln_trans = True #using the absolute ln ln transformation in this test
agdrift_empty.meters_per_ft = 0.3048
x_array_in = pd.Series([0.,6.5616,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632])
y_array_in = pd.Series([0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741])
x_array_out, y_array_out = agdrift_empty.extend_curve_opp(x_array_in, y_array_in, max_dist, dist_inc, num_pts_ext,
ln_ln_trans)
npts_out = [len(y_array_out)]
#
#agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "extend_data.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts[0], npts_out[0]))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_extend_curve(self):
    """
    :description extends/extrapolates an x,y array of data points that reflect a ln ln relationship by selecting
    a number of points near the end of the x,y arrays and fitting a line to the points
    ln ln transforms (two ln ln transforms can by applied; on using the straight natural log of
    each selected x,y point and one using a 'relative' value of each of the selected points --
    the relative values are calculated by establishing a zero point closest to the selected
    points
    For AGDRIFT: extends distance vs deposition (fraction of applied) curve to enable model calculations
    when area of interest (pond, wetland, terrestrial field) lie partially outside the original
    curve (whose extent is 997 feet). The extension is achieved by fitting a line of best fit
    to the last 16 points of the original curve. The x,y values representing the last 16 points
    are natural log transforms of the distance and deposition values at the 16 points. Two long
    transforms are coded here, reflecting the fact that the AGDRIFT model (v2.1.1) uses each of them
    under different circumstandes (which I believe is not the intention but is the way the model
    functions -- my guess is that one of the transforms was used and then a second one was coded
    to increase the degree of conservativeness -- but the code was changed in only one of the two
    places where the transformation occurs.
    Finally, the AGDRIFT model extends the curve only when necessary (i.e., when it determines that
    the area of intereest lies partially beyond the last point of the origanal curve (997 ft). In
    this code all the curves are extended out to 1994 ft, which represents the furthest distance that
    the downwind edge of an area of concern can be specified. All scenario curves are extended here
    because we are running multiple simulations (e.g., monte carlo) and instead of extending the
    curves each time a simulation requires it (which may be multiple time for the same scenario
    curve) we just do it for all curves up front. There is a case to be made that the
    curves should be extended external to this code and simply provide the full curve in the SQLite
    database containing the original curve.
    :param x_array: array of x values to be extended (must be at least 17 data points in original array)
    :param y_array: array of y values to be extended
    :param max_dist: maximum distance (ft) associated with unextended x values
    :param dist_inc: increment (ft) for each extended data point
    :param num_pts_ext: number of points at end of original x,y arrays to be used for extending the curve
    :param ln_ln_trans: form of transformation to perform (True: straight ln ln, False: relative ln ln)
    :return:
    """
    # create empty pandas dataframes to create empty object for this unittest
    agdrift_empty = self.create_agdrift_object()
    # pre-declare result/expected series so the post-try reporting code below
    # has defined names even if the try body fails early
    expected_result_x = pd.Series([], dtype='float')
    expected_result_y = pd.Series([], dtype='float')
    # x_array_in = pd.Series([], dtype='float')
    # y_array_in = pd.Series([], dtype='float')
    x_array_out = pd.Series([], dtype='float')
    y_array_out = pd.Series([], dtype='float')
    try:
        # expected x values: the original 153 distances (0 to 997.3632 ft)
        # followed by extrapolated points at 6.56-ft increments out to 1994.4832 ft
        expected_result_x = [0.,6.5616,13.1232,19.6848,26.2464,
        32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
        111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
        183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
        255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
        328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
        400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
        472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
        544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
        616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
        688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
        761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
        833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
        905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
        977.6784,984.24,990.8016,997.3632,
        1003.9232,1010.4832,1017.0432,1023.6032,1030.1632,1036.7232,1043.2832,1049.8432,1056.4032,
        1062.9632,1069.5232,1076.0832,1082.6432,1089.2032,1095.7632,1102.3232,1108.8832,1115.4432,
        1122.0032,1128.5632,1135.1232,1141.6832,1148.2432,1154.8032,1161.3632,1167.9232,1174.4832,
        1181.0432,1187.6032,1194.1632,1200.7232,1207.2832,1213.8432,1220.4032,1226.9632,1233.5232,
        1240.0832,1246.6432,1253.2032,1259.7632,1266.3232,1272.8832,1279.4432,1286.0032,1292.5632,
        1299.1232,1305.6832,1312.2432,1318.8032,1325.3632,1331.9232,1338.4832,1345.0432,1351.6032,
        1358.1632,1364.7232,1371.2832,1377.8432,1384.4032,1390.9632,1397.5232,1404.0832,1410.6432,
        1417.2032,1423.7632,1430.3232,1436.8832,1443.4432,1450.0032,1456.5632,1463.1232,1469.6832,
        1476.2432,1482.8032,1489.3632,1495.9232,1502.4832,1509.0432,1515.6032,1522.1632,1528.7232,
        1535.2832,1541.8432,1548.4032,1554.9632,1561.5232,1568.0832,1574.6432,1581.2032,1587.7632,
        1594.3232,1600.8832,1607.4432,1614.0032,1620.5632,1627.1232,1633.6832,1640.2432,1646.8032,
        1653.3632,1659.9232,1666.4832,1673.0432,1679.6032,1686.1632,1692.7232,1699.2832,1705.8432,
        1712.4032,1718.9632,1725.5232,1732.0832,1738.6432,1745.2032,1751.7632,1758.3232,1764.8832,
        1771.4432,1778.0032,1784.5632,1791.1232,1797.6832,1804.2432,1810.8032,1817.3632,1823.9232,
        1830.4832,1837.0432,1843.6032,1850.1632,1856.7232,1863.2832,1869.8432,1876.4032,1882.9632,
        1889.5232,1896.0832,1902.6432,1909.2032,1915.7632,1922.3232,1928.8832,1935.4432,1942.0032,
        1948.5632,1955.1232,1961.6832,1968.2432,1974.8032,1981.3632,1987.9232,1994.4832]
        # expected y values: the original deposition fractions followed by the
        # extrapolated tail produced by the straight ln-ln line-of-best-fit
        expected_result_y = [0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
        0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
        0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
        0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
        0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
        0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
        0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
        0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
        0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
        0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
        0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
        0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
        0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
        0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
        0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
        0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741,0.011695283,0.01165546,
        0.011616029,0.011576983,0.011538317,0.011500024,0.011462099,0.011424535,0.011387327,
        0.01135047,0.011313958,0.011277785,0.011241946,0.011206437,0.011171253,0.011136388,
        0.011101837,0.011067597,0.011033662,0.011000028,0.010966691,0.010933646,0.010900889,
        0.010868416,0.010836222,0.010804305,0.01077266,0.010741283,0.01071017,0.010679318,
        0.010648723,0.010618382,0.010588291,0.010558447,0.010528846,0.010499485,0.010470361,
        0.010441471,0.010412812,0.010384381,0.010356174,0.010328189,0.010300423,0.010272873,
        0.010245536,0.01021841,0.010191491,0.010164778,0.010138268,0.010111958,0.010085846,
        0.010059928,0.010034204,0.01000867,0.009983324,0.009958164,0.009933188,0.009908393,
        0.009883777,0.009859339,0.009835075,0.009810984,0.009787064,0.009763313,0.009739729,
        0.00971631,0.009693054,0.00966996,0.009647024,0.009624247,0.009601625,0.009579157,
        0.009556841,0.009534676,0.009512659,0.009490791,0.009469067,0.009447488,0.009426051,
        0.009404755,0.009383599,0.00936258,0.009341698,0.00932095,0.009300337,0.009279855,
        0.009259504,0.009239282,0.009219188,0.009199221,0.009179379,0.009159662,0.009140066,
        0.009120593,0.009101239,0.009082005,0.009062888,0.009043888,0.009025004,0.009006234,
        0.008987576,0.008969031,0.008950597,0.008932272,0.008914057,0.008895949,0.008877947,
        0.008860051,0.00884226,0.008824572,0.008806987,0.008789503,0.00877212,0.008754837,
        0.008737652,0.008720565,0.008703575,0.008686681,0.008669882,0.008653177,0.008636566,
        0.008620047,0.008603619,0.008587282,0.008571035,0.008554878,0.008538808,0.008522826,
        0.008506931,0.008491122,0.008475398,0.008459758,0.008444202,0.008428729,0.008413338,
        0.008398029,0.0083828,0.008367652,0.008352583,0.008337592,0.00832268,0.008307845,
        0.008293086,0.008278404,0.008263797,0.008249265,0.008234806,0.008220422,0.00820611,
        0.00819187,0.008177702,0.008163606]
        # 153 original + 152 extrapolated points = 305 total expected points
        expected_result_npts = [305]
        # extension parameters: extrapolate beyond 997.3632 ft in 6.56-ft steps,
        # fitting the line to the last 15 points with the straight ln-ln transform
        max_dist = 997.3632
        dist_inc = 6.56
        num_pts_ext = 15
        ln_ln_trans = True
        # input: the unextended 153-point distance curve (0 to 997.3632 ft)
        x_array_in = pd.Series([0.,6.5616,13.1232,19.6848,26.2464,
        32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
        111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
        183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
        255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
        328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
        400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
        472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
        544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
        616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
        688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
        761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
        833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
        905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
        977.6784,984.24,990.8016,997.3632])
        # input: the unextended 153-point deposition-fraction curve
        y_array_in = pd.Series([0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
        0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
        0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
        0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
        0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
        0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
        0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
        0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
        0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
        0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
        0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
        0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
        0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
        0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
        0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
        0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741])
        # method under test
        x_array_out, y_array_out = agdrift_empty.extend_curve(x_array_in, y_array_in, max_dist, dist_inc, num_pts_ext,
                                                              ln_ln_trans)
        npts_out = [len(y_array_out)]
        #
        #agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "extend_data.csv")
        # verify the point count exactly, then x and y curves to within relative tolerance
        npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
        npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
        npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
    finally:
        # no cleanup required; result/expected tables are echoed below for manual inspection
        pass
        tab1 = [x_array_out, expected_result_x]
        tab2 = [y_array_out, expected_result_y]
        print("\n")
        print(inspect.currentframe().f_code.co_name)
        print('expected {0} number of points and got {1} points'.format(expected_result_npts[0], npts_out[0]))
        print("x_array result/x_array_expected")
        print(tabulate(tab1, headers='keys', tablefmt='rst'))
        print("y_array result/y_array_expected")
        print(tabulate(tab2, headers='keys', tablefmt='rst'))
    return
def test_extend_curve1(self):
    """
    :description extends/extrapolates an x,y array of data points that reflect a ln ln relationship by selecting
    a number of points near the end of the x,y arrays and fitting a line to the points
    ln ln transforms (two ln ln transforms can by applied; on using the straight natural log of
    each selected x,y point and one using a 'relative' value of each of the selected points --
    the relative values are calculated by establishing a zero point closest to the selected
    points
    For AGDRIFT: extends distance vs deposition (fraction of applied) curve to enable model calculations
    when area of interest (pond, wetland, terrestrial field) lie partially outside the original
    curve (whose extent is 997 feet). The extension is achieved by fitting a line of best fit
    to the last 16 points of the original curve. The x,y values representing the last 16 points
    are natural log transforms of the distance and deposition values at the 16 points. Two long
    transforms are coded here, reflecting the fact that the AGDRIFT model (v2.1.1) uses each of them
    under different circumstandes (which I believe is not the intention but is the way the model
    functions -- my guess is that one of the transforms was used and then a second one was coded
    to increase the degree of conservativeness -- but the code was changed in only one of the two
    places where the transformation occurs.
    Finally, the AGDRIFT model extends the curve only when necessary (i.e., when it determines that
    the area of intereest lies partially beyond the last point of the origanal curve (997 ft). In
    this code all the curves are extended out to 1994 ft, which represents the furthest distance that
    the downwind edge of an area of concern can be specified. All scenario curves are extended here
    because we are running multiple simulations (e.g., monte carlo) and instead of extending the
    curves each time a simulation requires it (which may be multiple time for the same scenario
    curve) we just do it for all curves up front. There is a case to be made that the
    curves should be extended external to this code and simply provide the full curve in the SQLite
    database containing the original curve.
    :param x_array: array of x values to be extended (must be at least 17 data points in original array)
    :param y_array: array of y values to be extended
    :param max_dist: maximum distance (ft) associated with unextended x values
    :param dist_inc: increment (ft) for each extended data point
    :param num_pts_ext: number of points at end of original x,y arrays to be used for extending the curve
    :param ln_ln_trans: form of transformation to perform (True: straight ln ln, False: relative ln ln)
    :return:
    """
    # create empty pandas dataframes to create empty object for this unittest
    agdrift_empty = self.create_agdrift_object()
    # pre-declare result/expected series so the post-try reporting code below
    # has defined names even if the try body fails early
    expected_result_x = pd.Series([], dtype='float')
    expected_result_y = pd.Series([], dtype='float')
    # x_array_in = pd.Series([], dtype='float')
    # y_array_in = pd.Series([], dtype='float')
    x_array_out = pd.Series([], dtype='float')
    y_array_out = pd.Series([], dtype='float')
    try:
        # expected x values: the original 153 distances (0 to 997.3632 ft)
        # followed by extrapolated points at 6.56-ft increments out to 1994.4832 ft
        expected_result_x = [0.,6.5616,13.1232,19.6848,26.2464,
        32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
        111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
        183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
        255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
        328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
        400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
        472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
        544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
        616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
        688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
        761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
        833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
        905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
        977.6784,984.24,990.8016,997.3632,
        1003.9232,1010.4832,1017.0432,1023.6032,1030.1632,1036.7232,1043.2832,1049.8432,1056.4032,
        1062.9632,1069.5232,1076.0832,1082.6432,1089.2032,1095.7632,1102.3232,1108.8832,1115.4432,
        1122.0032,1128.5632,1135.1232,1141.6832,1148.2432,1154.8032,1161.3632,1167.9232,1174.4832,
        1181.0432,1187.6032,1194.1632,1200.7232,1207.2832,1213.8432,1220.4032,1226.9632,1233.5232,
        1240.0832,1246.6432,1253.2032,1259.7632,1266.3232,1272.8832,1279.4432,1286.0032,1292.5632,
        1299.1232,1305.6832,1312.2432,1318.8032,1325.3632,1331.9232,1338.4832,1345.0432,1351.6032,
        1358.1632,1364.7232,1371.2832,1377.8432,1384.4032,1390.9632,1397.5232,1404.0832,1410.6432,
        1417.2032,1423.7632,1430.3232,1436.8832,1443.4432,1450.0032,1456.5632,1463.1232,1469.6832,
        1476.2432,1482.8032,1489.3632,1495.9232,1502.4832,1509.0432,1515.6032,1522.1632,1528.7232,
        1535.2832,1541.8432,1548.4032,1554.9632,1561.5232,1568.0832,1574.6432,1581.2032,1587.7632,
        1594.3232,1600.8832,1607.4432,1614.0032,1620.5632,1627.1232,1633.6832,1640.2432,1646.8032,
        1653.3632,1659.9232,1666.4832,1673.0432,1679.6032,1686.1632,1692.7232,1699.2832,1705.8432,
        1712.4032,1718.9632,1725.5232,1732.0832,1738.6432,1745.2032,1751.7632,1758.3232,1764.8832,
        1771.4432,1778.0032,1784.5632,1791.1232,1797.6832,1804.2432,1810.8032,1817.3632,1823.9232,
        1830.4832,1837.0432,1843.6032,1850.1632,1856.7232,1863.2832,1869.8432,1876.4032,1882.9632,
        1889.5232,1896.0832,1902.6432,1909.2032,1915.7632,1922.3232,1928.8832,1935.4432,1942.0032,
        1948.5632,1955.1232,1961.6832,1968.2432,1974.8032,1981.3632,1987.9232,1994.4832]
        # expected y values: the original deposition fractions followed by the
        # extrapolated tail produced by the 'relative' ln-ln line-of-best-fit
        # (note the tail differs from the straight ln-ln variant tested above)
        expected_result_y = [0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
        0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
        0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
        0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
        0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
        0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
        0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
        0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
        0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
        0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
        0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
        0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
        0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
        0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
        0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
        0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741,0.011826349,0.011812263,
        0.011798955,0.011786343,0.011774359,0.011762944,0.011752047,0.011741623,0.011731633,
        0.011722043,0.011712822,0.011703943,0.011695383,0.011687118,0.01167913,0.011671401,
        0.011663915,0.011656656,0.011649613,0.011642772,0.011636122,0.011629653,0.011623356,
        0.011617221,0.011611241,0.011605408,0.011599715,0.011594155,0.011588724,0.011583413,
        0.01157822,0.011573138,0.011568163,0.011563291,0.011558517,0.011553838,0.011549249,
        0.011544748,0.011540332,0.011535997,0.01153174,0.011527558,0.01152345,0.011519412,
        0.011515442,0.011511538,0.011507698,0.011503919,0.011500201,0.01149654,0.011492935,
        0.011489385,0.011485888,0.011482442,0.011479046,0.011475699,0.011472399,0.011469144,
        0.011465934,0.011462768,0.011459644,0.011456561,0.011453518,0.011450514,0.011447548,
        0.011444619,0.011441727,0.011438869,0.011436047,0.011433258,0.011430502,0.011427778,
        0.011425086,0.011422424,0.011419792,0.01141719,0.011414616,0.011412071,0.011409553,
        0.011407062,0.011404597,0.011402158,0.011399744,0.011397355,0.01139499,0.01139265,
        0.011390332,0.011388037,0.011385765,0.011383515,0.011381286,0.011379078,0.011376891,
        0.011374725,0.011372579,0.011370452,0.011368344,0.011366256,0.011364186,0.011362134,
        0.011360101,0.011358085,0.011356086,0.011354104,0.01135214,0.011350191,0.011348259,
        0.011346343,0.011344443,0.011342558,0.011340688,0.011338834,0.011336994,0.011335168,
        0.011333357,0.01133156,0.011329777,0.011328007,0.011326251,0.011324508,0.011322778,
        0.011321061,0.011319356,0.011317664,0.011315985,0.011314317,0.011312661,0.011311018,
        0.011309385,0.011307764,0.011306155,0.011304557,0.011302969,0.011301393,0.011299827,
        0.011298272,0.011296727,0.011295192,0.011293668,0.011292153,0.011290649,0.011289154,
        0.011287669,0.011286193,0.011284727,0.011283269,0.011281822,0.011280383,0.011278953,
        0.011277532,0.011276119,0.011274716]
        # 153 original + 152 extrapolated points = 305 total expected points
        expected_result_npts = [305]
        # extension parameters: extrapolate beyond 997.3632 ft in 6.56-ft steps,
        # fitting the line to the last 16 points with the 'relative' ln-ln
        # transform (ln_ln_trans=False)
        max_dist = 997.3632
        dist_inc = 6.56
        num_pts_ext = 16
        ln_ln_trans = False
        # input: the unextended 153-point distance curve (0 to 997.3632 ft)
        x_array_in = pd.Series([0.,6.5616,13.1232,19.6848,26.2464,
        32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
        111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
        183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
        255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
        328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
        400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
        472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
        544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
        616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
        688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
        761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
        833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
        905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
        977.6784,984.24,990.8016,997.3632])
        # input: the unextended 153-point deposition-fraction curve
        y_array_in = pd.Series([0.49997,0.37451,0.29849,0.25004,0.2138,0.19455,0.18448,0.17591,0.1678,0.15421,0.1401,
        0.12693,0.11785,0.11144,0.10675,0.099496,0.092323,0.085695,0.079234,0.074253,0.070316,
        0.067191,0.064594,0.062337,0.060348,0.058192,0.055224,0.051972,0.049283,0.04757,
        0.046226,0.044969,0.043922,0.043027,0.041934,0.040528,0.039018,0.037744,0.036762,
        0.035923,0.035071,0.034267,0.033456,0.032629,0.03184,0.031078,0.030363,0.02968,0.029028,
        0.028399,0.027788,0.027199,0.026642,0.026124,0.025635,0.02517,0.024719,0.024287,0.023867,
        0.023457,0.023061,0.022685,0.022334,0.021998,0.021675,0.02136,0.021055,0.020758,0.020467,
        0.020186,0.019919,0.019665,0.019421,0.019184,0.018951,0.018727,0.018514,0.018311,
        0.018118,0.017929,0.017745,0.017564,0.017387,0.017214,0.017046,0.016886,0.016732,
        0.016587,0.016446,0.016309,0.016174,0.016039,0.015906,0.015777,0.015653,0.015532,
        0.015418,0.015308,0.015202,0.015097,0.014991,0.014885,0.014782,0.014683,0.014588,0.0145,
        0.014415,0.014334,0.014254,0.014172,0.01409,0.014007,0.013926,0.013846,0.01377,0.013697,
        0.013628,0.013559,0.013491,0.013423,0.013354,0.013288,0.013223,0.01316,0.013099,0.01304,
        0.012983,0.012926,0.01287,0.012814,0.012758,0.012703,0.012649,0.012597,0.012547,0.012499,
        0.01245,0.012402,0.012352,0.012302,0.012254,0.012205,0.012158,0.012113,0.012068,0.012025,
        0.011982,0.01194,0.011899,0.011859,0.011819,0.01178,0.011741])
        # method under test
        x_array_out, y_array_out = agdrift_empty.extend_curve(x_array_in, y_array_in, max_dist, dist_inc, num_pts_ext,
                                                              ln_ln_trans)
        npts_out = [len(y_array_out)]
        #
        #agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "extend_data.csv")
        # verify the point count exactly, then x and y curves to within relative tolerance
        npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
        npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
        npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
    finally:
        # no cleanup required; result/expected tables are echoed below for manual inspection
        pass
        tab1 = [x_array_out, expected_result_x]
        tab2 = [y_array_out, expected_result_y]
        print("\n")
        print(inspect.currentframe().f_code.co_name)
        print('expected {0} number of points and got {1} points'.format(expected_result_npts[0], npts_out[0]))
        print("x_array result/x_array_expected")
        print(tabulate(tab1, headers='keys', tablefmt='rst'))
        print("y_array result/y_array_expected")
        print(tabulate(tab2, headers='keys', tablefmt='rst'))
    return
# unittest will
# 1) call the setup method
# 2) then call every method starting with "test",
# 3) then the teardown method
# standard unittest entry point: runs every test* method in this module
if __name__ == '__main__':
    unittest.main()
    #pass
| [
"unittest.main",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"tabulate.tabulate",
"pandas.Series",
"numpy.testing.assert_equal",
"inspect.currentframe",
"numpy.testing.assert_allclose"
] | [((167075, 167090), 'unittest.main', 'unittest.main', ([], {}), '()\n', (167088, 167090), False, 'import unittest\n'), ((1077, 1091), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1089, 1091), True, 'import pandas as pd\n'), ((2222, 2251), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (2231, 2251), True, 'import pandas as pd\n'), ((2278, 3411), 'pandas.Series', 'pd.Series', (["['Valid Tier I Aquatic Aerial Scenario',\n 'Valid Tier I Terrestrial Aerial Scenario',\n 'Valid Tier I Aquatic Aerial Scenario',\n 'Valid Tier I Terrestrial Aerial Scenario',\n 'Valid Tier I Aquatic Aerial Scenario',\n 'Valid Tier I Terrestrial Ground Scenario',\n 'Valid Tier I Aquatic Ground Scenario',\n 'Valid Tier I Terrestrial Ground Scenario',\n 'Valid Tier I Aquatic Ground Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Valid Tier I Aquatic Airblast Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Valid Tier I Aquatic Airblast Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Invalid Tier I Aquatic Aerial Scenario',\n 'Invalid Tier I Aquatic Ground Scenario',\n 'Invalid Tier I Aquatic Airblast Scenario',\n 'Invalid Tier I Terrestrial Aerial Scenario',\n 'Valid Tier I Terrestrial Ground Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Invalid scenario ecosystem_type',\n 'Invalid Tier I Aquatic Assessment application method',\n 'Invalid Tier I Terrestrial Assessment application method']"], {'dtype': '"""object"""'}), "(['Valid Tier I Aquatic Aerial Scenario',\n 'Valid Tier I Terrestrial Aerial Scenario',\n 'Valid Tier I Aquatic Aerial Scenario',\n 'Valid Tier I Terrestrial Aerial Scenario',\n 'Valid Tier I Aquatic Aerial Scenario',\n 'Valid Tier I Terrestrial Ground Scenario',\n 'Valid Tier I Aquatic Ground Scenario',\n 'Valid Tier I Terrestrial Ground Scenario',\n 'Valid Tier I Aquatic Ground Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Valid Tier I Aquatic 
Airblast Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Valid Tier I Aquatic Airblast Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Invalid Tier I Aquatic Aerial Scenario',\n 'Invalid Tier I Aquatic Ground Scenario',\n 'Invalid Tier I Aquatic Airblast Scenario',\n 'Invalid Tier I Terrestrial Aerial Scenario',\n 'Valid Tier I Terrestrial Ground Scenario',\n 'Valid Tier I Terrestrial Airblast Scenario',\n 'Invalid scenario ecosystem_type',\n 'Invalid Tier I Aquatic Assessment application method',\n 'Invalid Tier I Terrestrial Assessment application method'], dtype='object'\n )\n", (2287, 3411), True, 'import pandas as pd\n'), ((11331, 11608), 'pandas.Series', 'pd.Series', (["['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf',\n 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard', 'Invalid']"], {'dtype': '"""object"""'}), "(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',\n 'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard', 'Invalid'], dtype='object')\n", (11340, 11608), True, 'import pandas as pd\n'), ((19086, 19115), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (19095, 19115), True, 'import pandas as pd\n'), ((19142, 19408), 'pandas.Series', 'pd.Series', (["['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf',\n 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard']"], {'dtype': '"""object"""'}), "(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',\n 'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 
'airblast_orchard'], dtype='object')\n", (19151, 19408), True, 'import pandas as pd\n'), ((21269, 21297), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (21278, 21297), True, 'import pandas as pd\n'), ((24250, 24278), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (24259, 24278), True, 'import pandas as pd\n'), ((28260, 28289), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (28269, 28289), True, 'import pandas as pd\n'), ((30073, 30129), 'pandas.Series', 'pd.Series', (['[0.0, 1.0, 4.0, 5.0, 6.0, 7.0]'], {'dtype': '"""float"""'}), "([0.0, 1.0, 4.0, 5.0, 6.0, 7.0], dtype='float')\n", (30082, 30129), True, 'import pandas as pd\n'), ((30147, 30209), 'pandas.Series', 'pd.Series', (['[10.0, 11.0, 14.0, 15.0, 16.0, 17.0]'], {'dtype': '"""float"""'}), "([10.0, 11.0, 14.0, 15.0, 16.0, 17.0], dtype='float')\n", (30156, 30209), True, 'import pandas as pd\n'), ((31718, 31781), 'pandas.Series', 'pd.Series', (['[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]'], {'dtype': '"""int"""'}), "([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], dtype='int')\n", (31727, 31781), True, 'import pandas as pd\n'), ((31801, 32938), 'pandas.Series', 'pd.Series', (['[[0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [1, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [2, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0], [3, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5, 18, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6, 19, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [7, 20, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 8, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0,\n 0, 0], [9, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [10, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0], [11, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [12, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': '"""int"""'}), "([[0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [1, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0], [2, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 17, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5, 18, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6, 19, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 7, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [8, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [9, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0], [10, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [11, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [12, 25, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype='int')\n", (31810, 32938), True, 'import pandas as pd\n'), ((36230, 36296), 'pandas.Series', 'pd.Series', (['[208.7, 208.7, 100.0, 400.0, 150.0, 0.0]'], {'dtype': '"""float"""'}), "([208.7, 208.7, 100.0, 400.0, 150.0, 0.0], dtype='float')\n", (36239, 36296), True, 'import pandas as pd\n'), ((36319, 36391), 'pandas.Series', 'pd.Series', (['[515.8, 515.8, 1076.39, 269.098, 717.593, 0.0]'], {'dtype': '"""float"""'}), "([515.8, 515.8, 1076.39, 269.098, 717.593, 0.0], dtype='float')\n", (36328, 36391), True, 'import 
pandas as pd\n'), ((36416, 36477), 'pandas.Series', 'pd.Series', (['[6.56, 0.4921, 7.0, 23.0, 0.0, 0.0]'], {'dtype': '"""float"""'}), "([6.56, 0.4921, 7.0, 23.0, 0.0, 0.0], dtype='float')\n", (36425, 36477), True, 'import pandas as pd\n'), ((40790, 40824), 'pandas.Series', 'pd.Series', (['[0.1538462, 0.5, 240.0]'], {}), '([0.1538462, 0.5, 240.0])\n', (40799, 40824), True, 'import pandas as pd\n'), ((41924, 41962), 'pandas.Series', 'pd.Series', (['[6.5, 31250.0, 375000000.0]'], {}), '([6.5, 31250.0, 375000000.0])\n', (41933, 41962), True, 'import pandas as pd\n'), ((43046, 43084), 'pandas.Series', 'pd.Series', (['[0.1553846, 8.8e-06, 4e-08]'], {}), '([0.1553846, 8.8e-06, 4e-08])\n', (43055, 43084), True, 'import pandas as pd\n'), ((44294, 44337), 'pandas.Series', 'pd.Series', (['[0.01516739, 0.111524, 0.267659]'], {}), '([0.01516739, 0.111524, 0.267659])\n', (44303, 44337), True, 'import pandas as pd\n'), ((45728, 45779), 'pandas.Series', 'pd.Series', (['[2.311455e-05, 0.002209479, 0.002447423]'], {}), '([2.311455e-05, 0.002209479, 0.002447423])\n', (45737, 45779), True, 'import pandas as pd\n'), ((47556, 47601), 'pandas.Series', 'pd.Series', (['[0.02676538, 2.2304486, 44.608973]'], {}), '([0.02676538, 2.2304486, 44.608973])\n', (47565, 47601), True, 'import pandas as pd\n'), ((48847, 48891), 'pandas.Series', 'pd.Series', (['[1.401061, 0.3648362, 0.03362546]'], {}), '([1.401061, 0.3648362, 0.03362546])\n', (48856, 48891), True, 'import pandas as pd\n'), ((50278, 50319), 'pandas.Series', 'pd.Series', (['[70.07119, 18.24654, 22.41823]'], {}), '([70.07119, 18.24654, 22.41823])\n', (50287, 50319), True, 'import pandas as pd\n'), ((51960, 52013), 'pandas.Series', 'pd.Series', (['[1.401063e-05, 3.648369e-06, 3.362552e-07]'], {}), '([1.401063e-05, 3.648369e-06, 3.362552e-07])\n', (51969, 52013), True, 'import pandas as pd\n'), ((53482, 53510), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (53491, 53510), True, 'import pandas as 
pd\n'), ((53539, 53567), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (53548, 53567), True, 'import pandas as pd\n'), ((53599, 53628), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (53608, 53628), True, 'import pandas as pd\n'), ((53651, 53679), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (53660, 53679), True, 'import pandas as pd\n'), ((53701, 53729), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (53710, 53729), True, 'import pandas as pd\n'), ((53752, 53780), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (53761, 53780), True, 'import pandas as pd\n'), ((53803, 53831), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (53812, 53831), True, 'import pandas as pd\n'), ((61068, 61096), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61077, 61096), True, 'import pandas as pd\n'), ((61125, 61153), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61134, 61153), True, 'import pandas as pd\n'), ((61185, 61214), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (61194, 61214), True, 'import pandas as pd\n'), ((61237, 61265), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61246, 61265), True, 'import pandas as pd\n'), ((61287, 61315), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61296, 61315), True, 'import pandas as pd\n'), ((61338, 61366), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61347, 61366), True, 'import pandas as pd\n'), ((61389, 61417), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61398, 
61417), True, 'import pandas as pd\n'), ((65009, 65037), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65018, 65037), True, 'import pandas as pd\n'), ((65066, 65094), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65075, 65094), True, 'import pandas as pd\n'), ((65126, 65155), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (65135, 65155), True, 'import pandas as pd\n'), ((65177, 65205), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65186, 65205), True, 'import pandas as pd\n'), ((65227, 65255), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65236, 65255), True, 'import pandas as pd\n'), ((65278, 65306), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65287, 65306), True, 'import pandas as pd\n'), ((65329, 65357), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65338, 65357), True, 'import pandas as pd\n'), ((69390, 69418), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (69399, 69418), True, 'import pandas as pd\n'), ((69447, 69475), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (69456, 69475), True, 'import pandas as pd\n'), ((69507, 69536), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (69516, 69536), True, 'import pandas as pd\n'), ((69559, 69587), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (69568, 69587), True, 'import pandas as pd\n'), ((69609, 69637), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (69618, 69637), True, 'import pandas as pd\n'), ((69660, 69688), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), 
"([], dtype='float')\n", (69669, 69688), True, 'import pandas as pd\n'), ((69711, 69739), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (69720, 69739), True, 'import pandas as pd\n'), ((73232, 73260), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (73241, 73260), True, 'import pandas as pd\n'), ((73289, 73317), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (73298, 73317), True, 'import pandas as pd\n'), ((73349, 73378), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (73358, 73378), True, 'import pandas as pd\n'), ((73401, 73429), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (73410, 73429), True, 'import pandas as pd\n'), ((73451, 73479), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (73460, 73479), True, 'import pandas as pd\n'), ((73502, 73530), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (73511, 73530), True, 'import pandas as pd\n'), ((73553, 73581), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (73562, 73581), True, 'import pandas as pd\n'), ((80603, 80631), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (80612, 80631), True, 'import pandas as pd\n'), ((80660, 80688), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (80669, 80688), True, 'import pandas as pd\n'), ((80711, 80739), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (80720, 80739), True, 'import pandas as pd\n'), ((80761, 80789), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (80770, 80789), True, 'import pandas as pd\n'), ((80812, 80840), 'pandas.Series', 'pd.Series', 
(['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (80821, 80840), True, 'import pandas as pd\n'), ((80863, 80891), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (80872, 80891), True, 'import pandas as pd\n'), ((83921, 83949), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (83930, 83949), True, 'import pandas as pd\n'), ((83978, 84006), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (83987, 84006), True, 'import pandas as pd\n'), ((84029, 84057), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (84038, 84057), True, 'import pandas as pd\n'), ((84079, 84107), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (84088, 84107), True, 'import pandas as pd\n'), ((84130, 84158), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (84139, 84158), True, 'import pandas as pd\n'), ((84181, 84209), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (84190, 84209), True, 'import pandas as pd\n'), ((87671, 87699), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (87680, 87699), True, 'import pandas as pd\n'), ((87728, 87756), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (87737, 87756), True, 'import pandas as pd\n'), ((87779, 87807), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (87788, 87807), True, 'import pandas as pd\n'), ((87829, 87857), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (87838, 87857), True, 'import pandas as pd\n'), ((87880, 87908), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (87889, 87908), True, 'import pandas as pd\n'), ((87931, 87959), 
'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (87940, 87959), True, 'import pandas as pd\n'), ((87993, 88021), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (88002, 88021), True, 'import pandas as pd\n'), ((91715, 91760), 'pandas.Series', 'pd.Series', (['(num_sims * [np.nan])'], {'dtype': '"""float"""'}), "(num_sims * [np.nan], dtype='float')\n", (91724, 91760), True, 'import pandas as pd\n'), ((91802, 91847), 'pandas.Series', 'pd.Series', (['(num_sims * [np.nan])'], {'dtype': '"""float"""'}), "(num_sims * [np.nan], dtype='float')\n", (91811, 91847), True, 'import pandas as pd\n'), ((91888, 91933), 'pandas.Series', 'pd.Series', (['(num_sims * [np.nan])'], {'dtype': '"""float"""'}), "(num_sims * [np.nan], dtype='float')\n", (91897, 91933), True, 'import pandas as pd\n'), ((91980, 92025), 'pandas.Series', 'pd.Series', (['(num_sims * [np.nan])'], {'dtype': '"""float"""'}), "(num_sims * [np.nan], dtype='float')\n", (91989, 92025), True, 'import pandas as pd\n'), ((92074, 92119), 'pandas.Series', 'pd.Series', (['(num_sims * [np.nan])'], {'dtype': '"""float"""'}), "(num_sims * [np.nan], dtype='float')\n", (92083, 92119), True, 'import pandas as pd\n'), ((92138, 92196), 'pandas.Series', 'pd.Series', (['(num_sims * [num_args * [np.nan]])'], {'dtype': '"""float"""'}), "(num_sims * [num_args * [np.nan]], dtype='float')\n", (92147, 92196), True, 'import pandas as pd\n'), ((92221, 92279), 'pandas.Series', 'pd.Series', (['(num_sims * [num_args * [np.nan]])'], {'dtype': '"""float"""'}), "(num_sims * [num_args * [np.nan]], dtype='float')\n", (92230, 92279), True, 'import pandas as pd\n'), ((102548, 102576), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (102557, 102576), True, 'import pandas as pd\n'), ((102605, 102633), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (102614, 102633), True, 'import pandas 
as pd\n'), ((102761, 102789), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (102770, 102789), True, 'import pandas as pd\n'), ((102812, 102840), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (102821, 102840), True, 'import pandas as pd\n'), ((119869, 119897), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (119878, 119897), True, 'import pandas as pd\n'), ((119926, 119954), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (119935, 119954), True, 'import pandas as pd\n'), ((120082, 120110), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (120091, 120110), True, 'import pandas as pd\n'), ((120133, 120161), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (120142, 120161), True, 'import pandas as pd\n'), ((136780, 136808), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (136789, 136808), True, 'import pandas as pd\n'), ((136837, 136865), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (136846, 136865), True, 'import pandas as pd\n'), ((136993, 137021), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (137002, 137021), True, 'import pandas as pd\n'), ((137044, 137072), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (137053, 137072), True, 'import pandas as pd\n'), ((153567, 153595), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (153576, 153595), True, 'import pandas as pd\n'), ((153624, 153652), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (153633, 153652), True, 'import pandas as pd\n'), ((153780, 153808), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 
'"""float"""'}), "([], dtype='float')\n", (153789, 153808), True, 'import pandas as pd\n'), ((153831, 153859), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (153840, 153859), True, 'import pandas as pd\n'), ((3748, 4207), 'pandas.Series', 'pd.Series', (["['tier_1_aerial', 'tier_1_aerial', 'tier_1_aerial', 'tier_1_aerial',\n 'tier_1_aerial', 'tier_1_ground', 'tier_1_ground', 'tier_1_ground',\n 'tier_1_ground', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_airblast', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_aerial', 'tier_1_ground', 'tier_1_airblast', 'tier_1_aerial',\n 'tier_1_ground', 'tier_1_airblast', 'tier_1_aerial', 'Tier II Aerial',\n 'Tier III Aerial']"], {'dtype': '"""object"""'}), "(['tier_1_aerial', 'tier_1_aerial', 'tier_1_aerial',\n 'tier_1_aerial', 'tier_1_aerial', 'tier_1_ground', 'tier_1_ground',\n 'tier_1_ground', 'tier_1_ground', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_airblast', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_aerial', 'tier_1_ground', 'tier_1_airblast', 'tier_1_aerial',\n 'tier_1_ground', 'tier_1_airblast', 'tier_1_aerial', 'Tier II Aerial',\n 'Tier III Aerial'], dtype='object')\n", (3757, 4207), True, 'import pandas as pd\n'), ((4618, 5229), 'pandas.Series', 'pd.Series', (["['aquatic_assessment', 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment', 'aquatic_assessment',\n 'aquatic_assessment', 'terrestrial_assessment',\n 'terrestrial_assessment', 'terrestrial_assessment', 'Field Assessment',\n 'aquatic_assessment', 'terrestrial_assessment']"], {'dtype': '"""object"""'}), "(['aquatic_assessment', 'terrestrial_assessment',\n 'aquatic_assessment', 'terrestrial_assessment', 
'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment',\n 'terrestrial_assessment', 'aquatic_assessment', 'aquatic_assessment',\n 'aquatic_assessment', 'terrestrial_assessment',\n 'terrestrial_assessment', 'terrestrial_assessment', 'Field Assessment',\n 'aquatic_assessment', 'terrestrial_assessment'], dtype='object')\n", (4627, 5229), True, 'import pandas as pd\n'), ((5631, 6026), 'pandas.Series', 'pd.Series', (["['epa_defined_pond', 'NaN', 'epa_defined_wetland', 'NaN',\n 'user_defined_pond', 'NaN', 'user_defined_wetland', 'NaN',\n 'epa_defined_wetland', 'NaN', 'user_defined_pond', 'NaN',\n 'user_defined_wetland', 'NaN', 'Defined Pond', 'user_defined_pond',\n 'epa_defined_pond', 'NaN', 'NaN', 'NaN', 'epa_defined_pond',\n 'user_defined_wetland', 'user_defined_pond']"], {'dtype': '"""object"""'}), "(['epa_defined_pond', 'NaN', 'epa_defined_wetland', 'NaN',\n 'user_defined_pond', 'NaN', 'user_defined_wetland', 'NaN',\n 'epa_defined_wetland', 'NaN', 'user_defined_pond', 'NaN',\n 'user_defined_wetland', 'NaN', 'Defined Pond', 'user_defined_pond',\n 'epa_defined_pond', 'NaN', 'NaN', 'NaN', 'epa_defined_pond',\n 'user_defined_wetland', 'user_defined_pond'], dtype='object')\n", (5640, 6026), True, 'import pandas as pd\n'), ((6449, 6890), 'pandas.Series', 'pd.Series', (["['NaN', 'user_defined_terrestrial', 'NaN', 'epa_defined_terrestrial', 'NaN',\n 'user_defined_terrestrial', 'NaN', 'user_defined_terrestrial', 'NaN',\n 'epa_defined_terrestrial', 'NaN', 'user_defined_terrestrial', 'NaN',\n 'user_defined_terrestrial', 'NaN', 'NaN', 'NaN',\n 'user_defined_terrestrial', 'user_defined_terrestrial',\n 'user_defined_terrestrial', 'NaN', 'NaN', 'user_defined_terrestrial']"], {'dtype': '"""object"""'}), "(['NaN', 'user_defined_terrestrial', 'NaN',\n 'epa_defined_terrestrial', 'NaN', 'user_defined_terrestrial', 'NaN',\n 
'user_defined_terrestrial', 'NaN', 'epa_defined_terrestrial', 'NaN',\n 'user_defined_terrestrial', 'NaN', 'user_defined_terrestrial', 'NaN',\n 'NaN', 'NaN', 'user_defined_terrestrial', 'user_defined_terrestrial',\n 'user_defined_terrestrial', 'NaN', 'NaN', 'user_defined_terrestrial'],\n dtype='object')\n", (6458, 6890), True, 'import pandas as pd\n'), ((7304, 7646), 'pandas.Series', 'pd.Series', (["['very_fine_to_fine', 'fine_to_medium', 'medium_to_coarse',\n 'coarse_to_very_coarse', 'fine_to_medium', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'medium_to_coarse', 'NaN',\n 'very_fine_to_medium', 'NaN', 'very_fine Indeed', 'NaN',\n 'very_fine_to_medium', 'medium_to_coarse', 'NaN']"], {'dtype': '"""object"""'}), "(['very_fine_to_fine', 'fine_to_medium', 'medium_to_coarse',\n 'coarse_to_very_coarse', 'fine_to_medium', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'medium_to_coarse', 'NaN',\n 'very_fine_to_medium', 'NaN', 'very_fine Indeed', 'NaN',\n 'very_fine_to_medium', 'medium_to_coarse', 'NaN'], dtype='object')\n", (7313, 7646), True, 'import pandas as pd\n'), ((8045, 8349), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'very_fine', 'fine_to_medium-coarse',\n 'very_fine', 'fine_to_medium-coarse', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN', 'very_fine', 'NaN', 'fine_to_medium-coarse', 'very_fine', 'NaN',\n 'very_fine_to_medium', 'NaN', 'very_fine']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'very_fine',\n 'fine_to_medium-coarse', 'very_fine', 'fine_to_medium-coarse', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'very_fine', 'NaN',\n 'fine_to_medium-coarse', 'very_fine', 'NaN', 'very_fine_to_medium',\n 'NaN', 'very_fine'], dtype='object')\n", (8054, 8349), True, 'import pandas as pd\n'), ((8743, 8942), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'high', 'low', 'high', 'low', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'high', 'NaN',\n 
'NaN', 'NaN', 'NaN']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'high', 'low', 'high', 'low',\n 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'high',\n 'NaN', 'NaN', 'NaN', 'NaN'], dtype='object')\n", (8752, 8942), True, 'import pandas as pd\n'), ((9344, 9562), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'normal',\n 'dense', 'sparse', 'orchard', 'vineyard', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN', 'vineyard', 'NaN', 'NaN', 'NaN']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN',\n 'normal', 'dense', 'sparse', 'orchard', 'vineyard', 'NaN', 'NaN', 'NaN',\n 'NaN', 'NaN', 'vineyard', 'NaN', 'NaN', 'NaN'], dtype='object')\n", (9353, 9562), True, 'import pandas as pd\n'), ((10066, 10139), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result', 'expected_result'], {'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, err_msg='', verbose=True)\n", (10088, 10139), True, 'import numpy.testing as npt\n'), ((12205, 12581), 'pandas.Series', 'pd.Series', (["['Valid Tier I Aerial', 'Valid Tier I Aerial', 'Valid Tier I Aerial',\n 'Valid Tier I Aerial', 'Valid Tier I Ground', 'Valid Tier I Ground',\n 'Valid Tier I Ground', 'Valid Tier I Ground', 'Valid Tier I Airblast',\n 'Valid Tier I Airblast', 'Valid Tier I Airblast',\n 'Valid Tier I Airblast', 'Valid Tier I Airblast', 'Invalid Scenario']"], {'dtype': '"""object"""'}), "(['Valid Tier I Aerial', 'Valid Tier I Aerial',\n 'Valid Tier I Aerial', 'Valid Tier I Aerial', 'Valid Tier I Ground',\n 'Valid Tier I Ground', 'Valid Tier I Ground', 'Valid Tier I Ground',\n 'Valid Tier I Airblast', 'Valid Tier I Airblast',\n 'Valid Tier I Airblast', 'Valid Tier I Airblast',\n 'Valid Tier I Airblast', 'Invalid Scenario'], dtype='object')\n", (12214, 12581), True, 'import pandas as pd\n'), ((13364, 13655), 'pandas.Series', 'pd.Series', (["['tier_1_aerial', 'tier_1_aerial', 
'tier_1_aerial', 'tier_1_aerial',\n 'tier_1_ground', 'tier_1_ground', 'tier_1_ground', 'tier_1_ground',\n 'tier_1_airblast', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_airblast', 'tier_1_airblast', 'tier_1_aerial']"], {'dtype': '"""object"""'}), "(['tier_1_aerial', 'tier_1_aerial', 'tier_1_aerial',\n 'tier_1_aerial', 'tier_1_ground', 'tier_1_ground', 'tier_1_ground',\n 'tier_1_ground', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_airblast', 'tier_1_airblast', 'tier_1_airblast',\n 'tier_1_aerial'], dtype='object')\n", (13373, 13655), True, 'import pandas as pd\n'), ((14439, 14628), 'pandas.Series', 'pd.Series', (["['very_fine_to_fine', 'fine_to_medium', 'medium_to_coarse',\n 'coarse_to_very_coarse', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN']"], {'dtype': '"""object"""'}), "(['very_fine_to_fine', 'fine_to_medium', 'medium_to_coarse',\n 'coarse_to_very_coarse', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN'], dtype='object')\n", (14448, 14628), True, 'import pandas as pd\n'), ((15303, 15484), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'very_fine', 'fine_to_medium-coarse',\n 'very_fine', 'fine_to_medium-coarse', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'very_fine', 'fine_to_medium-coarse',\n 'very_fine', 'fine_to_medium-coarse', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN',\n 'NaN'], dtype='object')\n", (15312, 15484), True, 'import pandas as pd\n'), ((16154, 16285), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'low', 'low', 'high', 'high', 'NaN', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'low', 'low', 'high', 'high', 'NaN',\n 'NaN', 'NaN', 'NaN', 'NaN', 'NaN'], dtype='object')\n", (16163, 16285), True, 'import pandas as pd\n'), ((16987, 17133), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'normal', 'dense',\n 'sparse', 
'vineyard', 'orchard', 'NaN']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'normal',\n 'dense', 'sparse', 'vineyard', 'orchard', 'NaN'], dtype='object')\n", (16996, 17133), True, 'import pandas as pd\n'), ((17935, 18008), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result', 'expected_result'], {'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, err_msg='', verbose=True)\n", (17957, 18008), True, 'import numpy.testing as npt\n'), ((19600, 19865), 'pandas.Series', 'pd.Series', (["['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf',\n 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard', 'distance_ft']"], {}), "(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',\n 'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard', 'distance_ft'])\n", (19609, 19865), True, 'import pandas as pd\n'), ((20162, 20235), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result', 'expected_result'], {'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, err_msg='', verbose=True)\n", (20184, 20235), True, 'import numpy.testing as npt\n'), ((23282, 23376), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (23301, 23376), True, 'import numpy.testing as npt\n'), ((27148, 27242), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (27167, 27242), 
True, 'import numpy.testing as npt\n'), ((28767, 28840), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result', 'expected_result'], {'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, err_msg='', verbose=True)\n", (28789, 28840), True, 'import numpy.testing as npt\n'), ((30232, 30298), 'pandas.Series', 'pd.Series', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]'], {'dtype': '"""float"""'}), "([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], dtype='float')\n", (30241, 30298), True, 'import pandas as pd\n'), ((30303, 30379), 'pandas.Series', 'pd.Series', (["[10.0, 11.0, 'nan', 'nan', 14.0, 15.0, 16.0, 17.0]"], {'dtype': '"""float"""'}), "([10.0, 11.0, 'nan', 'nan', 14.0, 15.0, 16.0, 17.0], dtype='float')\n", (30312, 30379), True, 'import pandas as pd\n'), ((30504, 30602), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result_x', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result_x, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (30523, 30602), True, 'import numpy.testing as npt\n'), ((30610, 30708), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result_y', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result_y, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (30629, 30708), True, 'import numpy.testing as npt\n'), ((33118, 33384), 'pandas.Series', 'pd.Series', (["['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf',\n 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard']"], {'dtype': '"""object"""'}), "(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',\n 'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard'], 
dtype='object')\n", (33127, 33384), True, 'import pandas as pd\n'), ((33550, 34059), 'pandas.Series', 'pd.Series', (["['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf',\n 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard', 'aerial_vf2f', 'aerial_f2m',\n 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf', 'ground_low_fmc',\n 'ground_high_vf', 'ground_high_fmc', 'airblast_normal',\n 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',\n 'airblast_orchard']"], {'dtype': '"""object"""'}), "(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',\n 'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',\n 'airblast_normal', 'airblast_dense', 'airblast_sparse',\n 'airblast_vineyard', 'airblast_orchard', 'aerial_vf2f', 'aerial_f2m',\n 'aerial_m2c', 'aerial_c2vc', 'ground_low_vf', 'ground_low_fmc',\n 'ground_high_vf', 'ground_high_fmc', 'airblast_normal',\n 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',\n 'airblast_orchard'], dtype='object')\n", (33559, 34059), True, 'import pandas as pd\n'), ((34550, 34638), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result_num_sims', 'expected_num_sims'], {'err_msg': '""""""', 'verbose': '(True)'}), "(result_num_sims, expected_num_sims, err_msg='',\n verbose=True)\n", (34572, 34638), True, 'import numpy.testing as npt\n'), ((34647, 34741), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['result_sim_indices', 'expected_sim_indices'], {'err_msg': '""""""', 'verbose': '(True)'}), "(result_sim_indices, expected_sim_indices, err_msg='',\n verbose=True)\n", (34669, 34741), True, 'import numpy.testing as npt\n'), ((36531, 36706), 'pandas.Series', 'pd.Series', (["['aquatic_assessment', 'aquatic_assessment', 'aquatic_assessment',\n 'aquatic_assessment', 'terrestrial_assessment', 'terrestrial_assessment']"], {'dtype': '"""object"""'}), 
"(['aquatic_assessment', 'aquatic_assessment', 'aquatic_assessment',\n 'aquatic_assessment', 'terrestrial_assessment',\n 'terrestrial_assessment'], dtype='object')\n", (36540, 36706), True, 'import pandas as pd\n'), ((37015, 37148), 'pandas.Series', 'pd.Series', (["['epa_defined_pond', 'epa_defined_wetland', 'user_defined_pond',\n 'user_defined_wetland', 'NaN', 'NaN']"], {'dtype': '"""object"""'}), "(['epa_defined_pond', 'epa_defined_wetland', 'user_defined_pond',\n 'user_defined_wetland', 'NaN', 'NaN'], dtype='object')\n", (37024, 37148), True, 'import pandas as pd\n'), ((37481, 37595), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 'user_defined_terrestrial',\n 'epa_defined_terrestrial']"], {'dtype': '"""object"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 'user_defined_terrestrial',\n 'epa_defined_terrestrial'], dtype='object')\n", (37490, 37595), True, 'import pandas as pd\n'), ((38218, 38286), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 100.0, 'NaN', 'NaN', 'NaN']"], {'dtype': '"""float"""'}), "(['NaN', 'NaN', 100.0, 'NaN', 'NaN', 'NaN'], dtype='float')\n", (38227, 38286), True, 'import pandas as pd\n'), ((38330, 38396), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 7.0, 'NaN', 'NaN', 'NaN']"], {'dtype': '"""float"""'}), "(['NaN', 'NaN', 7.0, 'NaN', 'NaN', 'NaN'], dtype='float')\n", (38339, 38396), True, 'import pandas as pd\n'), ((38443, 38511), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 400.0, 'NaN', 'NaN']"], {'dtype': '"""float"""'}), "(['NaN', 'NaN', 'NaN', 400.0, 'NaN', 'NaN'], dtype='float')\n", (38452, 38511), True, 'import pandas as pd\n'), ((38558, 38625), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 23.0, 'NaN', 'NaN']"], {'dtype': '"""float"""'}), "(['NaN', 'NaN', 'NaN', 23.0, 'NaN', 'NaN'], dtype='float')\n", (38567, 38625), True, 'import pandas as pd\n'), ((38675, 38743), 'pandas.Series', 'pd.Series', (["['NaN', 'NaN', 'NaN', 'NaN', 150.0, 'NaN']"], {'dtype': '"""float"""'}), "(['NaN', 'NaN', 'NaN', 'NaN', 
150.0, 'NaN'], dtype='float')\n", (38684, 38743), True, 'import pandas as pd\n'), ((38771, 38822), 'pandas.Series', 'pd.Series', (["(num_simulations * ['NaN'])"], {'dtype': '"""float"""'}), "(num_simulations * ['NaN'], dtype='float')\n", (38780, 38822), True, 'import pandas as pd\n'), ((38851, 38902), 'pandas.Series', 'pd.Series', (["(num_simulations * ['NaN'])"], {'dtype': '"""float"""'}), "(num_simulations * ['NaN'], dtype='float')\n", (38860, 38902), True, 'import pandas as pd\n'), ((38930, 38981), 'pandas.Series', 'pd.Series', (["(num_simulations * ['NaN'])"], {'dtype': '"""float"""'}), "(num_simulations * ['NaN'], dtype='float')\n", (38939, 38981), True, 'import pandas as pd\n'), ((39025, 39076), 'pandas.Series', 'pd.Series', (["(num_simulations * ['nan'])"], {'dtype': '"""float"""'}), "(num_simulations * ['nan'], dtype='float')\n", (39034, 39076), True, 'import pandas as pd\n'), ((39121, 39172), 'pandas.Series', 'pd.Series', (["(num_simulations * ['nan'])"], {'dtype': '"""float"""'}), "(num_simulations * ['nan'], dtype='float')\n", (39130, 39172), True, 'import pandas as pd\n'), ((39216, 39267), 'pandas.Series', 'pd.Series', (["(num_simulations * ['nan'])"], {'dtype': '"""float"""'}), "(num_simulations * ['nan'], dtype='float')\n", (39225, 39267), True, 'import pandas as pd\n'), ((39492, 39591), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['width_result', 'expected_width'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(width_result, expected_width, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (39511, 39591), True, 'import numpy.testing as npt\n'), ((39599, 39700), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['length_result', 'expected_length'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(length_result, expected_length, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (39618, 39700), True, 'import numpy.testing as npt\n'), ((39708, 39807), 
'numpy.testing.assert_allclose', 'npt.assert_allclose', (['depth_result', 'expected_depth'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(depth_result, expected_depth, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (39727, 39807), True, 'import numpy.testing as npt\n'), ((40871, 40919), 'pandas.Series', 'pd.Series', (['[1.0, 125.0, 300000.0]'], {'dtype': '"""float"""'}), "([1.0, 125.0, 300000.0], dtype='float')\n", (40880, 40919), True, 'import pandas as pd\n'), ((40946, 40992), 'pandas.Series', 'pd.Series', (['[6.5, 250.0, 1250.0]'], {'dtype': '"""float"""'}), "([6.5, 250.0, 1250.0], dtype='float')\n", (40955, 40992), True, 'import pandas as pd\n'), ((41096, 41190), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (41115, 41190), True, 'import numpy.testing as npt\n'), ((41998, 42046), 'pandas.Series', 'pd.Series', (['[1.0, 125.0, 300000.0]'], {'dtype': '"""float"""'}), "([1.0, 125.0, 300000.0], dtype='float')\n", (42007, 42046), True, 'import pandas as pd\n'), ((42069, 42115), 'pandas.Series', 'pd.Series', (['[6.5, 250.0, 1250.0]'], {'dtype': '"""float"""'}), "([6.5, 250.0, 1250.0], dtype='float')\n", (42078, 42115), True, 'import pandas as pd\n'), ((42209, 42303), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (42228, 42303), True, 'import numpy.testing as npt\n'), ((43130, 43177), 'pandas.Series', 'pd.Series', (['[1.01, 0.0022, 5e-05]'], {'dtype': '"""float"""'}), "([1.01, 0.0022, 5e-05], dtype='float')\n", (43139, 43177), True, 'import pandas as pd\n'), ((43211, 43257), 'pandas.Series', 'pd.Series', (['[6.5, 250.0, 1250.0]'], 
{'dtype': '"""float"""'}), "([6.5, 250.0, 1250.0], dtype='float')\n", (43220, 43257), True, 'import pandas as pd\n'), ((43361, 43455), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (43380, 43455), True, 'import numpy.testing as npt\n'), ((44378, 44424), 'pandas.Series', 'pd.Series', (['[17.0, 125.0, 300.0]'], {'dtype': '"""float"""'}), "([17.0, 125.0, 300.0], dtype='float')\n", (44387, 44424), True, 'import pandas as pd\n'), ((44607, 44701), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (44626, 44701), True, 'import numpy.testing as npt\n'), ((45828, 45874), 'pandas.Series', 'pd.Series', (['[17.0, 125.0, 300.0]'], {'dtype': '"""float"""'}), "([17.0, 125.0, 300.0], dtype='float')\n", (45837, 45874), True, 'import pandas as pd\n'), ((45896, 45942), 'pandas.Series', 'pd.Series', (['[50.0, 200.0, 500.0]'], {'dtype': '"""float"""'}), "([50.0, 200.0, 500.0], dtype='float')\n", (45905, 45942), True, 'import pandas as pd\n'), ((45966, 46014), 'pandas.Series', 'pd.Series', (['[6331.0, 538.0, 215.0]'], {'dtype': '"""float"""'}), "([6331.0, 538.0, 215.0], dtype='float')\n", (45975, 46014), True, 'import pandas as pd\n'), ((46037, 46078), 'pandas.Series', 'pd.Series', (['[0.5, 6.5, 3.0]'], {'dtype': '"""float"""'}), "([0.5, 6.5, 3.0], dtype='float')\n", (46046, 46078), True, 'import pandas as pd\n'), ((46534, 46628), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (46553, 46628), True, 
'import numpy.testing as npt\n'), ((47652, 47683), 'pandas.Series', 'pd.Series', (['[0.0003, 0.025, 0.5]'], {}), '([0.0003, 0.025, 0.5])\n', (47661, 47683), True, 'import pandas as pd\n'), ((47971, 48065), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (47990, 48065), True, 'import numpy.testing as npt\n'), ((48933, 48986), 'pandas.Series', 'pd.Series', (['[0.00125, 0.0003255, 3e-05]'], {'dtype': '"""float"""'}), "([0.00125, 0.0003255, 3e-05], dtype='float')\n", (48942, 48986), True, 'import pandas as pd\n'), ((49163, 49257), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (49182, 49257), True, 'import numpy.testing as npt\n'), ((50361, 50414), 'pandas.Series', 'pd.Series', (['[0.00125, 0.0003255, 3e-05]'], {'dtype': '"""float"""'}), "([0.00125, 0.0003255, 3e-05], dtype='float')\n", (50370, 50414), True, 'import pandas as pd\n'), ((50436, 50482), 'pandas.Series', 'pd.Series', (['[6.56, 208.7, 997.0]'], {'dtype': '"""float"""'}), "([6.56, 208.7, 997.0], dtype='float')\n", (50445, 50482), True, 'import pandas as pd\n'), ((50508, 50564), 'pandas.Series', 'pd.Series', (['[16408.38, 515.7595, 107.9629]'], {'dtype': '"""float"""'}), "([16408.38, 515.7595, 107.9629], dtype='float')\n", (50517, 50564), True, 'import pandas as pd\n'), ((50592, 50638), 'pandas.Series', 'pd.Series', (['[6.56, 6.56, 0.4921]'], {'dtype': '"""float"""'}), "([6.56, 6.56, 0.4921], dtype='float')\n", (50601, 50638), True, 'import pandas as pd\n'), ((50955, 51049), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 
'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (50974, 51049), True, 'import numpy.testing as npt\n'), ((52052, 52105), 'pandas.Series', 'pd.Series', (['[0.00125, 0.0003255, 3e-05]'], {'dtype': '"""float"""'}), "([0.00125, 0.0003255, 3e-05], dtype='float')\n", (52061, 52105), True, 'import pandas as pd\n'), ((52377, 52471), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result', 'expected_result'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result, expected_result, rtol=1e-05, atol=0, err_msg='',\n verbose=True)\n", (52396, 52471), True, 'import numpy.testing as npt\n'), ((59138, 59206), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (59160, 59206), True, 'import numpy.testing as npt\n'), ((59219, 59320), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (59238, 59320), True, 'import numpy.testing as npt\n'), ((59328, 59429), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (59347, 59429), True, 'import numpy.testing as npt\n'), ((63181, 63249), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (63203, 63249), True, 'import numpy.testing as npt\n'), ((63262, 63363), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 
'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (63281, 63363), True, 'import numpy.testing as npt\n'), ((63371, 63472), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (63390, 63472), True, 'import numpy.testing as npt\n'), ((67195, 67263), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (67217, 67263), True, 'import numpy.testing as npt\n'), ((67276, 67377), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (67295, 67377), True, 'import numpy.testing as npt\n'), ((67385, 67486), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (67404, 67486), True, 'import numpy.testing as npt\n'), ((71501, 71569), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (71523, 71569), True, 'import numpy.testing as npt\n'), ((71582, 71683), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (71601, 71683), True, 'import 
numpy.testing as npt\n'), ((71691, 71792), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (71710, 71792), True, 'import numpy.testing as npt\n'), ((78886, 78975), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_x_dist_of_interest', 'x_dist_of_interest'], {'verbose': '(True)'}), '(expected_x_dist_of_interest, x_dist_of_interest,\n verbose=True)\n', (78908, 78975), True, 'import numpy.testing as npt\n'), ((78984, 79052), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (79006, 79052), True, 'import numpy.testing as npt\n'), ((79065, 79166), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (79084, 79166), True, 'import numpy.testing as npt\n'), ((79174, 79275), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (79193, 79275), True, 'import numpy.testing as npt\n'), ((82214, 82303), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_x_dist_of_interest', 'x_dist_of_interest'], {'verbose': '(True)'}), '(expected_x_dist_of_interest, x_dist_of_interest,\n verbose=True)\n', (82236, 82303), True, 'import numpy.testing as npt\n'), ((82312, 82380), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), 
'(expected_result_npts, npts_out, verbose=True)\n', (82334, 82380), True, 'import numpy.testing as npt\n'), ((82393, 82494), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (82412, 82494), True, 'import numpy.testing as npt\n'), ((82502, 82603), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (82521, 82603), True, 'import numpy.testing as npt\n'), ((85966, 86055), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_x_dist_of_interest', 'x_dist_of_interest'], {'verbose': '(True)'}), '(expected_x_dist_of_interest, x_dist_of_interest,\n verbose=True)\n', (85988, 86055), True, 'import numpy.testing as npt\n'), ((86064, 86132), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (86086, 86132), True, 'import numpy.testing as npt\n'), ((86145, 86246), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (86164, 86246), True, 'import numpy.testing as npt\n'), ((86254, 86355), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (86273, 86355), True, 'import numpy.testing as npt\n'), ((89724, 89813), 
'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_x_dist_of_interest', 'x_dist_of_interest'], {'verbose': '(True)'}), '(expected_x_dist_of_interest, x_dist_of_interest,\n verbose=True)\n', (89746, 89813), True, 'import numpy.testing as npt\n'), ((89823, 89891), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (89845, 89891), True, 'import numpy.testing as npt\n'), ((89905, 90006), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (89924, 90006), True, 'import numpy.testing as npt\n'), ((90014, 90115), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (90033, 90115), True, 'import numpy.testing as npt\n'), ((92629, 92684), 'pandas.Series', 'pd.Series', (['[1.2567, 0.000355, 3.454e-05]'], {'dtype': '"""float"""'}), "([1.2567, 0.000355, 3.454e-05], dtype='float')\n", (92638, 92684), True, 'import pandas as pd\n'), ((92707, 92762), 'pandas.Series', 'pd.Series', (['[1.2567, 0.000355, 3.454e-05]'], {'dtype': '"""float"""'}), "([1.2567, 0.000355, 3.454e-05], dtype='float')\n", (92716, 92762), True, 'import pandas as pd\n'), ((92785, 92840), 'pandas.Series', 'pd.Series', (['[1.2567, 0.000355, 3.454e-05]'], {'dtype': '"""float"""'}), "([1.2567, 0.000355, 3.454e-05], dtype='float')\n", (92794, 92840), True, 'import pandas as pd\n'), ((92869, 92924), 'pandas.Series', 'pd.Series', (['[1.2567, 0.000355, 3.454e-05]'], {'dtype': '"""float"""'}), "([1.2567, 0.000355, 3.454e-05], dtype='float')\n", (92878, 92924), True, 
'import pandas as pd\n'), ((92955, 93010), 'pandas.Series', 'pd.Series', (['[1.2567, 0.000355, 3.454e-05]'], {'dtype': '"""float"""'}), "([1.2567, 0.000355, 3.454e-05], dtype='float')\n", (92964, 93010), True, 'import pandas as pd\n'), ((93614, 93714), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result[0]', 'expected_result[0]'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result[0], expected_result[0], rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (93633, 93714), True, 'import numpy.testing as npt\n'), ((93722, 93822), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result[1]', 'expected_result[1]'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result[1], expected_result[1], rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (93741, 93822), True, 'import numpy.testing as npt\n'), ((93830, 93930), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['result[2]', 'expected_result[2]'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(result[2], expected_result[2], rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (93849, 93930), True, 'import numpy.testing as npt\n'), ((98825, 98880), 'numpy.testing.assert_equal', 'npt.assert_equal', (['expected_result', 'result'], {'verbose': '(True)'}), '(expected_result, result, verbose=True)\n', (98841, 98880), True, 'import numpy.testing as npt\n'), ((111623, 113210), 'pandas.Series', 'pd.Series', (['[0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312, 52.4928,\n 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, 104.9856, \n 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, 150.9168, \n 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, 196.848, \n 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, 242.7792, \n 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, 288.7104, \n 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, 
334.6416, \n 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, 380.5728, \n 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, 426.504, \n 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, 472.4352, \n 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, 518.3664, \n 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, 564.2976, \n 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, 610.2288, \n 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, 656.16, \n 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, 702.0912, \n 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, 748.0224, \n 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, 793.9536, \n 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, 839.8848, \n 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, 885.816, \n 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, 931.7472, \n 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, 977.6784, \n 984.24, 990.8016, 997.3632]'], {}), '([0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312,\n 52.4928, 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, \n 104.9856, 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, \n 150.9168, 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, \n 196.848, 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, \n 242.7792, 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, \n 288.7104, 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, \n 334.6416, 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, \n 380.5728, 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, \n 426.504, 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, \n 472.4352, 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, \n 518.3664, 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, \n 564.2976, 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, \n 610.2288, 616.7904, 
623.352, 629.9136, 636.4752, 643.0368, 649.5984, \n 656.16, 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, \n 702.0912, 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, \n 748.0224, 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, \n 793.9536, 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, \n 839.8848, 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, \n 885.816, 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, \n 931.7472, 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, \n 977.6784, 984.24, 990.8016, 997.3632])\n', (111632, 113210), True, 'import pandas as pd\n'), ((113329, 114942), 'pandas.Series', 'pd.Series', (['[0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, 0.17591, \n 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, 0.099496, \n 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, 0.064594, \n 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, 0.04757, \n 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, 0.039018, \n 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, 0.032629, \n 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, 0.027788, \n 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, 0.024287, \n 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, 0.021675, \n 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, 0.019665, \n 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, 0.018118, \n 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, 0.016886, \n 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, 0.015906, \n 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, 0.015097, \n 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, 0.014415, \n 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, 0.013846, \n 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, 0.013354, \n 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, 0.012926, 
\n 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, 0.012547, \n 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, 0.012205, \n 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, 0.011899, \n 0.011859, 0.011819, 0.01178, 0.011741]'], {}), '([0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, \n 0.17591, 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, \n 0.099496, 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, \n 0.064594, 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, \n 0.04757, 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, \n 0.039018, 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, \n 0.032629, 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, \n 0.027788, 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, \n 0.024287, 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, \n 0.021675, 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, \n 0.019665, 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, \n 0.018118, 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, \n 0.016886, 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, \n 0.015906, 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, \n 0.015097, 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, \n 0.014415, 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, \n 0.013846, 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, \n 0.013354, 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, \n 0.012926, 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, \n 0.012547, 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, \n 0.012205, 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, \n 0.011899, 0.011859, 0.011819, 0.01178, 0.011741])\n', (113338, 114942), True, 'import pandas as pd\n'), ((115549, 115617), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 
'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (115571, 115617), True, 'import numpy.testing as npt\n'), ((115630, 115731), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (115649, 115731), True, 'import numpy.testing as npt\n'), ((115739, 115840), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (115758, 115840), True, 'import numpy.testing as npt\n'), ((128540, 130127), 'pandas.Series', 'pd.Series', (['[0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312, 52.4928,\n 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, 104.9856, \n 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, 150.9168, \n 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, 196.848, \n 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, 242.7792, \n 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, 288.7104, \n 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, 334.6416, \n 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, 380.5728, \n 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, 426.504, \n 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, 472.4352, \n 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, 518.3664, \n 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, 564.2976, \n 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, 610.2288, \n 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, 656.16, \n 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, 702.0912, \n 708.6528, 715.2144, 721.776, 728.3376, 
734.8992, 741.4608, 748.0224, \n 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, 793.9536, \n 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, 839.8848, \n 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, 885.816, \n 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, 931.7472, \n 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, 977.6784, \n 984.24, 990.8016, 997.3632]'], {}), '([0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312,\n 52.4928, 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, \n 104.9856, 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, \n 150.9168, 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, \n 196.848, 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, \n 242.7792, 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, \n 288.7104, 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, \n 334.6416, 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, \n 380.5728, 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, \n 426.504, 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, \n 472.4352, 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, \n 518.3664, 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, \n 564.2976, 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, \n 610.2288, 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, \n 656.16, 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, \n 702.0912, 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, \n 748.0224, 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, \n 793.9536, 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, \n 839.8848, 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, \n 885.816, 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, \n 931.7472, 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, \n 977.6784, 984.24, 990.8016, 997.3632])\n', (128549, 130127), True, 'import 
pandas as pd\n'), ((130246, 131859), 'pandas.Series', 'pd.Series', (['[0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, 0.17591, \n 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, 0.099496, \n 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, 0.064594, \n 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, 0.04757, \n 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, 0.039018, \n 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, 0.032629, \n 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, 0.027788, \n 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, 0.024287, \n 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, 0.021675, \n 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, 0.019665, \n 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, 0.018118, \n 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, 0.016886, \n 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, 0.015906, \n 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, 0.015097, \n 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, 0.014415, \n 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, 0.013846, \n 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, 0.013354, \n 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, 0.012926, \n 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, 0.012547, \n 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, 0.012205, \n 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, 0.011899, \n 0.011859, 0.011819, 0.01178, 0.011741]'], {}), '([0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, \n 0.17591, 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, \n 0.099496, 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, \n 0.064594, 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, \n 0.04757, 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, \n 
0.039018, 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, \n 0.032629, 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, \n 0.027788, 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, \n 0.024287, 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, \n 0.021675, 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, \n 0.019665, 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, \n 0.018118, 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, \n 0.016886, 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, \n 0.015906, 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, \n 0.015097, 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, \n 0.014415, 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, \n 0.013846, 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, \n 0.013354, 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, \n 0.012926, 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, \n 0.012547, 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, \n 0.012205, 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, \n 0.011899, 0.011859, 0.011819, 0.01178, 0.011741])\n', (130255, 131859), True, 'import pandas as pd\n'), ((132465, 132533), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (132487, 132533), True, 'import numpy.testing as npt\n'), ((132546, 132647), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (132565, 132647), True, 'import numpy.testing as npt\n'), ((132655, 132757), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(0.0001)', 'atol': '(0)', 
'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=0.0001, atol=0,\n err_msg='', verbose=True)\n", (132674, 132757), True, 'import numpy.testing as npt\n'), ((145330, 146917), 'pandas.Series', 'pd.Series', (['[0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312, 52.4928,\n 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, 104.9856, \n 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, 150.9168, \n 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, 196.848, \n 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, 242.7792, \n 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, 288.7104, \n 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, 334.6416, \n 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, 380.5728, \n 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, 426.504, \n 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, 472.4352, \n 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, 518.3664, \n 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, 564.2976, \n 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, 610.2288, \n 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, 656.16, \n 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, 702.0912, \n 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, 748.0224, \n 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, 793.9536, \n 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, 839.8848, \n 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, 885.816, \n 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, 931.7472, \n 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, 977.6784, \n 984.24, 990.8016, 997.3632]'], {}), '([0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312,\n 52.4928, 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, \n 104.9856, 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, \n 
150.9168, 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, \n 196.848, 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, \n 242.7792, 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, \n 288.7104, 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, \n 334.6416, 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, \n 380.5728, 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, \n 426.504, 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, \n 472.4352, 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, \n 518.3664, 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, \n 564.2976, 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, \n 610.2288, 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, \n 656.16, 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, \n 702.0912, 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, \n 748.0224, 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, \n 793.9536, 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, \n 839.8848, 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, \n 885.816, 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, \n 931.7472, 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, \n 977.6784, 984.24, 990.8016, 997.3632])\n', (145339, 146917), True, 'import pandas as pd\n'), ((147036, 148649), 'pandas.Series', 'pd.Series', (['[0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, 0.17591, \n 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, 0.099496, \n 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, 0.064594, \n 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, 0.04757, \n 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, 0.039018, \n 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, 0.032629, \n 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, 0.027788, \n 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, 
0.024287, \n 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, 0.021675, \n 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, 0.019665, \n 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, 0.018118, \n 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, 0.016886, \n 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, 0.015906, \n 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, 0.015097, \n 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, 0.014415, \n 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, 0.013846, \n 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, 0.013354, \n 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, 0.012926, \n 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, 0.012547, \n 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, 0.012205, \n 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, 0.011899, \n 0.011859, 0.011819, 0.01178, 0.011741]'], {}), '([0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, \n 0.17591, 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, \n 0.099496, 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, \n 0.064594, 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, \n 0.04757, 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, \n 0.039018, 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, \n 0.032629, 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, \n 0.027788, 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, \n 0.024287, 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, \n 0.021675, 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, \n 0.019665, 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, \n 0.018118, 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, \n 0.016886, 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, \n 0.015906, 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 
0.015202, \n 0.015097, 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, \n 0.014415, 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, \n 0.013846, 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, \n 0.013354, 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, \n 0.012926, 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, \n 0.012547, 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, \n 0.012205, 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, \n 0.011899, 0.011859, 0.011819, 0.01178, 0.011741])\n', (147045, 148649), True, 'import pandas as pd\n'), ((149251, 149319), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (149273, 149319), True, 'import numpy.testing as npt\n'), ((149332, 149433), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (149351, 149433), True, 'import numpy.testing as npt\n'), ((149441, 149542), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (149460, 149542), True, 'import numpy.testing as npt\n'), ((162125, 163712), 'pandas.Series', 'pd.Series', (['[0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312, 52.4928,\n 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, 104.9856, \n 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, 150.9168, \n 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, 196.848, \n 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, 242.7792, \n 249.3408, 255.9024, 262.464, 
269.0256, 275.5872, 282.1488, 288.7104, \n 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, 334.6416, \n 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, 380.5728, \n 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, 426.504, \n 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, 472.4352, \n 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, 518.3664, \n 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 557.736, 564.2976, \n 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, 610.2288, \n 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, 656.16, \n 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, 702.0912, \n 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, 748.0224, \n 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, 793.9536, \n 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, 839.8848, \n 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, 885.816, \n 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, 931.7472, \n 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, 977.6784, \n 984.24, 990.8016, 997.3632]'], {}), '([0.0, 6.5616, 13.1232, 19.6848, 26.2464, 32.808, 39.3696, 45.9312,\n 52.4928, 59.0544, 65.616, 72.1776, 78.7392, 85.3008, 91.8624, 98.424, \n 104.9856, 111.5472, 118.1088, 124.6704, 131.232, 137.7936, 144.3552, \n 150.9168, 157.4784, 164.04, 170.6016, 177.1632, 183.7248, 190.2864, \n 196.848, 203.4096, 209.9712, 216.5328, 223.0944, 229.656, 236.2176, \n 242.7792, 249.3408, 255.9024, 262.464, 269.0256, 275.5872, 282.1488, \n 288.7104, 295.272, 301.8336, 308.3952, 314.9568, 321.5184, 328.08, \n 334.6416, 341.2032, 347.7648, 354.3264, 360.888, 367.4496, 374.0112, \n 380.5728, 387.1344, 393.696, 400.2576, 406.8192, 413.3808, 419.9424, \n 426.504, 433.0656, 439.6272, 446.1888, 452.7504, 459.312, 465.8736, \n 472.4352, 478.9968, 485.5584, 492.12, 498.6816, 505.2432, 511.8048, \n 518.3664, 524.928, 531.4896, 538.0512, 544.6128, 551.1744, 
557.736, \n 564.2976, 570.8592, 577.4208, 583.9824, 590.544, 597.1056, 603.6672, \n 610.2288, 616.7904, 623.352, 629.9136, 636.4752, 643.0368, 649.5984, \n 656.16, 662.7216, 669.2832, 675.8448, 682.4064, 688.968, 695.5296, \n 702.0912, 708.6528, 715.2144, 721.776, 728.3376, 734.8992, 741.4608, \n 748.0224, 754.584, 761.1456, 767.7072, 774.2688, 780.8304, 787.392, \n 793.9536, 800.5152, 807.0768, 813.6384, 820.2, 826.7616, 833.3232, \n 839.8848, 846.4464, 853.008, 859.5696, 866.1312, 872.6928, 879.2544, \n 885.816, 892.3776, 898.9392, 905.5008, 912.0624, 918.624, 925.1856, \n 931.7472, 938.3088, 944.8704, 951.432, 957.9936, 964.5552, 971.1168, \n 977.6784, 984.24, 990.8016, 997.3632])\n', (162134, 163712), True, 'import pandas as pd\n'), ((163831, 165444), 'pandas.Series', 'pd.Series', (['[0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, 0.17591, \n 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, 0.099496, \n 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, 0.064594, \n 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, 0.04757, \n 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, 0.039018, \n 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, 0.032629, \n 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, 0.027788, \n 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, 0.024287, \n 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, 0.021675, \n 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, 0.019665, \n 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, 0.018118, \n 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, 0.016886, \n 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, 0.015906, \n 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, 0.015097, \n 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, 0.014415, \n 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, 0.013846, \n 0.01377, 0.013697, 0.013628, 
0.013559, 0.013491, 0.013423, 0.013354, \n 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, 0.012926, \n 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, 0.012547, \n 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, 0.012205, \n 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, 0.011899, \n 0.011859, 0.011819, 0.01178, 0.011741]'], {}), '([0.49997, 0.37451, 0.29849, 0.25004, 0.2138, 0.19455, 0.18448, \n 0.17591, 0.1678, 0.15421, 0.1401, 0.12693, 0.11785, 0.11144, 0.10675, \n 0.099496, 0.092323, 0.085695, 0.079234, 0.074253, 0.070316, 0.067191, \n 0.064594, 0.062337, 0.060348, 0.058192, 0.055224, 0.051972, 0.049283, \n 0.04757, 0.046226, 0.044969, 0.043922, 0.043027, 0.041934, 0.040528, \n 0.039018, 0.037744, 0.036762, 0.035923, 0.035071, 0.034267, 0.033456, \n 0.032629, 0.03184, 0.031078, 0.030363, 0.02968, 0.029028, 0.028399, \n 0.027788, 0.027199, 0.026642, 0.026124, 0.025635, 0.02517, 0.024719, \n 0.024287, 0.023867, 0.023457, 0.023061, 0.022685, 0.022334, 0.021998, \n 0.021675, 0.02136, 0.021055, 0.020758, 0.020467, 0.020186, 0.019919, \n 0.019665, 0.019421, 0.019184, 0.018951, 0.018727, 0.018514, 0.018311, \n 0.018118, 0.017929, 0.017745, 0.017564, 0.017387, 0.017214, 0.017046, \n 0.016886, 0.016732, 0.016587, 0.016446, 0.016309, 0.016174, 0.016039, \n 0.015906, 0.015777, 0.015653, 0.015532, 0.015418, 0.015308, 0.015202, \n 0.015097, 0.014991, 0.014885, 0.014782, 0.014683, 0.014588, 0.0145, \n 0.014415, 0.014334, 0.014254, 0.014172, 0.01409, 0.014007, 0.013926, \n 0.013846, 0.01377, 0.013697, 0.013628, 0.013559, 0.013491, 0.013423, \n 0.013354, 0.013288, 0.013223, 0.01316, 0.013099, 0.01304, 0.012983, \n 0.012926, 0.01287, 0.012814, 0.012758, 0.012703, 0.012649, 0.012597, \n 0.012547, 0.012499, 0.01245, 0.012402, 0.012352, 0.012302, 0.012254, \n 0.012205, 0.012158, 0.012113, 0.012068, 0.012025, 0.011982, 0.01194, \n 0.011899, 0.011859, 0.011819, 0.01178, 0.011741])\n', (163840, 165444), True, 'import pandas as pd\n'), 
((166046, 166114), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['expected_result_npts', 'npts_out'], {'verbose': '(True)'}), '(expected_result_npts, npts_out, verbose=True)\n', (166068, 166114), True, 'import numpy.testing as npt\n'), ((166127, 166228), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['x_array_out', 'expected_result_x'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(x_array_out, expected_result_x, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (166146, 166228), True, 'import numpy.testing as npt\n'), ((166236, 166337), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['y_array_out', 'expected_result_y'], {'rtol': '(1e-05)', 'atol': '(0)', 'err_msg': '""""""', 'verbose': '(True)'}), "(y_array_out, expected_result_y, rtol=1e-05, atol=0,\n err_msg='', verbose=True)\n", (166255, 166337), True, 'import numpy.testing as npt\n'), ((10300, 10345), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (10308, 10345), False, 'from tabulate import tabulate\n'), ((18169, 18214), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (18177, 18214), False, 'from tabulate import tabulate\n'), ((20396, 20441), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (20404, 20441), False, 'from tabulate import tabulate\n'), ((23532, 23577), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (23540, 23577), False, 'from tabulate import tabulate\n'), ((27398, 27443), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (27406, 27443), False, 'from tabulate import 
tabulate\n'), ((29001, 29046), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (29009, 29046), False, 'from tabulate import tabulate\n'), ((30916, 30961), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (30924, 30961), False, 'from tabulate import tabulate\n'), ((34951, 34996), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (34959, 34996), False, 'from tabulate import tabulate\n'), ((40030, 40075), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (40038, 40075), False, 'from tabulate import tabulate\n'), ((41346, 41391), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (41354, 41391), False, 'from tabulate import tabulate\n'), ((42459, 42504), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (42467, 42504), False, 'from tabulate import tabulate\n'), ((43611, 43656), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (43619, 43656), False, 'from tabulate import tabulate\n'), ((44857, 44902), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (44865, 44902), False, 'from tabulate import tabulate\n'), ((46784, 46829), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (46792, 46829), False, 'from tabulate import tabulate\n'), ((48221, 48266), 'tabulate.tabulate', 'tabulate', 
(['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (48229, 48266), False, 'from tabulate import tabulate\n'), ((49413, 49458), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (49421, 49458), False, 'from tabulate import tabulate\n'), ((51205, 51250), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (51213, 51250), False, 'from tabulate import tabulate\n'), ((52627, 52672), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (52635, 52672), False, 'from tabulate import tabulate\n'), ((59824, 59870), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (59832, 59870), False, 'from tabulate import tabulate\n'), ((59943, 59989), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (59951, 59989), False, 'from tabulate import tabulate\n'), ((63814, 63860), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (63822, 63860), False, 'from tabulate import tabulate\n'), ((63880, 63926), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (63888, 63926), False, 'from tabulate import tabulate\n'), ((67828, 67874), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (67836, 67874), False, 'from tabulate import tabulate\n'), ((67894, 67940), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': 
'"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (67902, 67940), False, 'from tabulate import tabulate\n'), ((72134, 72180), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (72142, 72180), False, 'from tabulate import tabulate\n'), ((72200, 72246), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (72208, 72246), False, 'from tabulate import tabulate\n'), ((79789, 79835), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (79797, 79835), False, 'from tabulate import tabulate\n'), ((79908, 79954), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (79916, 79954), False, 'from tabulate import tabulate\n'), ((83117, 83163), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (83125, 83163), False, 'from tabulate import tabulate\n'), ((83236, 83282), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (83244, 83282), False, 'from tabulate import tabulate\n'), ((86869, 86915), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (86877, 86915), False, 'from tabulate import tabulate\n'), ((86988, 87034), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (86996, 87034), False, 'from tabulate import tabulate\n'), ((90629, 90675), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', 
tablefmt='rst')\n", (90637, 90675), False, 'from tabulate import tabulate\n'), ((90748, 90794), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (90756, 90794), False, 'from tabulate import tabulate\n'), ((94086, 94131), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (94094, 94131), False, 'from tabulate import tabulate\n'), ((99041, 99086), 'tabulate.tabulate', 'tabulate', (['tab'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab, headers='keys', tablefmt='rst')\n", (99049, 99086), False, 'from tabulate import tabulate\n'), ((116241, 116287), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (116249, 116287), False, 'from tabulate import tabulate\n'), ((116360, 116406), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (116368, 116406), False, 'from tabulate import tabulate\n'), ((133157, 133203), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (133165, 133203), False, 'from tabulate import tabulate\n'), ((133276, 133322), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (133284, 133322), False, 'from tabulate import tabulate\n'), ((149943, 149989), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (149951, 149989), False, 'from tabulate import tabulate\n'), ((150062, 150108), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (150070, 
150108), False, 'from tabulate import tabulate\n'), ((166738, 166784), 'tabulate.tabulate', 'tabulate', (['tab1'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab1, headers='keys', tablefmt='rst')\n", (166746, 166784), False, 'from tabulate import tabulate\n'), ((166857, 166903), 'tabulate.tabulate', 'tabulate', (['tab2'], {'headers': '"""keys"""', 'tablefmt': '"""rst"""'}), "(tab2, headers='keys', tablefmt='rst')\n", (166865, 166903), False, 'from tabulate import tabulate\n'), ((10243, 10265), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (10263, 10265), False, 'import inspect\n'), ((18112, 18134), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (18132, 18134), False, 'import inspect\n'), ((20339, 20361), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (20359, 20361), False, 'import inspect\n'), ((23475, 23497), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (23495, 23497), False, 'import inspect\n'), ((27341, 27363), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (27361, 27363), False, 'import inspect\n'), ((28944, 28966), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (28964, 28966), False, 'import inspect\n'), ((30859, 30881), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (30879, 30881), False, 'import inspect\n'), ((34894, 34916), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (34914, 34916), False, 'import inspect\n'), ((39973, 39995), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (39993, 39995), False, 'import inspect\n'), ((41289, 41311), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (41309, 41311), False, 'import inspect\n'), ((42402, 42424), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (42422, 42424), False, 'import inspect\n'), ((43554, 43576), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (43574, 43576), False, 
'import inspect\n'), ((44800, 44822), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (44820, 44822), False, 'import inspect\n'), ((46727, 46749), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (46747, 46749), False, 'import inspect\n'), ((48164, 48186), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (48184, 48186), False, 'import inspect\n'), ((49356, 49378), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (49376, 49378), False, 'import inspect\n'), ((51148, 51170), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (51168, 51170), False, 'import inspect\n'), ((52570, 52592), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (52590, 52592), False, 'import inspect\n'), ((59605, 59627), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (59625, 59627), False, 'import inspect\n'), ((63648, 63670), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (63668, 63670), False, 'import inspect\n'), ((67662, 67684), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (67682, 67684), False, 'import inspect\n'), ((71968, 71990), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (71988, 71990), False, 'import inspect\n'), ((79451, 79473), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (79471, 79473), False, 'import inspect\n'), ((82779, 82801), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (82799, 82801), False, 'import inspect\n'), ((86531, 86553), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (86551, 86553), False, 'import inspect\n'), ((90291, 90313), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (90311, 90313), False, 'import inspect\n'), ((94029, 94051), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (94049, 94051), False, 'import inspect\n'), ((98984, 99006), 'inspect.currentframe', 'inspect.currentframe', ([], 
{}), '()\n', (99004, 99006), False, 'import inspect\n'), ((116016, 116038), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (116036, 116038), False, 'import inspect\n'), ((132932, 132954), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (132952, 132954), False, 'import inspect\n'), ((149718, 149740), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (149738, 149740), False, 'import inspect\n'), ((166513, 166535), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (166533, 166535), False, 'import inspect\n')] |
#!/usr/bin/env python3
''' Functions used for common spatial patterns'''
import numpy as np
from scipy.special import binom
import pyriemann.utils.mean as rie_mean
from filters import butter_fir_filter
from eig import gevd
__author__ = "<NAME> and <NAME>"
__email__ = "<EMAIL>,<EMAIL>"
def csp_one_one(cov_matrix, NO_csp, NO_classes):
    """
    Calculate CSP spatial filters for all ordered pairs of classes.

    Keyword arguments:
    cov_matrix -- per-class covariance matrices, each of size [NO_channels, NO_channels]
    NO_csp -- total number of spatial filters (24)
    NO_classes -- number of classes

    Return: spatial filter numpy array of size [NO_channels, NO_csp]
    """
    n_channels = cov_matrix[0].shape[0]
    pair_count = binom(NO_classes, 2)
    # each class pair contributes 2 * filters_per_pair columns of w
    filters_per_pair = int(NO_csp / (pair_count * 2))
    w = np.zeros((n_channels, NO_csp))

    pair_idx = 0  # running index over class pairs
    for class_a in range(NO_classes):
        for class_b in range(class_a + 1, NO_classes):
            col_start = 2 * filters_per_pair * pair_idx
            col_stop = 2 * filters_per_pair * (pair_idx + 1)
            w[:, col_start:col_stop] = gevd(cov_matrix[class_a],
                                            cov_matrix[class_b],
                                            filters_per_pair)
            pair_idx += 1
    return w
def generate_projection(data, class_vec, NO_csp, filter_bank, time_windows, NO_classes=4):
    """ generate spatial filters for every timewindow and frequency band

    Keyword arguments:
    data -- numpy array of size [NO_trials,channels,time_samples]
    class_vec -- containing the class labels, numpy array of size [NO_trials]
    NO_csp -- number of spatial filters (24)
    filter_bank -- numpy array containing butter sos filter coefficients dim [NO_bands,order,6]
    time_windows -- numpy array [[start_time1,end_time1],...,[start_timeN,end_timeN]]

    Return: spatial filter numpy array of size [NO_timewindows,NO_freqbands,22,NO_csp]
    """
    time_windows = time_windows.reshape((-1, 2))
    n_bands = filter_bank.shape[0]
    n_windows = len(time_windows[:, 0])
    n_channels = len(data[0, :, 0])
    n_trials = class_vec.size

    # one filter bank entry per (time window, frequency band) combination
    w = np.zeros((n_windows, n_bands, n_channels, NO_csp))

    for t_wind in range(n_windows):
        # start and end sample of the current time window
        t_start, t_end = time_windows[t_wind]

        for band in range(n_bands):
            # per-class stacks of single-trial covariance estimates
            cov = np.zeros((NO_classes, n_trials, n_channels, n_channels))
            cov_avg = np.zeros((NO_classes, n_channels, n_channels))
            cov_cntr = np.zeros(NO_classes).astype(int)  # per-class trial counter

            # estimate one covariance matrix per trial, bucketed by class
            for trial in range(n_trials):
                # band-pass filter the channels of this trial/window
                filtered = butter_fir_filter(data[trial, :, t_start:t_end], filter_bank[band])
                label = int(class_vec[trial] - 1)
                cov[label, cov_cntr[label], :, :] = np.dot(filtered, np.transpose(filtered))
                cov_cntr[label] += 1

            # average the covariance matrices within each class
            for clas in range(NO_classes):
                cov_avg[clas, :, :] = rie_mean.mean_covariance(
                    cov[clas, :cov_cntr[clas], :, :], metric='euclid')

            w[t_wind, band, :, :] = csp_one_one(cov_avg, NO_csp, NO_classes)
    return w
def generate_eye(data, class_vec, filter_bank, time_windows):
    """ generate unity spatial filters for every timewindow and frequency band

    The "filters" are identity matrices, i.e. a pass-through that leaves the
    channel data unchanged; this serves as the no-CSP baseline.

    Keyword arguments:
    data -- numpy array of size [NO_trials,channels,time_samples]
    class_vec -- containing the class labels, numpy array of size [NO_trials]
               (unused; kept for signature compatibility with generate_projection)
    filter_bank -- numpy array containing butter sos filter coefficients dim [NO_bands,order,6]
    time_windows -- numpy array [[start_time1,end_time1],...,[start_timeN,end_timeN]]

    Return: spatial unity filter numpy array of size
            [NO_timewindows,NO_freqbands,NO_channels,NO_channels]
    """
    time_windows = time_windows.reshape((-1, 2))
    NO_bands = filter_bank.shape[0]
    NO_time_windows = len(time_windows[:, 0])
    NO_channels = len(data[0, :, 0])

    # broadcast a single identity matrix into every (time window, band) slot
    # instead of filling it in with nested loops
    w = np.zeros((NO_time_windows, NO_bands, NO_channels, NO_channels))
    w[:, :] = np.eye(NO_channels)
    return w
def extract_feature(data, w, filter_bank, time_windows):
    """ calculate features using the precalculated spatial filters

    For every trial, the data of each (time window, band) combination is
    spatially filtered, band-pass filtered, reduced to per-channel variance
    and log10-transformed.

    Keyword arguments:
    data -- numpy array of size [NO_trials,channels,time_samples]
    w -- spatial filters, numpy array of size [NO_timewindows,NO_freqbands,22,NO_csp]
    filter_bank -- numpy array containing butter sos filter coefficients dim [NO_bands,order,6]
    time_windows -- numpy array [[start_time1,end_time1],...,[start_timeN,end_timeN]]

    Return: features, numpy array of size [NO_trials,(NO_csp*NO_bands*NO_time_windows)]
    """
    NO_csp = w.shape[3]
    time_windows = time_windows.reshape((-1, 2))
    NO_time_windows = int(time_windows.size / 2)
    NO_bands = filter_bank.shape[0]
    NO_trials = data.shape[0]

    feature_mat = np.zeros((NO_trials, NO_time_windows, NO_bands, NO_csp))

    for trial in range(NO_trials):
        # fresh feature buffer per trial (the previous shared buffer was
        # fully overwritten each iteration, but a local one is safer)
        feat = np.zeros((NO_time_windows, NO_bands, NO_csp))
        for t_wind in range(NO_time_windows):
            # start and end sample of the current time window
            t_start = time_windows[t_wind, 0]
            t_end = time_windows[t_wind, 1]
            for subband in range(NO_bands):
                # apply spatial filter, then the band-pass filter
                cur_data_s = np.dot(np.transpose(w[t_wind, subband]),
                                    data[trial, :, t_start:t_end])
                cur_data_f_s = butter_fir_filter(cur_data_s, filter_bank[subband])
                # variance of each CSP channel is the raw feature
                feat[t_wind, subband] = np.var(cur_data_f_s, axis=1)
        # log-transform the variance features
        for subband in range(NO_bands):
            feat[:, subband] = np.log10(feat[:, subband])
        feature_mat[trial, :, :, :] = feat

    return np.reshape(feature_mat, (NO_trials, -1))
"scipy.special.binom",
"pyriemann.utils.mean.mean_covariance",
"eig.gevd",
"numpy.zeros",
"numpy.transpose",
"numpy.reshape",
"numpy.eye",
"numpy.log10",
"numpy.var",
"filters.butter_fir_filter"
] | [((629, 649), 'scipy.special.binom', 'binom', (['NO_classes', '(2)'], {}), '(NO_classes, 2)\n', (634, 649), False, 'from scipy.special import binom\n'), ((696, 717), 'numpy.zeros', 'np.zeros', (['(N, NO_csp)'], {}), '((N, NO_csp))\n', (704, 717), True, 'import numpy as np\n'), ((1789, 1847), 'numpy.zeros', 'np.zeros', (['(NO_time_windows, NO_bands, NO_channels, NO_csp)'], {}), '((NO_time_windows, NO_bands, NO_channels, NO_csp))\n', (1797, 1847), True, 'import numpy as np\n'), ((3932, 3995), 'numpy.zeros', 'np.zeros', (['(NO_time_windows, NO_bands, NO_channels, NO_channels)'], {}), '((NO_time_windows, NO_bands, NO_channels, NO_channels))\n', (3940, 3995), True, 'import numpy as np\n'), ((4910, 4966), 'numpy.zeros', 'np.zeros', (['(NO_trials, NO_time_windows, NO_bands, NO_csp)'], {}), '((NO_trials, NO_time_windows, NO_bands, NO_csp))\n', (4918, 4966), True, 'import numpy as np\n'), ((5005, 5050), 'numpy.zeros', 'np.zeros', (['(NO_time_windows, NO_bands, NO_csp)'], {}), '((NO_time_windows, NO_bands, NO_csp))\n', (5013, 5050), True, 'import numpy as np\n'), ((5913, 5953), 'numpy.reshape', 'np.reshape', (['feature_mat', '(NO_trials, -1)'], {}), '(feature_mat, (NO_trials, -1))\n', (5923, 5953), True, 'import numpy as np\n'), ((870, 922), 'eig.gevd', 'gevd', (['cov_matrix[cc1]', 'cov_matrix[cc2]', 'NO_filtpairs'], {}), '(cov_matrix[cc1], cov_matrix[cc2], NO_filtpairs)\n', (874, 922), False, 'from eig import gevd\n'), ((2138, 2197), 'numpy.zeros', 'np.zeros', (['(NO_classes, NO_trials, NO_channels, NO_channels)'], {}), '((NO_classes, NO_trials, NO_channels, NO_channels))\n', (2146, 2197), True, 'import numpy as np\n'), ((2252, 2300), 'numpy.zeros', 'np.zeros', (['(NO_classes, NO_channels, NO_channels)'], {}), '((NO_classes, NO_channels, NO_channels))\n', (2260, 2300), True, 'import numpy as np\n'), ((4083, 4102), 'numpy.eye', 'np.eye', (['NO_channels'], {}), '(NO_channels)\n', (4089, 4102), True, 'import numpy as np\n'), ((5789, 5815), 'numpy.log10', 'np.log10', 
(['feat[:, subband]'], {}), '(feat[:, subband])\n', (5797, 5815), True, 'import numpy as np\n'), ((2546, 2616), 'filters.butter_fir_filter', 'butter_fir_filter', (['data[trial, :, t_start:t_end]', 'filter_bank[subband]'], {}), '(data[trial, :, t_start:t_end], filter_bank[subband])\n', (2563, 2616), False, 'from filters import butter_fir_filter\n'), ((2992, 3067), 'pyriemann.utils.mean.mean_covariance', 'rie_mean.mean_covariance', (['cov[clas, :cov_cntr[clas], :, :]'], {'metric': '"""euclid"""'}), "(cov[clas, :cov_cntr[clas], :, :], metric='euclid')\n", (3016, 3067), True, 'import pyriemann.utils.mean as rie_mean\n'), ((5524, 5575), 'filters.butter_fir_filter', 'butter_fir_filter', (['cur_data_s', 'filter_bank[subband]'], {}), '(cur_data_s, filter_bank[subband])\n', (5541, 5575), False, 'from filters import butter_fir_filter\n'), ((5645, 5673), 'numpy.var', 'np.var', (['cur_data_f_s'], {'axis': '(1)'}), '(cur_data_f_s, axis=1)\n', (5651, 5673), True, 'import numpy as np\n'), ((2313, 2333), 'numpy.zeros', 'np.zeros', (['NO_classes'], {}), '(NO_classes)\n', (2321, 2333), True, 'import numpy as np\n'), ((2775, 2800), 'numpy.transpose', 'np.transpose', (['data_filter'], {}), '(data_filter)\n', (2787, 2800), True, 'import numpy as np\n'), ((5412, 5444), 'numpy.transpose', 'np.transpose', (['w[t_wind, subband]'], {}), '(w[t_wind, subband])\n', (5424, 5444), True, 'import numpy as np\n')] |
import plotly.graph_objs as go
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from simulation import load_sample
import sys
# Color triple per cross-validation method: index 0 = mean line,
# index 1 = upper/lower confidence-bound lines, index 2 = the translucent
# fill drawn between the bounds.
colors = {"ncv": ("rgb(31,120,180)", "rgb(166, 206, 227)", "rgba(166, 206, 227,0.1)"),
          "bncv": ("rgb(51,160,44)", "rgb(178,223,138)", "rgba(178,223,138,0.1)"),
          "bncv_top_3": ("rgb(227,26,28)", "rgb(251,154,153)", "rgba(251,154,153,0.1)"),
          "bncv_top_5": ("rgb(255,127,0)", "rgb(253,191,111)", "rgba(253,191,111,0.1)"),
          "bayes": ("rgb(106,61,154)", "rgb(106,61,154)", "rgba(106,61,154,0.1)")}

# Display name for each method key, used in the plot legend.
name_dic = {
    'ncv': 'NCV',
    'bncv': 'NECV-1',
    'bncv_top_3': 'NECV-3',
    'bncv_top_5': 'NECV'
}
def naive_bayes():
    """
    Compute the accuracy (percent) of a simple sign-rule classifier on
    simulated data for every standard deviation from 1 to 21.

    The classifier predicts class 0 when the feature sum is positive, which
    serves as the naive-bayes baseline in the plots.
    """
    accuracies = []
    for std in range(1, 22):
        x, y = load_sample(10000, 256, std)
        # predict from the sign of the per-sample feature sum
        predictions = (x.sum(axis=1) > 0).astype(int)
        score = accuracy_score(y.argmax(axis=1), 1 - predictions)
        accuracies.append(score * 100)
    return accuracies
def continuous_line_plots(df):
    """
    Plot the mean accuracy (with 95% confidence bands) of each
    cross-validation method against the standard deviation of the simulated
    data, add the naive-bayes baseline, and write the figure to the HTML
    path given as the second command-line argument.

    :param df: results dataframe with at least the columns 'name', 'std'
        and 'score'; 'score' is converted in place from fraction to percent
    """
    df["score"] = df["score"] * 100
    variables = list(df["name"].unique())
    x_axis = list(df["std"].unique())
    x_axis.sort()
    children = []
    # n = repetitions per (method, std) cell; k = half-width factor of the
    # 95% normal confidence interval for the mean
    n = df.loc[(df["name"] == "ncv") & (df["std"] == 1)].copy().shape[0]
    k = 1.96 / (n ** 0.5)
    for name in variables:
        # mean and std of the score at each x-axis value
        ave_var = []
        std_var = []
        for s in x_axis:
            tmp = df.loc[(df["name"] == name) & (df["std"] == s)].copy()
            ave_var.append(tmp["score"].mean())
            std_var.append(tmp["score"].std())
        ave_var = np.array(ave_var)
        std_var = np.array(std_var)
        # mean accuracy line
        children += [go.Scatter(
            name=name_dic[name],
            x=x_axis,
            y=ave_var,
            mode='lines',
            line=dict(width=0.5, color=colors[name][0]),
        )]
        # upper confidence bound
        children += [go.Scatter(
            name=f'{name} Upper Bound',
            x=x_axis,
            y=ave_var+k*std_var,
            mode='lines',
            marker=dict(color="#444"),
            line=dict(width=0.2, color=colors[name][1]),
            showlegend=False
        )]
        # lower confidence bound; fill='tonexty' shades up to the upper bound
        children += [go.Scatter(
            name=f'{name} Lower Bound',
            x=x_axis,
            y=ave_var-k*std_var,
            marker=dict(color="#444"),
            line=dict(width=0.2, color=colors[name][1]),
            mode='lines',
            fillcolor=colors[name][2],
            fill='tonexty',
            showlegend=False
        )]
    # naive-bayes baseline curve
    score_bayes = naive_bayes()
    children += [go.Scatter(
        name="Naive bayes",
        x=x_axis,
        y=score_bayes,
        mode='lines',
        line=dict(width=0.5, color=colors["bayes"][0]),
    )]
    fig = go.Figure(children)
    fig.update_layout(
        yaxis_title='Accuracy (%) (with 95% confidence intervals)',
        title='NECV vs NCV',
        xaxis_title='Standard deviation of simulated data',
        xaxis={'tickformat': ',d'},
        template='simple_white',
    )
    fig.write_html(sys.argv[2])
if __name__ == '__main__':
    # usage: script <results_csv> <output_html>
    df = pd.read_csv(sys.argv[1])
    continuous_line_plots(df)
| [
"pandas.read_csv",
"plotly.graph_objs.Figure",
"numpy.array",
"simulation.load_sample"
] | [((3167, 3186), 'plotly.graph_objs.Figure', 'go.Figure', (['children'], {}), '(children)\n', (3176, 3186), True, 'import plotly.graph_objs as go\n'), ((3512, 3536), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3523, 3536), True, 'import pandas as pd\n'), ((815, 843), 'simulation.load_sample', 'load_sample', (['(10000)', '(256)', 'std'], {}), '(10000, 256, std)\n', (826, 843), False, 'from simulation import load_sample\n'), ((1620, 1637), 'numpy.array', 'np.array', (['ave_var'], {}), '(ave_var)\n', (1628, 1637), True, 'import numpy as np\n'), ((1656, 1673), 'numpy.array', 'np.array', (['std_var'], {}), '(std_var)\n', (1664, 1673), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import spatialmath.base.argcheck as argcheck
import cv2 as cv
import machinevisiontoolbox.base.color as color
from scipy import interpolate
# import scipy as sp
# from scipy import signal
# from scipy import interpolate
# from collecitons import namedtuple
# from pathlib import Path
class ImageProcessingColorMixin:
    """
    Image processing color operations on the Image class
    """

    def red(self):
        """
        Extract the red plane of a color image

        :raises ValueError: if image is not color
        :return out: greyscale image representing the red image plane
        :rtype: Image instance
        """
        if not self.iscolor:
            raise ValueError('cannot extract color plane from greyscale image')

        out = [im.rgb[:, :, 0] for im in self]
        return self.__class__(out)

    def green(self):
        """
        Extract the green plane of a color image

        :raises ValueError: if image is not color
        :return out: greyscale image representing the green image plane
        :rtype: Image instance
        """
        if not self.iscolor:
            raise ValueError('cannot extract color plane from greyscale image')

        out = [im.rgb[:, :, 1] for im in self]
        return self.__class__(out)

    def blue(self):
        """
        Extract the blue plane of a color image

        :raises ValueError: if image is not color
        :return out: greyscale image representing the blue image plane
        :rtype: Image instance
        """
        if not self.iscolor:
            raise ValueError('cannot extract color plane from greyscale image')

        out = [im.rgb[:, :, 2] for im in self]
        return self.__class__(out)

    def colorise(self, c=(1, 1, 1)):
        """
        Colorise a greyscale image

        :param c: color to color image
        :type c: string or rgb-tuple
        :raises ValueError: if image is not greyscale
        :return out: Image with float64 precision elements ranging from 0 to 1
        :rtype: Image instance

        - ``IM.colorise()`` is a color image out, where each color plane is
          equal to image.
        - ``IM.colorise(c)`` as above but each output pixel is ``c``(3,1)
          times the corresponding element of image.

        .. note::

            - Can convert a monochrome sequence (H,W,N) to a color image
              sequence (H,W,3,N).

        :references:

            - Robotics, Vision & Control, Section 10.1, <NAME>,
              Springer 2011.
        """
        # default is now an immutable tuple rather than a mutable list;
        # getvector accepts either, so callers are unaffected
        c = argcheck.getvector(c).astype(self.dtype)
        c = c[::-1]  # reverse because OpenCV stores planes as BGR

        # make sure im are greyscale
        img = self.mono()

        if img.iscolor is False:
            # only one plane to convert
            # recall opencv uses BGR
            out = [np.dstack((c[0] * im.image,
                               c[1] * im.image,
                               c[2] * im.image))
                   for im in img]
        else:
            raise ValueError(self.image, 'Image must be greyscale')

        return self.__class__(out)

    def colorspace(self, conv, **kwargs):
        """
        Transform a color image between color representations

        :param conv: color conversion specifier; currently only ``'xyz2bgr'``
            and ``'Lab2bgr'`` are implemented
        :type conv: string
        :param kwargs: keywords/options passed through to OpenCV's cvtColor
        :type kwargs: name/value pairs
        :raises ValueError: if ``conv`` is not an implemented conversion
        :return: out, same size as input image
        :rtype: Image instance

        .. note::

            - All conversions assume 2 degree observer and D65 illuminant.
            - Transformations generally do not constrain colors to be
              "in gamut"; the result may need clamping to [0,1] before
              display.

        :references:

            - Robotics, Vision & Control, Chapter 10, <NAME>, Springer 2011.
        """
        # TODO other color cases
        # TODO check conv is valid
        # TODO conv string parsing

        # ensure floats? unsure if cv.cvtColor operates on ints
        imf = self.float()

        out = []
        for im in imf:
            if conv == 'xyz2bgr':
                # note that using cv.COLOR_XYZ2RGB does not seem to work
                BGR_raw = cv.cvtColor(im.bgr, cv.COLOR_XYZ2BGR, **kwargs)

                # desaturate and rescale to constrain resulting RGB values
                # to [0,1]
                B = BGR_raw[:, :, 0]
                G = BGR_raw[:, :, 1]
                R = BGR_raw[:, :, 2]
                add_white = -np.minimum(np.minimum(np.minimum(R, G), B), 0)
                B += add_white
                G += add_white
                R += add_white

                # inverse gamma correction
                # NOTE(review): _gammacorrection is not defined in this
                # mixin -- presumably provided by another mixin of Image
                B = self._gammacorrection(B)
                G = self._gammacorrection(G)
                R = self._gammacorrection(R)

                out.append(np.dstack((B, G, R)))  # BGR

            elif conv == 'Lab2bgr':
                # convert source from Lab to xyz

                # in colorspace.m, image was parsed into a (251001,1,3)
                labim = np.reshape(im.image,
                                   (im.shape[0], 1, im.shape[1]))

                fY = (labim[:, :, 0] + 16) / 116
                fX = fY + labim[:, :, 1] / 500
                fZ = fY - labim[:, :, 2] / 200
                # cie xyz whitepoint
                WhitePoint = np.r_[0.950456, 1, 1.088754]

                xyz = np.zeros(labim.shape)
                xyz[:, :, 0] = WhitePoint[0] * self._invf(fX)
                xyz[:, :, 1] = WhitePoint[1] * self._invf(fY)
                xyz[:, :, 2] = WhitePoint[2] * self._invf(fZ)

                # then call function again with conv = xyz2bgr
                xyz = self.__class__(xyz)
                out.append(xyz.colorspace('xyz2bgr').image)
            else:
                raise ValueError('other conv options not yet implemented')

        return self.__class__(out)

    def _invf(self, fY):
        """
        Inverse f from colorspace.m (inverse of the CIE Lab f function)
        """
        Y = fY ** 3
        # linear segment below the CIE threshold
        Y[Y < 0.008856] = (fY[Y < 0.008856] - 4 / 29) * (108 / 841)
        return Y

    def gamma_encode(self, gamma):
        """
        Gamma encoding

        :param gamma: gamma value
        :type gamma: string or float
        :return: gamma encoded version of image
        :rtype: Image instance

        - ``IM.gamma_encode(gamma)`` is the image with gamma encoding
          applied. This takes a linear luminance image and converts it to a
          form suitable for display on a non-linear monitor.

        .. note::

            - Gamma encoding is typically performed in a camera with
              GAMMA=0.45.
            - For images with multiple planes the gamma correction is applied
              to all planes.
            - For image sequences the gamma correction is applied to all
              elements.
            - For images of type double the pixels are assumed to be in the
              range 0 to 1.
            - For images of type int the pixels are assumed in the range 0 to
              the maximum value of their class. Pixels are converted first to
              double, processed, then converted back to the integer class.

        :references:

            - Robotics, Vision & Control, Chapter 10, <NAME>, Springer 2011.
        """
        out = []
        for im in self:
            if im.iscolor:
                # encode each color plane separately
                R = color.gamma_encode(im.red, gamma)
                G = color.gamma_encode(im.green, gamma)
                B = color.gamma_encode(im.blue, gamma)
                out.append(np.dstack((R, G, B)))
            else:
                out.append(color.gamma_encode(im.image, gamma))

        return self.__class__(out)

    def gamma_decode(self, gamma):
        """
        Gamma decoding

        :param gamma: gamma value
        :type gamma: string or float
        :return: gamma decoded version of image
        :rtype: Image instance

        - ``IM.gamma_decode(gamma)`` is the image with gamma decoding
          applied. This takes a gamma-corrected image and converts it to a
          linear luminance image.

        .. note::

            - Gamma decoding should be applied to any color image prior to
              colometric operations.
            - Gamma decoding is typically performed in the display with
              GAMMA=2.2.
            - For images with multiple planes the gamma correction is applied
              to all planes.
            - For image sequences the gamma correction is applied to all
              elements.
            - For images of type double the pixels are assumed to be in the
              range 0 to 1.
            - For images of type int the pixels are assumed in the range 0 to
              the maximum value of their class. Pixels are converted first to
              double, processed, then converted back to the integer class.

        :references:

            - Robotics, Vision & Control, Chapter 10, <NAME>, Springer 2011.
        """
        out = []
        for im in self:
            if im.iscolor:
                # BUG FIX: the original called a bare, undefined
                # ``gamma_decode(m.red, ...)`` (NameError on ``m`` and
                # unqualified recursion); mirror gamma_encode and use the
                # color module helpers on ``im`` instead
                R = color.gamma_decode(im.red, gamma)
                G = color.gamma_decode(im.green, gamma)
                B = color.gamma_decode(im.blue, gamma)
                out.append(np.dstack((R, G, B)))
            else:
                out.append(color.gamma_decode(im.image, gamma))

        return self.__class__(out)
# --------------------------------------------------------------------------#
if __name__ == '__main__':

    # test run ImageProcessingColor.py: load and display an image, show the
    # color space plot, then drop into an interactive shell
    print('ImageProcessingColor.py')

    from machinevisiontoolbox.Image import Image

    im = Image('monalisa.png')
    im.disp()

    imcs = Image.showcolorspace()
    imcs.disp()

    import code
    code.interact(local=dict(globals(), **locals()))
| [
"numpy.dstack",
"machinevisiontoolbox.base.color.gamma_encode",
"machinevisiontoolbox.Image.Image",
"numpy.minimum",
"cv2.cvtColor",
"numpy.zeros",
"machinevisiontoolbox.Image.Image.showcolorspace",
"numpy.reshape",
"spatialmath.base.argcheck.getvector"
] | [((14381, 14402), 'machinevisiontoolbox.Image.Image', 'Image', (['"""monalisa.png"""'], {}), "('monalisa.png')\n", (14386, 14402), False, 'from machinevisiontoolbox.Image import Image\n'), ((14429, 14451), 'machinevisiontoolbox.Image.Image.showcolorspace', 'Image.showcolorspace', ([], {}), '()\n', (14449, 14451), False, 'from machinevisiontoolbox.Image import Image\n'), ((2826, 2847), 'spatialmath.base.argcheck.getvector', 'argcheck.getvector', (['c'], {}), '(c)\n', (2844, 2847), True, 'import spatialmath.base.argcheck as argcheck\n'), ((3107, 3169), 'numpy.dstack', 'np.dstack', (['(c[0] * im.image, c[1] * im.image, c[2] * im.image)'], {}), '((c[0] * im.image, c[1] * im.image, c[2] * im.image))\n', (3116, 3169), True, 'import numpy as np\n'), ((8755, 8802), 'cv2.cvtColor', 'cv.cvtColor', (['im.bgr', 'cv.COLOR_XYZ2BGR'], {}), '(im.bgr, cv.COLOR_XYZ2BGR, **kwargs)\n', (8766, 8802), True, 'import cv2 as cv\n'), ((12115, 12148), 'machinevisiontoolbox.base.color.gamma_encode', 'color.gamma_encode', (['im.red', 'gamma'], {}), '(im.red, gamma)\n', (12133, 12148), True, 'import machinevisiontoolbox.base.color as color\n'), ((12169, 12204), 'machinevisiontoolbox.base.color.gamma_encode', 'color.gamma_encode', (['im.green', 'gamma'], {}), '(im.green, gamma)\n', (12187, 12204), True, 'import machinevisiontoolbox.base.color as color\n'), ((12225, 12259), 'machinevisiontoolbox.base.color.gamma_encode', 'color.gamma_encode', (['im.blue', 'gamma'], {}), '(im.blue, gamma)\n', (12243, 12259), True, 'import machinevisiontoolbox.base.color as color\n'), ((9393, 9413), 'numpy.dstack', 'np.dstack', (['(B, G, R)'], {}), '((B, G, R))\n', (9402, 9413), True, 'import numpy as np\n'), ((9605, 9656), 'numpy.reshape', 'np.reshape', (['im.image', '(im.shape[0], 1, im.shape[1])'], {}), '(im.image, (im.shape[0], 1, im.shape[1]))\n', (9615, 9656), True, 'import numpy as np\n'), ((9954, 9975), 'numpy.zeros', 'np.zeros', (['labim.shape'], {}), '(labim.shape)\n', (9962, 9975), True, 'import 
numpy as np\n'), ((12287, 12307), 'numpy.dstack', 'np.dstack', (['(R, G, B)'], {}), '((R, G, B))\n', (12296, 12307), True, 'import numpy as np\n'), ((12354, 12389), 'machinevisiontoolbox.base.color.gamma_encode', 'color.gamma_encode', (['im.image', 'gamma'], {}), '(im.image, gamma)\n', (12372, 12389), True, 'import machinevisiontoolbox.base.color as color\n'), ((14004, 14024), 'numpy.dstack', 'np.dstack', (['(R, G, B)'], {}), '((R, G, B))\n', (14013, 14024), True, 'import numpy as np\n'), ((9068, 9084), 'numpy.minimum', 'np.minimum', (['R', 'G'], {}), '(R, G)\n', (9078, 9084), True, 'import numpy as np\n')] |
#!/usr/bin/env python3.7
"""
The copyrights of this software are owned by Duke University.
Please refer to the LICENSE.txt and README.txt files for licensing instructions.
The source code can be found on the following GitHub repository: https://github.com/wmglab-duke/ascent
"""
# builtins
import os
import time
import sys
from typing import List, Tuple, Union
# packages
import cv2
import shutil
import numpy as np
import matplotlib.pyplot as plt
import subprocess
from shapely.geometry import LineString
from scipy.ndimage.morphology import binary_fill_holes
from skimage import morphology
# ascent
from src.core import Slide, Map, Fascicle, Nerve, Trace
from .deformable import Deformable
from src.utils import Exceptionable, Configurable, Saveable, SetupMode, Config, MaskFileNames, NerveMode, \
MaskInputMode, ReshapeNerveMode, DeformationMode, PerineuriumThicknessMode, WriteMode, CuffInnerMode, \
TemplateOutput, TemplateMode, ScaleInputMode
class Sample(Exceptionable, Configurable, Saveable):
"""
Required (Config.) JSON's:
SAMPLE
RUN
"""
    def __init__(self, exception_config: list):
        """
        Initialize a Sample: set up the Exceptionable/Configurable
        superclasses, create empty containers for slides, the slide map and
        morphology metrics, and register the perineurium-thickness
        configuration (used to calculate contact impedances when "use_ci"
        is True).

        :param exception_config: preloaded configuration data for exceptions
        """

        # Initializes superclasses
        Exceptionable.__init__(self, SetupMode.OLD, exception_config)
        Configurable.__init__(self)

        # Initialize slides
        self.slides: List[Slide] = []

        # Set instance variable map (built later by init_map)
        self.map = None

        # Set instance variable morphology
        self.morphology = dict()

        # Add JSON for perineurium thickness relationship with nerve morphology metrics -- used to calculate contact impedances if "use_ci" is True
        self.add(SetupMode.NEW, Config.CI_PERINEURIUM_THICKNESS, os.path.join('config',
                                                                              'system',
                                                                              'ci_peri_thickness.json'))
def init_map(self, map_mode: SetupMode) -> 'Sample':
"""
Initialize the map. NOTE: the Config.SAMPLE json must have been externally added.
:param map_mode: should be old for now, but keeping as parameter in case needed in future
"""
if Config.SAMPLE.value not in self.configs.keys():
self.throw(38)
# Make a slide map
self.map = Map(self.configs[Config.EXCEPTIONS.value])
self.map.add(SetupMode.OLD, Config.SAMPLE, self.configs[Config.SAMPLE.value])
self.map.init_post_config(map_mode)
return self
def scale(self, factor) -> 'Sample':
"""
Scale all slides to the correct unit.
:param factor: factor by which to scale the image (1=no change)
"""
for slide in self.slides:
slide.scale(factor)
return self
def smooth(self, n_distance, i_distance) -> 'Sample':
"""
Smooth traces for all slides
:param n_distance: distance to inflate and deflate the nerve trace
:param i_distance: distance to inflate and deflate the fascicle traces
"""
for slide in self.slides:
slide.smooth_traces(n_distance, i_distance)
return self
def generate_perineurium(self, fit: dict) -> 'Sample':
"""
Adds perineurium to inners
"""
for slide in self.slides:
slide.generate_perineurium(fit)
return self
    def im_preprocess(self, path):
        """
        Performs cleaning operations on the input image, overwriting it in place.

        :param path: path to image which will be processed (and rewritten)
        """
        # -1 = cv2.IMREAD_UNCHANGED: keep the mask's original bit depth/channels
        img = cv2.imread(path, -1)
        # optional hole filling; '== True' is a deliberate exact match on the
        # optional search result (None when the key is absent)
        if self.search(Config.SAMPLE, 'image_preprocessing', 'fill_holes', optional=True) == True:
            if self.search_mode(MaskInputMode, Config.SAMPLE) == MaskInputMode.INNER_AND_OUTER_COMPILED:
                # filling holes on a compiled inner+outer mask would destroy the annuli
                print('WARNING: Skipping fill holes since MaskInputMode is INNER_AND_OUTER_COMPILED. Change fill_holes to False to suppress this warning.')
            else:
                img = binary_fill_holes(img)
        # optionally drop connected components smaller than the configured area
        removal_size = self.search(Config.SAMPLE, 'image_preprocessing', 'object_removal_area', optional=True)
        if removal_size:
            if removal_size < 0: self.throw(119)
            img = morphology.remove_small_objects(img, removal_size)
        # write back as a 0/255 binary image
        cv2.imwrite(path, img.astype(int) * 255)
    def get_factor(self, scale_bar_mask_path: str, scale_bar_length: float, scale_bar_is_literal: bool) -> float:
        """
        Returns scaling factor (micrometers per pixel).

        :param scale_bar_mask_path: path to binary mask with white straight (horizontal) scale bar
        :param scale_bar_length: length (in global units as determined by config/user) of the scale bar
        :param scale_bar_is_literal: if True, scale_bar_length already IS the
            um/px factor and no scale-bar image is read
        :return: scaling factor (unit per pixel)
        """
        if scale_bar_is_literal:
            # use explicitly specified um/px scale instead of drawing from a scale bar image
            factor = scale_bar_length
        else:
            # load in image
            image_raw: np.ndarray = cv2.imread(scale_bar_mask_path)
            # get maximum of each column (each "pixel" is a 4-item vector)
            row_of_column_maxes: np.ndarray = image_raw.max(0)
            # find the indices of columns in original image where the first pixel item was maxed (i.e. white)
            if row_of_column_maxes.ndim == 2:  # masks from histology, 3 or 4 bit
                indices = np.where(row_of_column_maxes[:, 0] == max(row_of_column_maxes[:, 0]))[0]
            elif row_of_column_maxes.ndim == 1:  # masks from mock morphology, 1 bit
                indices = np.where(row_of_column_maxes[:] == max(row_of_column_maxes[:]))[0]
            else:
                # may need to expand here in future?
                # NOTE(review): if self.throw does not raise, 'indices' is
                # unbound below -- confirm throw() always raises/exits
                self.throw(97)
            # find the length of the scale bar by finding total range of "max white" indices
            scale_bar_pixels = max(indices) - min(indices) + 1
            # calculate scale factor as unit/pixel
            factor = scale_bar_length / scale_bar_pixels
        return factor
    def build_file_structure(self, printing: bool = False) -> 'Sample':
        """
        Copy each slide's mask files from their mapped source directories into
        the samples/<sample_index>/slides/<cassette>/<number>/masks tree,
        creating directories as needed. Single-slide inputs are first renamed
        to the <NAME>_0_0_<CODE>.tif convention.

        :param printing: bool, gives user console output
        :return: self, so calls can be chained
        """
        scale_input_mode = self.search_mode(ScaleInputMode, Config.SAMPLE, optional=True)
        # For backwards compatibility, if scale mode is not specified assume a mask image is provided
        if scale_input_mode is None:
            scale_input_mode = ScaleInputMode.MASK
        sample_index = self.search(Config.RUN, 'sample')
        # get starting point so able to go back (this method chdir's around)
        start_directory: str = os.getcwd()
        # go to samples root
        samples_path = self.path(Config.SAMPLE, 'samples_path')
        # get sample NAME
        sample: str = self.search(Config.SAMPLE, 'sample')
        # ADDITION: if only one slide present, check if names abide by <NAME>_0_0_<CODE>.tif format
        # if not abiding, rename files so that they abide
        if len(self.map.slides) == 1:
            print('Renaming input files to conform with map input interface where necessary.')
            source_dir = os.path.join(*self.map.slides[0].data()[3])
            source_files = os.listdir(source_dir)
            for mask_fname in [f.value for f in MaskFileNames if f.value in source_files]:
                shutil.move(
                    os.path.join(source_dir, mask_fname),
                    os.path.join(source_dir, '{}_0_0_{}'.format(sample, mask_fname))
                )
        else:
            # multi-slide samples are not supported by this renaming scheme
            self.throw(96)
        # loop through each slide
        for slide_info in self.map.slides:
            # unpack data and force cast to string
            cassette, number, _, source_directory = slide_info.data()
            cassette, number = (str(item) for item in (cassette, number))
            scale_was_copied = False
            # descend one directory level per iteration, creating as we go
            for directory_part in samples_path, str(sample_index), 'slides', cassette, number, 'masks':
                if not os.path.exists(directory_part):
                    os.makedirs(directory_part)
                os.chdir(directory_part)
                if scale_input_mode == ScaleInputMode.MASK:
                    # only try to copy scale image if it is being used
                    if (directory_part == str(sample_index)) and not scale_was_copied:
                        scale_source_file = os.path.join(start_directory,
                                                         *source_directory,
                                                         '_'.join([sample,
                                                                   cassette,
                                                                   number,
                                                                   MaskFileNames.SCALE_BAR.value]))
                        if os.path.exists(scale_source_file):
                            shutil.copy2(scale_source_file, MaskFileNames.SCALE_BAR.value)
                        else:
                            print('ERROR: scale_source_file: {} not found'.format(scale_source_file))
                            self.throw(98)
                        scale_was_copied = True
            # copy every non-scale-bar mask that exists at the source
            for target_file in [item.value for item in MaskFileNames if item != MaskFileNames.SCALE_BAR]:
                source_file = os.path.join(start_directory,
                                           *source_directory,
                                           '_'.join([sample, cassette, number, target_file]))
                if printing:
                    print('source: {}\ntarget: {}'.format(source_file, target_file))
                if os.path.exists(source_file):
                    if printing:
                        print('\tFOUND\n')
                    shutil.copy2(source_file, target_file)
                else:
                    if printing:
                        print('\tNOT FOUND\n')
            os.chdir(start_directory)
        return self
    def populate(self, deform_animate: bool = True) -> 'Sample':
        """
        Load masks for every slide in the map, build Slide objects (fascicles
        plus optional nerve), scale to microns, smooth traces, generate
        perineurium when only inners were given, and deform/reposition
        fascicles according to the configured modes.

        :param deform_animate: boolean indicating whether to show nerve deformation
        :return: self, so calls can be chained
        """
        # get parameters (modes) from configuration file
        mask_input_mode = self.search_mode(MaskInputMode, Config.SAMPLE)
        nerve_mode = self.search_mode(NerveMode, Config.SAMPLE)
        reshape_nerve_mode = self.search_mode(ReshapeNerveMode, Config.SAMPLE)
        deform_mode = self.search_mode(DeformationMode, Config.SAMPLE)
        deform_ratio = None
        scale_input_mode = self.search_mode(ScaleInputMode, Config.SAMPLE, optional=True)
        plot = self.search(Config.SAMPLE, 'plot', optional=True)
        plot_folder = self.search(Config.SAMPLE, 'plot_folder', optional=True)
        # For backwards compatibility, if scale mode is not specified assume a mask image is provided
        if scale_input_mode is None:
            scale_input_mode = ScaleInputMode.MASK
        def exists(mask_file_name: MaskFileNames):
            # convenience: mask files are looked up relative to the cwd
            return os.path.exists(mask_file_name.value)
        # get starting point so able to go back
        start_directory: str = os.getcwd()
        # get sample name
        sample: str = str(self.search(Config.RUN, 'sample'))
        # create scale bar path
        if scale_input_mode == ScaleInputMode.MASK:
            scale_path = os.path.join('samples', sample, MaskFileNames.SCALE_BAR.value)
        elif scale_input_mode == ScaleInputMode.RATIO:
            scale_path = ''
        else:
            self.throw(108)
        plotpath = os.path.join('samples', str(sample), 'plots')
        if not os.path.exists(plotpath):
            os.makedirs(plotpath)
        for slide_info in self.map.slides:
            orientation_centroid: Union[Tuple[float, float], None] = None
            # unpack data and force cast to string
            cassette, number, position, _ = slide_info.data()
            cassette, number = (str(item) for item in (cassette, number))
            os.chdir(os.path.join('samples', str(sample), 'slides', cassette, number, 'masks'))
            # convert any TIFF to TIF
            proc = None
            if any(fname.endswith('.tiff') for fname in os.listdir('.')):
                if sys.platform.startswith('darwin') or sys.platform.startswith('linux'):
                    proc = subprocess.Popen(['bash',
                                             'for file in *.tiff; do mv "$file" "${file%.tiff}.tif"; done'])
                else:
                    proc = subprocess.Popen(['powershell.exe',
                                             'Dir | Rename-Item –NewName { $_.name –replace “.tiff“,”.tif” }'])
                proc.wait()
            if not exists(MaskFileNames.RAW):
                print('No raw tif found, but continuing. (Sample.populate)')
                # self.throw(18)
            if exists(MaskFileNames.ORIENTATION):
                # flip vertically so image coordinates match trace coordinates
                img = np.flipud(cv2.imread(MaskFileNames.ORIENTATION.value, -1))
                if len(img.shape) > 2 and img.shape[2] > 1:
                    img = img[:, :, 0]
                contour, _ = cv2.findContours(img,
                                              cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
                # the orientation mask must contain exactly one blob
                if len(contour) > 1: self.throw(124)
                if len(contour) < 1: self.throw(125)
                trace = Trace([point + [0] for point in contour[0][:, 0, :]], self.configs[Config.EXCEPTIONS.value])
                orientation_centroid = trace.centroid()
            else:
                print('No orientation tif found, but continuing. (Sample.populate)')
            # preprocess binary masks
            mask_dims = []
            for mask in ["COMPILED", "INNERS", "OUTERS", "NERVE"]:
                maskfile = getattr(MaskFileNames, mask)
                if exists(maskfile):
                    mask_dims.append(cv2.imread(getattr(maskfile, 'value')).shape)
                    self.im_preprocess(getattr(maskfile, 'value'))
            # at least one mask must exist, and all masks must agree in shape
            if len(mask_dims) == 0: self.throw(121)
            if not np.all(np.array(mask_dims) == mask_dims[0]): self.throw(122)
            # fascicles list
            fascicles: List[Fascicle] = []
            # load fascicles and check that the files exist, then generate fascicles
            if mask_input_mode == MaskInputMode.INNERS:
                if exists(MaskFileNames.INNERS):
                    fascicles = Fascicle.to_list(MaskFileNames.INNERS.value, None,
                                                 self.configs[Config.EXCEPTIONS.value])
                else:
                    self.throw(21)
            elif mask_input_mode == MaskInputMode.OUTERS:
                # fascicles = Fascicle.outer_to_list(MaskFileNames.OUTERS.value,
                #                                    self.configs[Config.EXCEPTIONS.value])
                self.throw(20)
            elif mask_input_mode == MaskInputMode.INNER_AND_OUTER_SEPARATE:
                if exists(MaskFileNames.INNERS) and exists(MaskFileNames.OUTERS):
                    fascicles = Fascicle.to_list(MaskFileNames.INNERS.value,
                                                 MaskFileNames.OUTERS.value,
                                                 self.configs[Config.EXCEPTIONS.value])
                else:
                    self.throw(22)
            elif mask_input_mode == MaskInputMode.INNER_AND_OUTER_COMPILED:
                if exists(MaskFileNames.COMPILED):
                    # first generate outer and inner images
                    i_image = os.path.split(MaskFileNames.COMPILED.value)[0] + 'i_from_c.tif'
                    o_image = os.path.split(MaskFileNames.COMPILED.value)[0] + 'o_from_c.tif'
                    self.io_from_compiled(MaskFileNames.COMPILED.value, i_image, o_image)
                    # then get fascicles
                    fascicles = Fascicle.to_list(i_image, o_image, self.configs[Config.EXCEPTIONS.value])
                else:
                    self.throw(23)
            else: # exhaustive
                pass
            nerve = None
            if nerve_mode == NerveMode.PRESENT:
                # check and load in nerve, throw error if not present
                if exists(MaskFileNames.NERVE):
                    img_nerve = cv2.imread(MaskFileNames.NERVE.value, -1)
                    if len(img_nerve.shape) > 2 and img_nerve.shape[2] > 1:
                        img_nerve = img_nerve[:, :, 0]
                    contour, _ = cv2.findContours(np.flipud(img_nerve),
                                                  cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
                    nerve = Nerve(Trace([point + [0] for point in contour[0][:, 0, :]],
                                        self.configs[Config.EXCEPTIONS.value]))
            # multiple fascicles require a nerve boundary
            if len(fascicles) > 1 and nerve_mode != NerveMode.PRESENT:
                self.throw(110)
            slide: Slide = Slide(fascicles,
                                 nerve,
                                 nerve_mode,
                                 self.configs[Config.EXCEPTIONS.value],
                                 will_reposition=(deform_mode != DeformationMode.NONE))
            # get orientation angle (used later to calculate pos_ang for model.json)
            if orientation_centroid is not None:
                # logic updated 10/11/2021
                # choose outer (based on if nerve is present)
                outer = slide.nerve if (slide.nerve is not None) else slide.fascicles[0].outer
                # create line between outer centroid and orientation centroid
                outer_x, outer_y = outer.centroid()
                ori_x, ori_y = orientation_centroid
                # set orientation_angle
                slide.orientation_angle = np.arctan2(ori_y - outer_y, ori_x - outer_x)
            # shrinkage correction
            slide.scale(1 + self.search(Config.SAMPLE, "scale", "shrinkage"))
            self.slides.append(slide)
            os.chdir(start_directory)
        # get scaling factor (to convert from pixels to microns)
        if os.path.exists(scale_path) and scale_input_mode == ScaleInputMode.MASK:
            factor = self.get_factor(scale_path, self.search(Config.SAMPLE, 'scale', 'scale_bar_length'), False)
        elif scale_input_mode == ScaleInputMode.RATIO:
            factor = self.get_factor(scale_path, self.search(Config.SAMPLE, 'scale', 'scale_ratio'), True)
        else:
            print(scale_path)
            self.throw(19)
        # scale to microns
        self.scale(factor)
        # NOTE(review): 'slide' below refers to the last slide of the loop above
        if plot == True:
            plt.figure()
            slide.plot(final=False)
            if plot_folder == True:
                plt.savefig(plotpath + '/sample_initial')
                plt.close('all')
            else:
                plt.show()
        # get smoothing params
        n_distance = self.search(Config.SAMPLE, 'smoothing', 'nerve_distance', optional=True)
        i_distance = self.search(Config.SAMPLE, 'smoothing', 'fascicle_distance', optional=True)
        # smooth traces
        if not (n_distance == i_distance == None):
            if nerve_mode == NerveMode.PRESENT and n_distance is None:
                self.throw(112)
            else:
                self.smooth(n_distance, i_distance)
                self.scale(1) # does not scale but reconnects ends of traces after offset
        # after scaling, if only inners were provided, generate outers
        if mask_input_mode == MaskInputMode.INNERS:
            peri_thick_mode: PerineuriumThicknessMode = self.search_mode(PerineuriumThicknessMode,
                                                                         Config.SAMPLE)
            perineurium_thk_info: dict = self.search(Config.CI_PERINEURIUM_THICKNESS,
                                                     PerineuriumThicknessMode.parameters.value,
                                                     str(peri_thick_mode).split('.')[-1])
            self.generate_perineurium(perineurium_thk_info)
        # repositioning!
        for i, slide in enumerate(self.slides):
            print('\tslide {} of {}'.format(1 + i, len(self.slides)))
            # title = ''
            if nerve_mode == NerveMode.NOT_PRESENT and deform_mode is not DeformationMode.NONE:
                self.throw(40)
            partially_deformed_nerve = None
            if deform_mode == DeformationMode.PHYSICS:
                print('\t\tsetting up physics')
                if 'morph_count' in self.search(Config.SAMPLE).keys():
                    morph_count = self.search(Config.SAMPLE, 'morph_count')
                else:
                    morph_count = 100
                if 'deform_ratio' in self.search(Config.SAMPLE).keys():
                    deform_ratio = self.search(Config.SAMPLE, 'deform_ratio')
                    print('\t\tdeform ratio set to {}'.format(deform_ratio))
                else:
                    self.throw(118)
                # title = 'morph count: {}'.format(morph_count)
                sep_fascicles = self.search(Config.SAMPLE, "boundary_separation", "fascicles")
                sep_nerve = None
                print('\t\tensuring minimum fascicle separation of {} um'.format(sep_fascicles))
                if 'nerve' in self.search(Config.SAMPLE, 'boundary_separation').keys():
                    sep_nerve = self.search(Config.SAMPLE, 'boundary_separation', 'nerve')
                    print('\t\tensuring minimum nerve:fascicle separation of {} um'.format(sep_nerve))
                deformable = Deformable.from_slide(slide,
                                                   ReshapeNerveMode.CIRCLE,
                                                   sep_nerve=sep_nerve)
                movements, rotations = deformable.deform(morph_count=morph_count,
                                                         render=deform_animate,
                                                         minimum_distance=sep_fascicles,
                                                         ratio=deform_ratio)
                partially_deformed_nerve = Deformable.deform_steps(deformable.start,
                                                                  deformable.end,
                                                                  morph_count,
                                                                  deform_ratio)[-1]
                for move, angle, fascicle in zip(movements, rotations, slide.fascicles):
                    fascicle.shift(list(move) + [0])
                    fascicle.rotate(angle)
            elif deform_mode == DeformationMode.JITTER:
                slide.reposition_fascicles(slide.reshaped_nerve(reshape_nerve_mode), 10)
            else: # must be DeformationMode.NONE
                import warnings
                if 'nerve' in self.search(Config.SAMPLE, 'boundary_separation').keys():
                    sep_nerve = self.search(Config.SAMPLE, 'boundary_separation', 'nerve')
                    if sep_nerve != 0:
                        warnings.warn(
                            'NO DEFORMATION is happening! AND sep_nerve is not 0, sep_nerve = {}'.format(sep_nerve))
                else:
                    warnings.warn('NO DEFORMATION is happening!')
            # NOTE(review): sep_nerve is only assigned in the PHYSICS branch;
            # if deform_mode is JITTER the offset calls below would hit an
            # unbound name -- confirm intended mode combinations
            if nerve_mode is NerveMode.PRESENT and deform_mode != DeformationMode.NONE:
                if deform_ratio != 1 and partially_deformed_nerve is not None:
                    partially_deformed_nerve.shift(-np.asarray(list(partially_deformed_nerve.centroid()) + [0]))
                    slide.nerve = partially_deformed_nerve
                    slide.nerve.offset(distance=sep_nerve)
                else:
                    slide.nerve = slide.reshaped_nerve(reshape_nerve_mode)
                    slide.nerve.offset(distance=sep_nerve)
            # shift slide about (0,0)
            slide.move_center(np.array([0, 0]))
            # Generate orientation point so src/core/query.py is happy
            if slide.orientation_angle is not None:
                # choose outer (based on if nerve is present)
                outer = slide.nerve if (slide.nerve is not None) else slide.fascicles[0].outer
                length = outer.mean_radius() * 10
                o_pt = np.array([np.cos(slide.orientation_angle), np.sin(slide.orientation_angle)]) * length
                ray = LineString([outer.centroid(), o_pt])
                # find intersection point with outer (interpolated)
                slide.orientation_point = np.array(ray.intersection(outer.polygon().boundary))
            # scale with ratio = 1 (no scaling happens, but connects the ends of each trace to itself)
            self.scale(1)
            # slide.plot(fix_aspect_ratio=True, title=title)
            if plot == True:
                plt.figure()
                slide.plot(final=False)
                if plot_folder == True:
                    plt.savefig(plotpath + '/sample_final')
                    plt.close()
                else:
                    plt.show()
            # plt.figure(2)
            # slide.nerve.plot()
            # plt.plot(*tuple(slide.nerve.points[slide.orientation_point_index][:2]), 'b*')
            # plt.show()
        return self
def io_from_compiled(self, imgin, i_out, o_out):
"""
Generate inner and outer mask from compiled mask
:param imgin: path to input image (hint: c.tif)
:param i_out: full path to desired output inner mask
:param o_out: full path to desired output outer mask
"""
compiled = cv2.imread(imgin, -1)
imgnew = cv2.bitwise_not(compiled)
h, w = imgnew.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(imgnew, mask, (0, 0), 0);
cv2.imwrite(i_out, imgnew)
cv2.imwrite(o_out, compiled + imgnew)
    def write(self, mode: WriteMode) -> 'Sample':
        """
        Write entire list of slides.

        :param mode: WriteMode selecting the output folder/format
            (SECTIONWISE2D -> 'sectionwise2d', SECTIONWISE -> 'sectionwise')
        :return: self, so calls can be chained
        """
        # get starting point so able to go back
        start_directory: str = os.getcwd()
        # get path to sample slides
        sample_path = os.path.join(self.path(Config.SAMPLE, 'samples_path'),
                                   str(self.search(Config.RUN, 'sample')),
                                   'slides')
        # loop through the slide info (index i SHOULD correspond to slide in self.slides)
        for i, slide_info in enumerate(self.map.slides):
            # unpack data and force cast to string
            cassette, number, _, source_directory = slide_info.data()
            cassette, number = (str(item) for item in (cassette, number))
            # build path to slide and ensure that it exists before proceeding
            slide_path = os.path.join(sample_path, cassette, number)
            if not os.path.exists(slide_path):
                self.throw(27)
            else:
                # change directories to slide path
                os.chdir(slide_path)
                # build the directory for output (name is the write mode)
                directory_to_create = ''
                if mode == WriteMode.SECTIONWISE2D:
                    directory_to_create = 'sectionwise2d'
                elif mode == WriteMode.SECTIONWISE:
                    directory_to_create = 'sectionwise'
                else:
                    self.throw(28)
                if not os.path.exists(directory_to_create):
                    os.makedirs(directory_to_create)
                os.chdir(directory_to_create)
                # WRITE
                self.slides[i].write(mode, os.getcwd())
                # go back up to start directory, then to top of loop
                os.chdir(start_directory)
        return self
def make_electrode_input(self) -> 'Sample':
# load template for electrode input
electrode_input: dict = TemplateOutput.read(TemplateMode.ELECTRODE_INPUT)
for cuff_inner_mode in CuffInnerMode:
string_mode = str(cuff_inner_mode).split('.')[1]
if cuff_inner_mode == CuffInnerMode.CIRCLE:
(minx, miny, maxx, maxy) = self.slides[0].nerve.polygon().bounds
electrode_input[string_mode]['r'] = max([(maxx - minx) / 2, (maxy - miny) / 2])
elif cuff_inner_mode == CuffInnerMode.BOUNDING_BOX:
(minx, miny, maxx, maxy) = self.slides[0].nerve.polygon().bounds
electrode_input[string_mode]['x'] = maxx - minx
electrode_input[string_mode]['y'] = maxy - miny
else:
pass
# write template for electrode input
TemplateOutput.write(electrode_input, TemplateMode.ELECTRODE_INPUT, self)
return self
def output_morphology_data(self) -> 'Sample':
nerve_mode = self.search_mode(NerveMode, Config.SAMPLE)
fascicles = [fascicle.morphology_data() for fascicle in self.slides[0].fascicles]
if nerve_mode == NerveMode.PRESENT:
nerve = Nerve.morphology_data(self.slides[0].nerve)
morphology_input = {"Nerve": nerve, "Fascicles": fascicles}
else:
morphology_input = {"Nerve": None, "Fascicles": fascicles}
self.configs[Config.SAMPLE.value]["Morphology"] = morphology_input
sample_path = os.path.join(self.path(Config.SAMPLE, 'samples_path'),
str(self.search(Config.RUN, 'sample')),
'sample.json')
TemplateOutput.write(self.configs[Config.SAMPLE.value], sample_path)
self.morphology = morphology_input
return self
| [
"sys.platform.startswith",
"matplotlib.pyplot.savefig",
"src.core.Trace",
"src.core.Nerve.morphology_data",
"numpy.arctan2",
"src.utils.TemplateOutput.read",
"matplotlib.pyplot.figure",
"numpy.sin",
"cv2.floodFill",
"os.path.join",
"os.chdir",
"cv2.imwrite",
"matplotlib.pyplot.close",
"os.... | [((1501, 1562), 'src.utils.Exceptionable.__init__', 'Exceptionable.__init__', (['self', 'SetupMode.OLD', 'exception_config'], {}), '(self, SetupMode.OLD, exception_config)\n', (1523, 1562), False, 'from src.utils import Exceptionable, Configurable, Saveable, SetupMode, Config, MaskFileNames, NerveMode, MaskInputMode, ReshapeNerveMode, DeformationMode, PerineuriumThicknessMode, WriteMode, CuffInnerMode, TemplateOutput, TemplateMode, ScaleInputMode\n'), ((1571, 1598), 'src.utils.Configurable.__init__', 'Configurable.__init__', (['self'], {}), '(self)\n', (1592, 1598), False, 'from src.utils import Exceptionable, Configurable, Saveable, SetupMode, Config, MaskFileNames, NerveMode, MaskInputMode, ReshapeNerveMode, DeformationMode, PerineuriumThicknessMode, WriteMode, CuffInnerMode, TemplateOutput, TemplateMode, ScaleInputMode\n'), ((2637, 2679), 'src.core.Map', 'Map', (['self.configs[Config.EXCEPTIONS.value]'], {}), '(self.configs[Config.EXCEPTIONS.value])\n', (2640, 2679), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((3894, 3914), 'cv2.imread', 'cv2.imread', (['path', '(-1)'], {}), '(path, -1)\n', (3904, 3914), False, 'import cv2\n'), ((6890, 6901), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6899, 6901), False, 'import os\n'), ((11419, 11430), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11428, 11430), False, 'import os\n'), ((25927, 25948), 'cv2.imread', 'cv2.imread', (['imgin', '(-1)'], {}), '(imgin, -1)\n', (25937, 25948), False, 'import cv2\n'), ((25967, 25992), 'cv2.bitwise_not', 'cv2.bitwise_not', (['compiled'], {}), '(compiled)\n', (25982, 25992), False, 'import cv2\n'), ((26042, 26076), 'numpy.zeros', 'np.zeros', (['(h + 2, w + 2)', 'np.uint8'], {}), '((h + 2, w + 2), np.uint8)\n', (26050, 26076), True, 'import numpy as np\n'), ((26086, 26124), 'cv2.floodFill', 'cv2.floodFill', (['imgnew', 'mask', '(0, 0)', '(0)'], {}), '(imgnew, mask, (0, 0), 0)\n', (26099, 26124), False, 'import cv2\n'), ((26135, 26161), 'cv2.imwrite', 
'cv2.imwrite', (['i_out', 'imgnew'], {}), '(i_out, imgnew)\n', (26146, 26161), False, 'import cv2\n'), ((26171, 26208), 'cv2.imwrite', 'cv2.imwrite', (['o_out', '(compiled + imgnew)'], {}), '(o_out, compiled + imgnew)\n', (26182, 26208), False, 'import cv2\n'), ((26402, 26413), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26411, 26413), False, 'import os\n'), ((28206, 28255), 'src.utils.TemplateOutput.read', 'TemplateOutput.read', (['TemplateMode.ELECTRODE_INPUT'], {}), '(TemplateMode.ELECTRODE_INPUT)\n', (28225, 28255), False, 'from src.utils import Exceptionable, Configurable, Saveable, SetupMode, Config, MaskFileNames, NerveMode, MaskInputMode, ReshapeNerveMode, DeformationMode, PerineuriumThicknessMode, WriteMode, CuffInnerMode, TemplateOutput, TemplateMode, ScaleInputMode\n'), ((28969, 29042), 'src.utils.TemplateOutput.write', 'TemplateOutput.write', (['electrode_input', 'TemplateMode.ELECTRODE_INPUT', 'self'], {}), '(electrode_input, TemplateMode.ELECTRODE_INPUT, self)\n', (28989, 29042), False, 'from src.utils import Exceptionable, Configurable, Saveable, SetupMode, Config, MaskFileNames, NerveMode, MaskInputMode, ReshapeNerveMode, DeformationMode, PerineuriumThicknessMode, WriteMode, CuffInnerMode, TemplateOutput, TemplateMode, ScaleInputMode\n'), ((29825, 29893), 'src.utils.TemplateOutput.write', 'TemplateOutput.write', (['self.configs[Config.SAMPLE.value]', 'sample_path'], {}), '(self.configs[Config.SAMPLE.value], sample_path)\n', (29845, 29893), False, 'from src.utils import Exceptionable, Configurable, Saveable, SetupMode, Config, MaskFileNames, NerveMode, MaskInputMode, ReshapeNerveMode, DeformationMode, PerineuriumThicknessMode, WriteMode, CuffInnerMode, TemplateOutput, TemplateMode, ScaleInputMode\n'), ((2018, 2076), 'os.path.join', 'os.path.join', (['"""config"""', '"""system"""', '"""ci_peri_thickness.json"""'], {}), "('config', 'system', 'ci_peri_thickness.json')\n", (2030, 2076), False, 'import os\n'), ((4542, 4592), 
'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['img', 'removal_size'], {}), '(img, removal_size)\n', (4573, 4592), False, 'from skimage import morphology\n'), ((5286, 5317), 'cv2.imread', 'cv2.imread', (['scale_bar_mask_path'], {}), '(scale_bar_mask_path)\n', (5296, 5317), False, 'import cv2\n'), ((7480, 7502), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (7490, 7502), False, 'import os\n'), ((10192, 10217), 'os.chdir', 'os.chdir', (['start_directory'], {}), '(start_directory)\n', (10200, 10217), False, 'import os\n'), ((11302, 11338), 'os.path.exists', 'os.path.exists', (['mask_file_name.value'], {}), '(mask_file_name.value)\n', (11316, 11338), False, 'import os\n'), ((11629, 11691), 'os.path.join', 'os.path.join', (['"""samples"""', 'sample', 'MaskFileNames.SCALE_BAR.value'], {}), "('samples', sample, MaskFileNames.SCALE_BAR.value)\n", (11641, 11691), False, 'import os\n'), ((11898, 11922), 'os.path.exists', 'os.path.exists', (['plotpath'], {}), '(plotpath)\n', (11912, 11922), False, 'import os\n'), ((11936, 11957), 'os.makedirs', 'os.makedirs', (['plotpath'], {}), '(plotpath)\n', (11947, 11957), False, 'import os\n'), ((17290, 17421), 'src.core.Slide', 'Slide', (['fascicles', 'nerve', 'nerve_mode', 'self.configs[Config.EXCEPTIONS.value]'], {'will_reposition': '(deform_mode != DeformationMode.NONE)'}), '(fascicles, nerve, nerve_mode, self.configs[Config.EXCEPTIONS.value],\n will_reposition=deform_mode != DeformationMode.NONE)\n', (17295, 17421), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((18365, 18390), 'os.chdir', 'os.chdir', (['start_directory'], {}), '(start_directory)\n', (18373, 18390), False, 'import os\n'), ((18468, 18494), 'os.path.exists', 'os.path.exists', (['scale_path'], {}), '(scale_path)\n', (18482, 18494), False, 'import os\n'), ((18979, 18991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18989, 18991), True, 'import matplotlib.pyplot as plt\n'), 
((25181, 25193), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25191, 25193), True, 'import matplotlib.pyplot as plt\n'), ((27095, 27138), 'os.path.join', 'os.path.join', (['sample_path', 'cassette', 'number'], {}), '(sample_path, cassette, number)\n', (27107, 27138), False, 'import os\n'), ((28033, 28058), 'os.chdir', 'os.chdir', (['start_directory'], {}), '(start_directory)\n', (28041, 28058), False, 'import os\n'), ((29336, 29379), 'src.core.Nerve.morphology_data', 'Nerve.morphology_data', (['self.slides[0].nerve'], {}), '(self.slides[0].nerve)\n', (29357, 29379), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((4316, 4338), 'scipy.ndimage.morphology.binary_fill_holes', 'binary_fill_holes', (['img'], {}), '(img)\n', (4333, 4338), False, 'from scipy.ndimage.morphology import binary_fill_holes\n'), ((8360, 8384), 'os.chdir', 'os.chdir', (['directory_part'], {}), '(directory_part)\n', (8368, 8384), False, 'import os\n'), ((9913, 9940), 'os.path.exists', 'os.path.exists', (['source_file'], {}), '(source_file)\n', (9927, 9940), False, 'import os\n'), ((13396, 13457), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (13412, 13457), False, 'import cv2\n'), ((13680, 13779), 'src.core.Trace', 'Trace', (['[(point + [0]) for point in contour[0][:, 0, :]]', 'self.configs[Config.EXCEPTIONS.value]'], {}), '([(point + [0]) for point in contour[0][:, 0, :]], self.configs[Config\n .EXCEPTIONS.value])\n', (13685, 13779), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((18154, 18198), 'numpy.arctan2', 'np.arctan2', (['(ori_y - outer_y)', '(ori_x - outer_x)'], {}), '(ori_y - outer_y, ori_x - outer_x)\n', (18164, 18198), True, 'import numpy as np\n'), ((19080, 19121), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotpath + '/sample_initial')"], {}), "(plotpath + '/sample_initial')\n", (19091, 19121), True, 'import 
matplotlib.pyplot as plt\n'), ((19138, 19154), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (19147, 19154), True, 'import matplotlib.pyplot as plt\n'), ((19189, 19199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19197, 19199), True, 'import matplotlib.pyplot as plt\n'), ((24279, 24295), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (24287, 24295), True, 'import numpy as np\n'), ((25282, 25321), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotpath + '/sample_final')"], {}), "(plotpath + '/sample_final')\n", (25293, 25321), True, 'import matplotlib.pyplot as plt\n'), ((25338, 25349), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25347, 25349), True, 'import matplotlib.pyplot as plt\n'), ((25384, 25394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25392, 25394), True, 'import matplotlib.pyplot as plt\n'), ((27158, 27184), 'os.path.exists', 'os.path.exists', (['slide_path'], {}), '(slide_path)\n', (27172, 27184), False, 'import os\n'), ((27302, 27322), 'os.chdir', 'os.chdir', (['slide_path'], {}), '(slide_path)\n', (27310, 27322), False, 'import os\n'), ((27844, 27873), 'os.chdir', 'os.chdir', (['directory_to_create'], {}), '(directory_to_create)\n', (27852, 27873), False, 'import os\n'), ((7643, 7679), 'os.path.join', 'os.path.join', (['source_dir', 'mask_fname'], {}), '(source_dir, mask_fname)\n', (7655, 7679), False, 'import os\n'), ((8264, 8294), 'os.path.exists', 'os.path.exists', (['directory_part'], {}), '(directory_part)\n', (8278, 8294), False, 'import os\n'), ((8316, 8343), 'os.makedirs', 'os.makedirs', (['directory_part'], {}), '(directory_part)\n', (8327, 8343), False, 'import os\n'), ((10038, 10076), 'shutil.copy2', 'shutil.copy2', (['source_file', 'target_file'], {}), '(source_file, target_file)\n', (10050, 10076), False, 'import shutil\n'), ((12518, 12551), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (12541, 12551), 
False, 'import sys\n'), ((12555, 12587), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (12578, 12587), False, 'import sys\n'), ((12616, 12709), 'subprocess.Popen', 'subprocess.Popen', (['[\'bash\', \'for file in *.tiff; do mv "$file" "${file%.tiff}.tif"; done\']'], {}), '([\'bash\',\n \'for file in *.tiff; do mv "$file" "${file%.tiff}.tif"; done\'])\n', (12632, 12709), False, 'import subprocess\n'), ((12800, 12906), 'subprocess.Popen', 'subprocess.Popen', (["['powershell.exe',\n 'Dir | Rename-Item –NewName { $_.name –replace “.tiff“,”.tif” }']"], {}), "(['powershell.exe',\n 'Dir | Rename-Item –NewName { $_.name –replace “.tiff“,”.tif” }'])\n", (12816, 12906), False, 'import subprocess\n'), ((13217, 13264), 'cv2.imread', 'cv2.imread', (['MaskFileNames.ORIENTATION.value', '(-1)'], {}), '(MaskFileNames.ORIENTATION.value, -1)\n', (13227, 13264), False, 'import cv2\n'), ((14737, 14831), 'src.core.Fascicle.to_list', 'Fascicle.to_list', (['MaskFileNames.INNERS.value', 'None', 'self.configs[Config.EXCEPTIONS.value]'], {}), '(MaskFileNames.INNERS.value, None, self.configs[Config.\n EXCEPTIONS.value])\n', (14753, 14831), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((16603, 16644), 'cv2.imread', 'cv2.imread', (['MaskFileNames.NERVE.value', '(-1)'], {}), '(MaskFileNames.NERVE.value, -1)\n', (16613, 16644), False, 'import cv2\n'), ((27738, 27773), 'os.path.exists', 'os.path.exists', (['directory_to_create'], {}), '(directory_to_create)\n', (27752, 27773), False, 'import os\n'), ((27795, 27827), 'os.makedirs', 'os.makedirs', (['directory_to_create'], {}), '(directory_to_create)\n', (27806, 27827), False, 'import os\n'), ((27942, 27953), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (27951, 27953), False, 'import os\n'), ((9107, 9140), 'os.path.exists', 'os.path.exists', (['scale_source_file'], {}), '(scale_source_file)\n', (9121, 9140), False, 'import os\n'), ((12481, 12496), 'os.listdir', 'os.listdir', 
(['"""."""'], {}), "('.')\n", (12491, 12496), False, 'import os\n'), ((14386, 14405), 'numpy.array', 'np.array', (['mask_dims'], {}), '(mask_dims)\n', (14394, 14405), True, 'import numpy as np\n'), ((16828, 16848), 'numpy.flipud', 'np.flipud', (['img_nerve'], {}), '(img_nerve)\n', (16837, 16848), True, 'import numpy as np\n'), ((17024, 17123), 'src.core.Trace', 'Trace', (['[(point + [0]) for point in contour[0][:, 0, :]]', 'self.configs[Config.EXCEPTIONS.value]'], {}), '([(point + [0]) for point in contour[0][:, 0, :]], self.configs[Config\n .EXCEPTIONS.value])\n', (17029, 17123), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((23609, 23654), 'warnings.warn', 'warnings.warn', (['"""NO DEFORMATION is happening!"""'], {}), "('NO DEFORMATION is happening!')\n", (23622, 23654), False, 'import warnings\n'), ((9170, 9232), 'shutil.copy2', 'shutil.copy2', (['scale_source_file', 'MaskFileNames.SCALE_BAR.value'], {}), '(scale_source_file, MaskFileNames.SCALE_BAR.value)\n', (9182, 9232), False, 'import shutil\n'), ((15387, 15502), 'src.core.Fascicle.to_list', 'Fascicle.to_list', (['MaskFileNames.INNERS.value', 'MaskFileNames.OUTERS.value', 'self.configs[Config.EXCEPTIONS.value]'], {}), '(MaskFileNames.INNERS.value, MaskFileNames.OUTERS.value,\n self.configs[Config.EXCEPTIONS.value])\n', (15403, 15502), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), ((24663, 24694), 'numpy.cos', 'np.cos', (['slide.orientation_angle'], {}), '(slide.orientation_angle)\n', (24669, 24694), True, 'import numpy as np\n'), ((24696, 24727), 'numpy.sin', 'np.sin', (['slide.orientation_angle'], {}), '(slide.orientation_angle)\n', (24702, 24727), True, 'import numpy as np\n'), ((16193, 16266), 'src.core.Fascicle.to_list', 'Fascicle.to_list', (['i_image', 'o_image', 'self.configs[Config.EXCEPTIONS.value]'], {}), '(i_image, o_image, self.configs[Config.EXCEPTIONS.value])\n', (16209, 16266), False, 'from src.core import Slide, Map, Fascicle, Nerve, Trace\n'), 
((15872, 15915), 'os.path.split', 'os.path.split', (['MaskFileNames.COMPILED.value'], {}), '(MaskFileNames.COMPILED.value)\n', (15885, 15915), False, 'import os\n'), ((15966, 16009), 'os.path.split', 'os.path.split', (['MaskFileNames.COMPILED.value'], {}), '(MaskFileNames.COMPILED.value)\n', (15979, 16009), False, 'import os\n')] |
import numpy as np
def rotationMatrix3D(roll, pitch, yaw):
    """Build the 3x3 rotation matrix for XYZ (roll-first, then pitch, then yaw) Euler angles.

    Equivalent to Rz(yaw) @ Ry(pitch) @ Rx(roll) acting on column vectors.
    Angles are in radians; returns a float64 (3, 3) array.
    """
    sin_r, cos_r = np.sin(roll), np.cos(roll)
    sin_p, cos_p = np.sin(pitch), np.cos(pitch)
    sin_y, cos_y = np.sin(yaw), np.cos(yaw)
    # Write the matrix as a single literal instead of cell-by-cell assignment.
    return np.array([
        [cos_p * cos_y, sin_p * sin_r * cos_y - cos_r * sin_y, sin_p * cos_r * cos_y + sin_r * sin_y],
        [cos_p * sin_y, sin_p * sin_r * sin_y + cos_r * cos_y, sin_p * cos_r * sin_y - sin_r * cos_y],
        [-sin_p, cos_p * sin_r, cos_p * cos_r],
    ])
def rotationMatrixRoll(roll):
    """Rotation about the x-axis by `roll` radians (float64, 3x3)."""
    c, s = np.cos(roll), np.sin(roll)
    return np.array([
        [1.0, 0.0, 0.0],
        [0.0, c, -s],
        [0.0, s, c],
    ])
def rotarotationMatrixPitch(pitch):
    """Rotation about the y-axis by `pitch` radians (float64, 3x3).

    NOTE(review): the "rotarotation" prefix looks like a typo for "rotation",
    but the name is public API here and is kept unchanged.
    """
    c, s = np.cos(pitch), np.sin(pitch)
    return np.array([
        [c, 0.0, s],
        [0.0, 1.0, 0.0],
        [-s, 0.0, c],
    ])
def rotarotationMatrixYaw(yaw):
    """Rotation about the z-axis by `yaw` radians (float64, 3x3)."""
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([
        [c, -s, 0.0],
        [s, c, 0.0],
        [0.0, 0.0, 1.0],
    ])
def rotationMatrix3DYPR(roll, pitch, yaw):
    """Compose the single-axis rotations as Rx(roll) @ Ry(pitch) @ Rz(yaw)."""
    roll_then_pitch = np.dot(rotationMatrixRoll(roll), rotarotationMatrixPitch(pitch))
    return np.dot(roll_then_pitch, rotarotationMatrixYaw(yaw))
def reverseX():
    """Return the 3x3 mirror matrix that negates the x component."""
    return np.diag([-1.0, 1.0, 1.0])
def reverseY():
    """Return the 3x3 mirror matrix that negates the y component."""
    return np.diag([1.0, -1.0, 1.0])
def intrinsicMatrix(fx, fy, u0, v0):
    """Build the 3x3 pinhole-camera intrinsic matrix.

    fx, fy are the focal lengths in pixels; (u0, v0) is the principal point.
    The result dtype follows the inputs (same as np.array on the rows).
    """
    row_u = [fx, 0, u0]
    row_v = [0, fy, v0]
    row_w = [0, 0, 1]
    return np.array([row_u, row_v, row_w])
class CoordinateTransformation(object):
    """Static helpers converting between world, camera, and image coordinates.

    Points are treated as column vectors (shape (3, N) for 3D, (2, N) for
    image points — TODO confirm against callers). ``R`` and ``t`` are the
    camera rotation and translation in world coordinates; ``K`` is the
    intrinsic matrix.
    """
    # Fixed axis change applied before projection: mirror x, mirror y, then a
    # roll/pitch/yaw rotation of (pi/2, 0, -pi/2). Presumably maps the body
    # frame to the optical frame convention — verify against camera setup.
    I = np.dot(np.dot(reverseX(), reverseY()), rotationMatrix3DYPR(np.pi / 2, 0, -np.pi / 2))
    @staticmethod
    def world3DToCamera3D(world_vec, R, t):
        """Transform world-frame points into the camera frame: R^T (p - t)."""
        camera_vec = np.dot(R.T, world_vec - t)
        return camera_vec
    @staticmethod
    def camera3DToWorld3D(camera_vec, R, t):
        """Inverse of world3DToCamera3D: R p + t."""
        world_vec = np.dot(R, camera_vec) + t
        return world_vec
    @staticmethod
    def camera3DToImage2D(camera_vec, K, eps=1e-24):
        """Project camera-frame points to pixel coordinates.

        Applies K and the fixed axis change I, then divides by the
        homogeneous coordinate; `eps` guards against division by zero depth.
        """
        image_vec = np.dot(np.dot(K, CoordinateTransformation.I), camera_vec)
        return image_vec[:2, :] / (image_vec[2, :] + eps)
    @staticmethod
    def world3DToImage2D(world_vec, K, R, t):
        """Project world-frame points to pixel coordinates (world -> camera -> image)."""
        camera_vec = CoordinateTransformation.world3DToCamera3D(world_vec, R, t)
        image_vec = CoordinateTransformation.camera3DToImage2D(camera_vec, K)
        return image_vec
    @staticmethod
    def world3DToImagePixel2D(world_vec, K, R, t):
        """Project the first world point and round it to integer pixel coords, shape (2, 1)."""
        image_vec = CoordinateTransformation.world3DToImage2D(world_vec, K, R, t)
        x_pixel, y_pixel = round(image_vec[0, 0]), round(image_vec[1, 0])
        return np.array([x_pixel, y_pixel]).reshape(2, 1)
    @staticmethod
    def image2DToWorld3D(image_vec, K, R, t):
        """Back-project one pixel to a world point assuming it lies on the z=0 plane.

        Solves the 4x4 system A x = b for x = (X, Y, Z, s): the top three rows
        encode the projection equation, and the appended row [0, 0, 1, 0]
        forces the world z-coordinate to 0 (ground-plane assumption).
        """
        r = np.vstack((image_vec, 1))
        b = np.vstack((np.dot(np.dot(K, CoordinateTransformation.I), t), 0))
        temp1 = np.dot(np.dot(K, CoordinateTransformation.I), R.T)
        temp2 = np.hstack((temp1, -r))
        A = np.vstack((temp2, np.array([[0, 0, 1, 0]])))
        world_vec = np.dot(np.linalg.inv(A), b)
        return world_vec[:3]
    @staticmethod
    def image2DToWorld3D2(image_vec, K, R, t):
        """Batched variant of image2DToWorld3D for N pixels at once.

        Builds one 4x4 system per pixel (stacked along the leading axis),
        inverts them in a batch, and returns the first three components per
        pixel; the same z=0 ground-plane constraint row is used.
        """
        r = np.vstack((image_vec, np.ones((1, image_vec.shape[1]))))
        b = np.vstack((np.dot(np.dot(K, CoordinateTransformation.I), t), 0))
        temp1 = np.dot(np.dot(K, CoordinateTransformation.I), R.T)
        # Replicate the shared 3x3 projection block once per pixel.
        temp1 = np.expand_dims(temp1, axis=2).repeat(image_vec.shape[1], axis=2)
        r = np.expand_dims(r, axis=1)
        temp1 = np.transpose(temp1, (2, 0, 1))
        r = np.transpose(r, (2, 0, 1))
        temp2 = np.concatenate((temp1, -r), axis=2)
        temp3 = np.array([[0, 0, 1, 0]])
        temp3 = np.expand_dims(temp3, axis=2).repeat(image_vec.shape[1], axis=2)
        temp3 = np.transpose(temp3, (2, 0, 1))
        A = np.concatenate((temp2, temp3), axis=1)
        world_vec = np.dot(np.linalg.inv(A), b)
        return world_vec[:, :3]
| [
"numpy.concatenate",
"numpy.transpose",
"numpy.identity",
"numpy.expand_dims",
"numpy.hstack",
"numpy.ones",
"numpy.sin",
"numpy.array",
"numpy.linalg.inv",
"numpy.cos",
"numpy.dot",
"numpy.vstack"
] | [((300, 314), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (311, 314), True, 'import numpy as np\n'), ((582, 596), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (593, 596), True, 'import numpy as np\n'), ((611, 623), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (617, 623), True, 'import numpy as np\n'), ((638, 650), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (644, 650), True, 'import numpy as np\n'), ((665, 677), 'numpy.sin', 'np.sin', (['roll'], {}), '(roll)\n', (671, 677), True, 'import numpy as np\n'), ((765, 779), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (776, 779), True, 'import numpy as np\n'), ((794, 807), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (800, 807), True, 'import numpy as np\n'), ((822, 835), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (828, 835), True, 'import numpy as np\n'), ((879, 892), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (885, 892), True, 'import numpy as np\n'), ((948, 962), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (959, 962), True, 'import numpy as np\n'), ((977, 988), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (983, 988), True, 'import numpy as np\n'), ((1003, 1014), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (1009, 1014), True, 'import numpy as np\n'), ((1029, 1040), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (1035, 1040), True, 'import numpy as np\n'), ((1264, 1278), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1275, 1278), True, 'import numpy as np\n'), ((1335, 1349), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1346, 1349), True, 'import numpy as np\n'), ((1427, 1474), 'numpy.array', 'np.array', (['[[fx, 0, u0], [0, fy, v0], [0, 0, 1]]'], {}), '([[fx, 0, u0], [0, fy, v0], [0, 0, 1]])\n', (1435, 1474), True, 'import numpy as np\n'), ((132, 144), 'numpy.sin', 'np.sin', (['roll'], {}), '(roll)\n', (138, 144), True, 'import numpy as np\n'), ((146, 159), 'numpy.sin', 'np.sin', (['pitch'], 
{}), '(pitch)\n', (152, 159), True, 'import numpy as np\n'), ((161, 172), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (167, 172), True, 'import numpy as np\n'), ((190, 202), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (196, 202), True, 'import numpy as np\n'), ((204, 217), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (210, 217), True, 'import numpy as np\n'), ((219, 230), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (225, 230), True, 'import numpy as np\n'), ((693, 705), 'numpy.sin', 'np.sin', (['roll'], {}), '(roll)\n', (699, 705), True, 'import numpy as np\n'), ((851, 864), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (857, 864), True, 'import numpy as np\n'), ((1056, 1067), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (1062, 1067), True, 'import numpy as np\n'), ((1708, 1734), 'numpy.dot', 'np.dot', (['R.T', '(world_vec - t)'], {}), '(R.T, world_vec - t)\n', (1714, 1734), True, 'import numpy as np\n'), ((2714, 2739), 'numpy.vstack', 'np.vstack', (['(image_vec, 1)'], {}), '((image_vec, 1))\n', (2723, 2739), True, 'import numpy as np\n'), ((2901, 2923), 'numpy.hstack', 'np.hstack', (['(temp1, -r)'], {}), '((temp1, -r))\n', (2910, 2923), True, 'import numpy as np\n'), ((3431, 3456), 'numpy.expand_dims', 'np.expand_dims', (['r'], {'axis': '(1)'}), '(r, axis=1)\n', (3445, 3456), True, 'import numpy as np\n'), ((3474, 3504), 'numpy.transpose', 'np.transpose', (['temp1', '(2, 0, 1)'], {}), '(temp1, (2, 0, 1))\n', (3486, 3504), True, 'import numpy as np\n'), ((3517, 3543), 'numpy.transpose', 'np.transpose', (['r', '(2, 0, 1)'], {}), '(r, (2, 0, 1))\n', (3529, 3543), True, 'import numpy as np\n'), ((3561, 3596), 'numpy.concatenate', 'np.concatenate', (['(temp1, -r)'], {'axis': '(2)'}), '((temp1, -r), axis=2)\n', (3575, 3596), True, 'import numpy as np\n'), ((3613, 3637), 'numpy.array', 'np.array', (['[[0, 0, 1, 0]]'], {}), '([[0, 0, 1, 0]])\n', (3621, 3637), True, 'import numpy as np\n'), ((3735, 3765), 'numpy.transpose', 
'np.transpose', (['temp3', '(2, 0, 1)'], {}), '(temp3, (2, 0, 1))\n', (3747, 3765), True, 'import numpy as np\n'), ((3779, 3817), 'numpy.concatenate', 'np.concatenate', (['(temp2, temp3)'], {'axis': '(1)'}), '((temp2, temp3), axis=1)\n', (3793, 3817), True, 'import numpy as np\n'), ((1845, 1866), 'numpy.dot', 'np.dot', (['R', 'camera_vec'], {}), '(R, camera_vec)\n', (1851, 1866), True, 'import numpy as np\n'), ((1995, 2032), 'numpy.dot', 'np.dot', (['K', 'CoordinateTransformation.I'], {}), '(K, CoordinateTransformation.I)\n', (2001, 2032), True, 'import numpy as np\n'), ((2841, 2878), 'numpy.dot', 'np.dot', (['K', 'CoordinateTransformation.I'], {}), '(K, CoordinateTransformation.I)\n', (2847, 2878), True, 'import numpy as np\n'), ((3008, 3024), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (3021, 3024), True, 'import numpy as np\n'), ((3294, 3331), 'numpy.dot', 'np.dot', (['K', 'CoordinateTransformation.I'], {}), '(K, CoordinateTransformation.I)\n', (3300, 3331), True, 'import numpy as np\n'), ((3845, 3861), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (3858, 3861), True, 'import numpy as np\n'), ((2594, 2622), 'numpy.array', 'np.array', (['[x_pixel, y_pixel]'], {}), '([x_pixel, y_pixel])\n', (2602, 2622), True, 'import numpy as np\n'), ((2954, 2978), 'numpy.array', 'np.array', (['[[0, 0, 1, 0]]'], {}), '([[0, 0, 1, 0]])\n', (2962, 2978), True, 'import numpy as np\n'), ((3158, 3190), 'numpy.ones', 'np.ones', (['(1, image_vec.shape[1])'], {}), '((1, image_vec.shape[1]))\n', (3165, 3190), True, 'import numpy as np\n'), ((3354, 3383), 'numpy.expand_dims', 'np.expand_dims', (['temp1'], {'axis': '(2)'}), '(temp1, axis=2)\n', (3368, 3383), True, 'import numpy as np\n'), ((3654, 3683), 'numpy.expand_dims', 'np.expand_dims', (['temp3'], {'axis': '(2)'}), '(temp3, axis=2)\n', (3668, 3683), True, 'import numpy as np\n'), ((2770, 2807), 'numpy.dot', 'np.dot', (['K', 'CoordinateTransformation.I'], {}), '(K, CoordinateTransformation.I)\n', (2776, 
2807), True, 'import numpy as np\n'), ((3223, 3260), 'numpy.dot', 'np.dot', (['K', 'CoordinateTransformation.I'], {}), '(K, CoordinateTransformation.I)\n', (3229, 3260), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
import random
class EpisodicDetDataset(data.Dataset):
    """Episodic few-shot detection dataset (CenterNet-style targets).

    Each __getitem__ builds one episode: N query images (one per sampled
    category) plus a K-shot support set of object crops per category.
    Assumes subclass/loader code sets self.coco, self.coco_support,
    self.opt, self.split, self.mean, self.std, self._data_rng,
    self._eig_val, self._eig_vec, self.max_objs, self.k_shots,
    self.n_sample_classes, self._valid_ids, self.img_dir and
    self.supp_img_dir — TODO confirm against the concrete dataset class.
    """
    def _coco_box_to_bbox(self, box):
        """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] float32."""
        bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                        dtype=np.float32)
        return bbox
    def _get_border(self, border, size):
        """Shrink `border` (by powers of 2) until it fits twice inside `size`."""
        i = 1
        while size - border // i <= border // i:
            i *= 2
        return border // i
    def _sample_query_from_categories(self, sampled_categories):
        """Pick one query image per sampled category and its valid annotations.

        Returns (image paths, per-image annotation lists restricted to the
        sampled categories, and a mapping category_id -> episode class index).
        """
        # this loop is to sample a single image for every category
        # (to be sure each cat gets at least an image)
        query_img_paths = []
        anns_per_query = []
        category_dict = {}
        for idx,cat in enumerate(sampled_categories):
            image_ids = self.coco.getImgIds(catIds=cat)
            img_id = random.choice(image_ids)
            file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
            img_path = os.path.join(self.img_dir, file_name)
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            all_anns = self.coco.loadAnns(ids=ann_ids)
            # Keep only annotations of categories present in this episode.
            val_anns = [a for a in all_anns if a['category_id'] in sampled_categories]
            anns_per_query.append( val_anns )
            query_img_paths.append( img_path )
            category_dict[cat] = idx
        return query_img_paths, anns_per_query, category_dict
    def _sample_support_set(self, cat_id):
        """Sample k_shots support annotations of category `cat_id`.

        Filters out boxes smaller than opt.min_bbox_len on either side.
        NOTE(review): np.random.choice samples WITH replacement here, so the
        same annotation can appear more than once — confirm this is intended.
        """
        img_ids = self.coco_support.getImgIds(catIds=[cat_id])
        #img_items = self.coco_support.loadImgs(ids=img_ids)
        ann_ids = self.coco_support.getAnnIds(imgIds=img_ids)
        anns = self.coco_support.loadAnns(ids=ann_ids)
        is_proper_size = lambda a: (a['bbox'][2]>=self.opt.min_bbox_len) & (a['bbox'][3]>=self.opt.min_bbox_len)
        is_proper_cat = lambda a:a['category_id']==cat_id
        good_anns = [a for a in anns if (is_proper_size(a) & is_proper_cat(a))]
        sampled_good_anns = np.random.choice(good_anns, self.k_shots).tolist()
        img_paths = []
        for s in sampled_good_anns:
            img_file_name = self.coco_support.loadImgs([s['image_id']])[0]['file_name']
            img_paths.append(os.path.join(self.supp_img_dir, img_file_name))
        return img_paths, sampled_good_anns
    def _process_query(self, img, cat, augment=False):
        """Resize/augment one query image into the network input tensor.

        Returns (CHW float image, (input_h, input_w), flipped flag, crop
        center, crop scale) — the last three are needed later to map
        annotations into the output space.
        """
        height, width = img.shape[0], img.shape[1]
        center = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
        if self.opt.keep_res:
            # Round input size up to the next multiple of (pad + 1).
            input_h = (height | self.opt.pad) + 1
            input_w = (width | self.opt.pad) + 1
            scale = np.array([input_w, input_h], dtype=np.float32)
        else:
            scale = max(img.shape[0], img.shape[1]) * 1.0
            input_h, input_w = self.opt.input_h, self.opt.input_w
        flipped = False
        if augment:
            if not self.opt.not_rand_crop:
                # Random scale in [0.6, 1.4) and random crop center away from borders.
                scale = scale * np.random.choice(np.arange(0.6, 1.4, 0.1))
                w_border = self._get_border(128, img.shape[1])
                h_border = self._get_border(128, img.shape[0])
                center[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
                center[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
            else:
                # Gaussian shift/scale jitter clipped to +/-2 sigma.
                sf = self.opt.scale
                cf = self.opt.shift
                center[0] += scale * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
                center[1] += scale * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
                scale = scale * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
            if np.random.random() < self.opt.flip:
                flipped = True
                img = img[:, ::-1, :]
                center[0] = width - center[0] - 1
        trans_input = get_affine_transform(
            center, scale, 0, [input_w, input_h])
        inp = cv2.warpAffine(img, trans_input,
                             (input_w, input_h),
                             flags=cv2.INTER_LINEAR)
        #cv2.imshow('inp-{}'.format(cat),inp)
        inp = (inp.astype(np.float32) / 255.)
        if augment and not self.opt.no_color_aug:
            color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
        # Normalize and convert HWC -> CHW.
        inp = (inp - self.mean) / self.std
        inp = inp.transpose(2, 0, 1)
        inp_dim = (input_h, input_w)
        return inp, inp_dim, flipped, center, scale
    def _process_all_query_outs(self, query_imgs, anns_per_query, query_info, category_dict):
        """Build CenterNet ground-truth targets for every query image.

        Produces heatmaps, center offsets, width/height regressions and the
        category-specific variants, each stacked along a leading query axis.
        """
        hm_per_query = []
        reg_mask_per_query = []
        reg_per_query = []
        ind_per_query = []
        wh_per_query = []
        cs_wh_per_query = []
        cs_mask_per_query = []
        gt_det_per_query = []
        for query_idx, img in enumerate(query_imgs):
            width = img.shape[2]#(2, 0, 1)
            input_h, input_w = query_info['inp_dim'][query_idx]
            output_h = input_h // self.opt.down_ratio
            output_w = input_w // self.opt.down_ratio
            num_classes = len(query_info['sampled_categories'])
            center = query_info['center'][query_idx]
            scale = query_info['scale'][query_idx]
            # Same crop transform as the input, but into the output resolution.
            trans_output = get_affine_transform(center, scale, 0, [output_w, output_h])
            hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
            wh = np.zeros((self.max_objs, 2), dtype=np.float32)
            reg = np.zeros((self.max_objs, 2), dtype=np.float32)
            ind = np.zeros((self.max_objs), dtype=np.int64)
            reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
            cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
            cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
            draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                draw_umich_gaussian
            gt_det = []
            num_objs = min(len(anns_per_query[query_idx]), self.max_objs)
            for k in range(num_objs):
                ann = anns_per_query[query_idx][k]
                bbox = self._coco_box_to_bbox(ann['bbox'])
                cls_id = category_dict[ann['category_id']]
                if query_info['flipped'][query_idx]:
                    bbox[[0, 2]] = width - bbox[[2, 0]] - 1
                # Map box corners into output space and clip to the feature map.
                bbox[:2] = affine_transform(bbox[:2], trans_output)
                bbox[2:] = affine_transform(bbox[2:], trans_output)
                bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
                bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
                h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
                if h > 0 and w > 0:
                    radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                    radius = max(0, int(radius))
                    ct = np.array(
                        [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
                    ct_int = ct.astype(np.int32)
                    draw_gaussian(hm[cls_id], ct_int, radius)
                    wh[k] = 1. * w, 1. * h
                    ind[k] = ct_int[1] * output_w + ct_int[0]
                    reg[k] = ct - ct_int
                    reg_mask[k] = 1
                    cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
                    cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
                    gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
                                   ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
            #cv2.imshow( 'hm-query-{}-cat-{}'.format(query_idx,0), cv2.resize(hm[0], tuple(img.shape[1:3])) )
            #cv2.imshow( 'hm-query-{}-cat-{}'.format(query_idx,1), cv2.resize(hm[1], tuple(img.shape[1:3])) )
            #cv2.imshow( 'hm-query-{}-cat-{}'.format(query_idx,2), cv2.resize(hm[2], tuple(img.shape[1:3])) )
            hm_per_query.append(hm)
            reg_mask_per_query.append(reg_mask)
            reg_per_query.append(reg)
            ind_per_query.append(ind)
            wh_per_query.append(wh)
            gt_det_per_query.append(gt_det)
            cs_wh_per_query.append(cat_spec_wh)
            cs_mask_per_query.append(cat_spec_mask)
        hm = np.stack(hm_per_query)
        reg_mask = np.stack(reg_mask_per_query)
        reg = np.stack(reg_per_query)
        ind = np.stack(ind_per_query)
        wh = np.stack(wh_per_query)
        cs_wh_per_query = np.stack(cs_wh_per_query)
        cs_mask_per_query = np.stack(cs_mask_per_query)
        return hm, reg_mask, reg, ind, wh, gt_det_per_query, cs_wh_per_query, cs_mask_per_query
    def _process_support_set(self, support_imgs, support_anns, cat, augment=False):
        """Crop, optionally flip, resize and normalize the K support examples.

        Returns a (K, C, supp_h, supp_w) float array.
        """
        out_supp = []
        for i, (img, ann) in enumerate(zip(support_imgs, support_anns)):
            bbox = self._coco_box_to_bbox(ann['bbox'])
            x1,y1,x2,y2 = math.floor(bbox[0]), math.floor(bbox[1]), math.ceil(bbox[2]), math.ceil(bbox[3])
            #give a little more of context for support
            y1 = max(0, y1-self.opt.supp_ctxt)
            x1 = max(0, x1-self.opt.supp_ctxt)
            y2 = min(y2+self.opt.supp_ctxt, img.shape[0])
            x2 = min(x2+self.opt.supp_ctxt, img.shape[1])
            inp = img[y1:y2,x1:x2,:]
            if augment:
                if np.random.random() < self.opt.flip:
                    inp = inp[:, ::-1, :]
            #cv2.imshow('sample-{}-cat-{}'.format(i,cat), inp)
            inp = cv2.resize(inp, (int(self.opt.supp_w), int(self.opt.supp_h)))
            inp = (inp.astype(np.float32) / 255.)
            inp = (inp - self.mean) / self.std
            inp = inp.transpose(2, 0, 1)
            out_supp.append(inp)
        out_supp = np.stack(out_supp,axis=0)
        return out_supp
    def _sample_categories(self,num_categories):
        """Sample `num_categories` distinct category ids for this episode."""
        cat_ids = random.sample(self._valid_ids, num_categories)
        return cat_ids
    def __getitem__(self, index):
        """Assemble one episode; `index` is ignored (episodes are random)."""
        # 1. sample n categories
        sampled_categories = self._sample_categories(self.n_sample_classes)
        # 2. sample one image per category and load annotations for each image
        query_img_paths, anns_per_query, category_dict = self._sample_query_from_categories(sampled_categories)
        # 3. load all the query images and process them
        query_imgs = []
        query_info = {'flipped': [], 'center': [], 'scale': [], 'inp_dim': [], 'sampled_categories': sampled_categories}
        for qi,path in enumerate(query_img_paths):
            query_img = cv2.imread(path)
            inp, inp_dim, flipped, center, scale = self._process_query(query_img, qi, augment=(self.split=='train'))
            query_imgs.append(inp)
            query_info['flipped'].append(flipped)
            query_info['center'].append(center)
            query_info['scale'].append(scale)
            query_info['inp_dim'].append(inp_dim)
        # 4. sample and process the support set
        support_set = []
        for ic,cat in enumerate(sampled_categories):
            support_paths, support_anns = self._sample_support_set(cat)
            supp_imgs = [cv2.imread(img_path) for img_path in support_paths]
            supp_imgs = self._process_support_set(supp_imgs, support_anns, ic, augment=(self.split=='train'))
            support_set.append(supp_imgs)
        support_set = np.stack(support_set,axis=0)
        # 5. Process query gt output
        hm, reg_mask, reg, ind, wh, gt_det, cs_wh_per_query, cs_mask_per_query = self._process_all_query_outs(query_imgs, anns_per_query, query_info, category_dict)
        # 6. stack all together to be size [N,...]
        query_imgs = np.stack(query_imgs, axis=0)
        #cv2.waitKey(0)
        #print(query_imgs.shape, hm.shape, wh.shape, support_set.shape,'**************')
        ret = {'input': query_imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
               'supp': support_set, 'cat_spec_wh': cs_wh_per_query, 'cat_spec_mask': cs_mask_per_query}
        if self.opt.reg_offset:
            ret.update({'reg': reg})
        if self.opt.debug > 0 or not self.split == 'train':
            # NOTE: only the FIRST query's gt is exported in meta (center/scale
            # are the last loop values) — looks intentional for debug only.
            gt_det = np.array(gt_det[0], dtype=np.float32) if len(gt_det[0]) > 0 else \
                np.zeros((1, 6), dtype=np.float32)
            #meta = {'c': center, 's': scale, 'gt_det': gt_det, 'img_id': query_id}
            meta = {'c': center, 's': scale, 'gt_det': gt_det}
            ret['meta'] = meta
        return ret
| [
"random.sample",
"numpy.clip",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.arange",
"os.path.join",
"numpy.random.randn",
"numpy.random.choice",
"numpy.stack",
"math.ceil",
"utils.image.affine_transform",
"numpy.zeros",
"random.choice",
"utils.image.color_aug",
"math.floor",
"cv2.... | [((544, 622), 'numpy.array', 'np.array', (['[box[0], box[1], box[0] + box[2], box[1] + box[3]]'], {'dtype': 'np.float32'}), '([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n', (552, 622), True, 'import numpy as np\n'), ((2633, 2701), 'numpy.array', 'np.array', (['[img.shape[1] / 2.0, img.shape[0] / 2.0]'], {'dtype': 'np.float32'}), '([img.shape[1] / 2.0, img.shape[0] / 2.0], dtype=np.float32)\n', (2641, 2701), True, 'import numpy as np\n'), ((3876, 3934), 'utils.image.get_affine_transform', 'get_affine_transform', (['center', 'scale', '(0)', '[input_w, input_h]'], {}), '(center, scale, 0, [input_w, input_h])\n', (3896, 3934), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((3952, 4028), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'trans_input', '(input_w, input_h)'], {'flags': 'cv2.INTER_LINEAR'}), '(img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)\n', (3966, 4028), False, 'import cv2\n'), ((7772, 7794), 'numpy.stack', 'np.stack', (['hm_per_query'], {}), '(hm_per_query)\n', (7780, 7794), True, 'import numpy as np\n'), ((7810, 7838), 'numpy.stack', 'np.stack', (['reg_mask_per_query'], {}), '(reg_mask_per_query)\n', (7818, 7838), True, 'import numpy as np\n'), ((7849, 7872), 'numpy.stack', 'np.stack', (['reg_per_query'], {}), '(reg_per_query)\n', (7857, 7872), True, 'import numpy as np\n'), ((7883, 7906), 'numpy.stack', 'np.stack', (['ind_per_query'], {}), '(ind_per_query)\n', (7891, 7906), True, 'import numpy as np\n'), ((7917, 7939), 'numpy.stack', 'np.stack', (['wh_per_query'], {}), '(wh_per_query)\n', (7925, 7939), True, 'import numpy as np\n'), ((7962, 7987), 'numpy.stack', 'np.stack', (['cs_wh_per_query'], {}), '(cs_wh_per_query)\n', (7970, 7987), True, 'import numpy as np\n'), ((8012, 8039), 'numpy.stack', 'np.stack', (['cs_mask_per_query'], {}), '(cs_mask_per_query)\n', (8020, 8039), True, 'import numpy as np\n'), ((9121, 9147), 'numpy.stack', 'np.stack', (['out_supp'], {'axis': 
'(0)'}), '(out_supp, axis=0)\n', (9129, 9147), True, 'import numpy as np\n'), ((9230, 9276), 'random.sample', 'random.sample', (['self._valid_ids', 'num_categories'], {}), '(self._valid_ids, num_categories)\n', (9243, 9276), False, 'import random\n'), ((10612, 10641), 'numpy.stack', 'np.stack', (['support_set'], {'axis': '(0)'}), '(support_set, axis=0)\n', (10620, 10641), True, 'import numpy as np\n'), ((10906, 10934), 'numpy.stack', 'np.stack', (['query_imgs'], {'axis': '(0)'}), '(query_imgs, axis=0)\n', (10914, 10934), True, 'import numpy as np\n'), ((1166, 1190), 'random.choice', 'random.choice', (['image_ids'], {}), '(image_ids)\n', (1179, 1190), False, 'import random\n'), ((1276, 1313), 'os.path.join', 'os.path.join', (['self.img_dir', 'file_name'], {}), '(self.img_dir, file_name)\n', (1288, 1313), False, 'import os\n'), ((2827, 2873), 'numpy.array', 'np.array', (['[input_w, input_h]'], {'dtype': 'np.float32'}), '([input_w, input_h], dtype=np.float32)\n', (2835, 2873), True, 'import numpy as np\n'), ((4218, 4278), 'utils.image.color_aug', 'color_aug', (['self._data_rng', 'inp', 'self._eig_val', 'self._eig_vec'], {}), '(self._data_rng, inp, self._eig_val, self._eig_vec)\n', (4227, 4278), False, 'from utils.image import flip, color_aug\n'), ((5134, 5194), 'utils.image.get_affine_transform', 'get_affine_transform', (['center', 'scale', '(0)', '[output_w, output_h]'], {}), '(center, scale, 0, [output_w, output_h])\n', (5154, 5194), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((5207, 5268), 'numpy.zeros', 'np.zeros', (['(num_classes, output_h, output_w)'], {'dtype': 'np.float32'}), '((num_classes, output_h, output_w), dtype=np.float32)\n', (5215, 5268), True, 'import numpy as np\n'), ((5280, 5326), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (5288, 5326), True, 'import numpy as np\n'), ((5339, 5385), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], 
{'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (5347, 5385), True, 'import numpy as np\n'), ((5398, 5437), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (5406, 5437), True, 'import numpy as np\n'), ((5457, 5496), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.uint8'}), '(self.max_objs, dtype=np.uint8)\n', (5465, 5496), True, 'import numpy as np\n'), ((5519, 5579), 'numpy.zeros', 'np.zeros', (['(self.max_objs, num_classes * 2)'], {'dtype': 'np.float32'}), '((self.max_objs, num_classes * 2), dtype=np.float32)\n', (5527, 5579), True, 'import numpy as np\n'), ((5602, 5660), 'numpy.zeros', 'np.zeros', (['(self.max_objs, num_classes * 2)'], {'dtype': 'np.uint8'}), '((self.max_objs, num_classes * 2), dtype=np.uint8)\n', (5610, 5660), True, 'import numpy as np\n'), ((9875, 9891), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (9885, 9891), False, 'import cv2\n'), ((2218, 2259), 'numpy.random.choice', 'np.random.choice', (['good_anns', 'self.k_shots'], {}), '(good_anns, self.k_shots)\n', (2234, 2259), True, 'import numpy as np\n'), ((2430, 2476), 'os.path.join', 'os.path.join', (['self.supp_img_dir', 'img_file_name'], {}), '(self.supp_img_dir, img_file_name)\n', (2442, 2476), False, 'import os\n'), ((3271, 3332), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'w_border', 'high': '(img.shape[1] - w_border)'}), '(low=w_border, high=img.shape[1] - w_border)\n', (3288, 3332), True, 'import numpy as np\n'), ((3353, 3414), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'h_border', 'high': '(img.shape[0] - h_border)'}), '(low=h_border, high=img.shape[0] - h_border)\n', (3370, 3414), True, 'import numpy as np\n'), ((3717, 3735), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3733, 3735), True, 'import numpy as np\n'), ((6155, 6195), 'utils.image.affine_transform', 'affine_transform', (['bbox[:2]', 'trans_output'], {}), '(bbox[:2], 
trans_output)\n', (6171, 6195), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((6215, 6255), 'utils.image.affine_transform', 'affine_transform', (['bbox[2:]', 'trans_output'], {}), '(bbox[2:], trans_output)\n', (6231, 6255), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((6279, 6317), 'numpy.clip', 'np.clip', (['bbox[[0, 2]]', '(0)', '(output_w - 1)'], {}), '(bbox[[0, 2]], 0, output_w - 1)\n', (6286, 6317), True, 'import numpy as np\n'), ((6341, 6379), 'numpy.clip', 'np.clip', (['bbox[[1, 3]]', '(0)', '(output_h - 1)'], {}), '(bbox[[1, 3]], 0, output_h - 1)\n', (6348, 6379), True, 'import numpy as np\n'), ((8373, 8392), 'math.floor', 'math.floor', (['bbox[0]'], {}), '(bbox[0])\n', (8383, 8392), False, 'import math\n'), ((8394, 8413), 'math.floor', 'math.floor', (['bbox[1]'], {}), '(bbox[1])\n', (8404, 8413), False, 'import math\n'), ((8415, 8433), 'math.ceil', 'math.ceil', (['bbox[2]'], {}), '(bbox[2])\n', (8424, 8433), False, 'import math\n'), ((8435, 8453), 'math.ceil', 'math.ceil', (['bbox[3]'], {}), '(bbox[3])\n', (8444, 8453), False, 'import math\n'), ((10402, 10422), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (10412, 10422), False, 'import cv2\n'), ((11364, 11401), 'numpy.array', 'np.array', (['gt_det[0]'], {'dtype': 'np.float32'}), '(gt_det[0], dtype=np.float32)\n', (11372, 11401), True, 'import numpy as np\n'), ((11446, 11480), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {'dtype': 'np.float32'}), '((1, 6), dtype=np.float32)\n', (11454, 11480), True, 'import numpy as np\n'), ((6579, 6657), 'numpy.array', 'np.array', (['[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]'], {'dtype': 'np.float32'}), '([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n', (6587, 6657), True, 'import numpy as np\n'), ((8758, 8776), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8774, 8776), True, 'import numpy as np\n'), ((3115, 3139), 'numpy.arange', 'np.arange', 
(['(0.6)', '(1.4)', '(0.1)'], {}), '(0.6, 1.4, 0.1)\n', (3124, 3139), True, 'import numpy as np\n'), ((3520, 3537), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3535, 3537), True, 'import numpy as np\n'), ((3592, 3609), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3607, 3609), True, 'import numpy as np\n'), ((6496, 6508), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (6505, 6508), False, 'import math\n'), ((6510, 6522), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (6519, 6522), False, 'import math\n'), ((3659, 3676), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3674, 3676), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Chainer example: train a VAE on MNIST
"""
import argparse
import os
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
import chainerx
import net
def main():
    """Train a VAE on MNIST with Chainer, then save reconstruction/sample images.

    Parses CLI options, builds the encoder/decoder/prior, runs the Trainer
    loop, and writes image grids ('train', 'train_reconstructed', 'test',
    'test_reconstructed', 'sampled') into the output directory.
    """
    parser = argparse.ArgumentParser(description='Chainer example: VAE')
    parser.add_argument('--initmodel', '-m', type=str,
                        help='Initialize the model from given file')
    parser.add_argument('--resume', '-r', type=str,
                        help='Resume the optimization from snapshot')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--out', '-o', default='results',
                        help='Directory to output the result')
    parser.add_argument('--epoch', '-e', default=100, type=int,
                        help='number of epochs to learn')
    parser.add_argument('--dim-z', '-z', default=20, type=int,
                        help='dimention of encoded vector')
    parser.add_argument('--dim-h', default=500, type=int,
                        help='dimention of hidden layer')
    parser.add_argument('--beta', default=1.0, type=float,
                        help='Regularization coefficient for '
                        'the second term of ELBO bound')
    parser.add_argument('--k', '-k', default=1, type=int,
                        help='Number of Monte Carlo samples used in '
                        'encoded vector')
    parser.add_argument('--binary', action='store_true',
                        help='Use binarized MNIST')
    parser.add_argument('--batch-size', '-b', type=int, default=100,
                        help='learning minibatch size')
    parser.add_argument('--test', action='store_true',
                        help='Use tiny datasets for quick tests')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    device = chainer.get_device(args.device)
    device.use()
    print('Device: {}'.format(device))
    print('# dim z: {}'.format(args.dim_z))
    print('# Minibatch-size: {}'.format(args.batch_size))
    print('# epoch: {}'.format(args.epoch))
    print('')
    # Prepare VAE model, defined in net.py
    encoder = net.make_encoder(784, args.dim_z, args.dim_h)
    decoder = net.make_decoder(784, args.dim_z, args.dim_h,
                               binary_check=args.binary)
    prior = net.make_prior(args.dim_z)
    avg_elbo_loss = net.AvgELBOLoss(encoder, decoder, prior,
                                    beta=args.beta, k=args.k)
    avg_elbo_loss.to_device(device)
    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(avg_elbo_loss)
    # Initialize
    if args.initmodel is not None:
        chainer.serializers.load_npz(args.initmodel, avg_elbo_loss)
    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(withlabel=False)
    if args.binary:
        # Binarize dataset
        train = (train >= 0.5).astype(np.float32)
        test = (test >= 0.5).astype(np.float32)
    if args.test:
        # Shrink both splits to 100 samples for a quick smoke test.
        train, _ = chainer.datasets.split_dataset(train, 100)
        test, _ = chainer.datasets.split_dataset(test, 100)
    train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
    test_iter = chainer.iterators.SerialIterator(test, args.batch_size,
                                                 repeat=False, shuffle=False)
    # Set up an updater. StandardUpdater can explicitly specify a loss function
    # used in the training with 'loss_func' option
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=device, loss_func=avg_elbo_loss)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(extensions.Evaluator(
        test_iter, avg_elbo_loss, device=device))
    # TODO(niboshi): Temporarily disabled for chainerx. Fix it.
    if device.xp is not chainerx:
        trainer.extend(extensions.DumpGraph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/reconstr', 'main/kl_penalty', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())
    if args.resume is not None:
        chainer.serializers.load_npz(args.resume, trainer)
    # Run the training
    trainer.run()
    # Visualize the results
    def save_images(x, filename):
        # Save a 3x3 grid of 28x28 images to `filename`.
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(3, 3, figsize=(9, 9), dpi=100)
        for ai, xi in zip(ax.flatten(), x):
            ai.imshow(xi.reshape(28, 28))
        fig.savefig(filename)
    avg_elbo_loss.to_cpu()
    # Reconstruct a fixed selection of training images.
    train_ind = [1, 3, 5, 10, 2, 0, 13, 15, 17]
    x = chainer.Variable(np.asarray(train[train_ind]))
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        x1 = decoder(encoder(x).mean, inference=True).mean
    save_images(x.array, os.path.join(args.out, 'train'))
    save_images(x1.array, os.path.join(args.out, 'train_reconstructed'))
    # Reconstruct a fixed selection of test images.
    test_ind = [3, 2, 1, 18, 4, 8, 11, 17, 61]
    x = chainer.Variable(np.asarray(test[test_ind]))
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        x1 = decoder(encoder(x).mean, inference=True).mean
    save_images(x.array, os.path.join(args.out, 'test'))
    save_images(x1.array, os.path.join(args.out, 'test_reconstructed'))
    # draw images from randomly sampled z
    z = prior().sample(9)
    x = decoder(z, inference=True).mean
    save_images(x.array, os.path.join(args.out, 'sampled'))
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"net.make_encoder",
"net.AvgELBOLoss",
"net.make_decoder",
"chainer.no_backprop_mode",
"chainer.iterators.SerialIterator",
"os.path.join",
"chainer.training.extensions.LogReport",
"chainer.serializers.load_npz",
"chainer.training.extensions.Evaluator",
"net.make_prior"... | [((253, 312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chainer example: VAE"""'}), "(description='Chainer example: VAE')\n", (276, 312), False, 'import argparse\n'), ((2397, 2428), 'chainer.get_device', 'chainer.get_device', (['args.device'], {}), '(args.device)\n', (2415, 2428), False, 'import chainer\n'), ((2704, 2749), 'net.make_encoder', 'net.make_encoder', (['(784)', 'args.dim_z', 'args.dim_h'], {}), '(784, args.dim_z, args.dim_h)\n', (2720, 2749), False, 'import net\n'), ((2764, 2835), 'net.make_decoder', 'net.make_decoder', (['(784)', 'args.dim_z', 'args.dim_h'], {'binary_check': 'args.binary'}), '(784, args.dim_z, args.dim_h, binary_check=args.binary)\n', (2780, 2835), False, 'import net\n'), ((2879, 2905), 'net.make_prior', 'net.make_prior', (['args.dim_z'], {}), '(args.dim_z)\n', (2893, 2905), False, 'import net\n'), ((2926, 2992), 'net.AvgELBOLoss', 'net.AvgELBOLoss', (['encoder', 'decoder', 'prior'], {'beta': 'args.beta', 'k': 'args.k'}), '(encoder, decoder, prior, beta=args.beta, k=args.k)\n', (2941, 2992), False, 'import net\n'), ((3107, 3132), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {}), '()\n', (3130, 3132), False, 'import chainer\n'), ((3337, 3380), 'chainer.datasets.get_mnist', 'chainer.datasets.get_mnist', ([], {'withlabel': '(False)'}), '(withlabel=False)\n', (3363, 3380), False, 'import chainer\n'), ((3686, 3742), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['train', 'args.batch_size'], {}), '(train, args.batch_size)\n', (3718, 3742), False, 'import chainer\n'), ((3759, 3847), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['test', 'args.batch_size'], {'repeat': '(False)', 'shuffle': '(False)'}), '(test, args.batch_size, repeat=False,\n shuffle=False)\n', (3791, 3847), False, 'import chainer\n'), ((4039, 4139), 'chainer.training.updaters.StandardUpdater', 'training.updaters.StandardUpdater', (['train_iter', 
'optimizer'], {'device': 'device', 'loss_func': 'avg_elbo_loss'}), '(train_iter, optimizer, device=device,\n loss_func=avg_elbo_loss)\n', (4072, 4139), False, 'from chainer import training\n'), ((4160, 4222), 'chainer.training.Trainer', 'training.Trainer', (['updater', "(args.epoch, 'epoch')"], {'out': 'args.out'}), "(updater, (args.epoch, 'epoch'), out=args.out)\n", (4176, 4222), False, 'from chainer import training\n'), ((3229, 3288), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.initmodel', 'avg_elbo_loss'], {}), '(args.initmodel, avg_elbo_loss)\n', (3257, 3288), False, 'import chainer\n'), ((3565, 3607), 'chainer.datasets.split_dataset', 'chainer.datasets.split_dataset', (['train', '(100)'], {}), '(train, 100)\n', (3595, 3607), False, 'import chainer\n'), ((3626, 3667), 'chainer.datasets.split_dataset', 'chainer.datasets.split_dataset', (['test', '(100)'], {}), '(test, 100)\n', (3656, 3667), False, 'import chainer\n'), ((4242, 4303), 'chainer.training.extensions.Evaluator', 'extensions.Evaluator', (['test_iter', 'avg_elbo_loss'], {'device': 'device'}), '(test_iter, avg_elbo_loss, device=device)\n', (4262, 4303), False, 'from chainer.training import extensions\n'), ((4489, 4510), 'chainer.training.extensions.snapshot', 'extensions.snapshot', ([], {}), '()\n', (4508, 4510), False, 'from chainer.training import extensions\n'), ((4562, 4584), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {}), '()\n', (4582, 4584), False, 'from chainer.training import extensions\n'), ((4605, 4731), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['epoch', 'main/loss', 'validation/main/loss', 'main/reconstr',\n 'main/kl_penalty', 'elapsed_time']"], {}), "(['epoch', 'main/loss', 'validation/main/loss',\n 'main/reconstr', 'main/kl_penalty', 'elapsed_time'])\n", (4627, 4731), False, 'from chainer.training import extensions\n'), ((4766, 4790), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', 
([], {}), '()\n', (4788, 4790), False, 'from chainer.training import extensions\n'), ((4833, 4883), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.resume', 'trainer'], {}), '(args.resume, trainer)\n', (4861, 4883), False, 'import chainer\n'), ((5047, 5090), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(9, 9)', 'dpi': '(100)'}), '(3, 3, figsize=(9, 9), dpi=100)\n', (5059, 5090), True, 'import matplotlib.pyplot as plt\n'), ((5308, 5336), 'numpy.asarray', 'np.asarray', (['train[train_ind]'], {}), '(train[train_ind])\n', (5318, 5336), True, 'import numpy as np\n'), ((5347, 5383), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (5367, 5383), False, 'import chainer\n'), ((5385, 5411), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (5409, 5411), False, 'import chainer\n'), ((5497, 5528), 'os.path.join', 'os.path.join', (['args.out', '"""train"""'], {}), "(args.out, 'train')\n", (5509, 5528), False, 'import os\n'), ((5556, 5601), 'os.path.join', 'os.path.join', (['args.out', '"""train_reconstructed"""'], {}), "(args.out, 'train_reconstructed')\n", (5568, 5601), False, 'import os\n'), ((5676, 5702), 'numpy.asarray', 'np.asarray', (['test[test_ind]'], {}), '(test[test_ind])\n', (5686, 5702), True, 'import numpy as np\n'), ((5713, 5749), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (5733, 5749), False, 'import chainer\n'), ((5751, 5777), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (5775, 5777), False, 'import chainer\n'), ((5863, 5893), 'os.path.join', 'os.path.join', (['args.out', '"""test"""'], {}), "(args.out, 'test')\n", (5875, 5893), False, 'import os\n'), ((5921, 5965), 'os.path.join', 'os.path.join', (['args.out', '"""test_reconstructed"""'], {}), "(args.out, 'test_reconstructed')\n", (5933, 5965), False, 'import os\n'), ((6101, 6134), 
'os.path.join', 'os.path.join', (['args.out', '"""sampled"""'], {}), "(args.out, 'sampled')\n", (6113, 6134), False, 'import os\n'), ((4435, 4468), 'chainer.training.extensions.DumpGraph', 'extensions.DumpGraph', (['"""main/loss"""'], {}), "('main/loss')\n", (4455, 4468), False, 'from chainer.training import extensions\n')] |
from probeinterface import Probe
import numpy as np
import pytest
def _dummy_position():
n = 24
positions = np.zeros((n, 2))
for i in range(n):
x = i // 8
y = i % 8
positions[i] = x, y
positions *= 20
positions[8:16, 1] -= 10
return positions
def test_probe():
    """Exercise the main Probe API end to end: contacts, shape, annotations,
    channel wiring, geometry transforms, round-trips and slicing."""
    pos = _dummy_position()
    prb = Probe(ndim=2, si_units='um')
    # contacts can be (re)set with different shapes and parameters
    prb.set_contacts(positions=pos, shapes='circle', shape_params={'radius': 5})
    prb.set_contacts(positions=pos, shapes='square', shape_params={'width': 5})
    prb.set_contacts(positions=pos, shapes='rect', shape_params={'width': 8, 'height': 5})
    assert prb.get_contact_count() == 24
    # explicit planar contour for the probe outline
    contour = [(-20, -30), (20, -110), (60, -30), (60, 190), (-20, 190)]
    prb.set_planar_contour(contour)
    # automatically generated outline
    prb.create_auto_shape()
    # probe-level and contact-level annotations
    prb.annotate(manufacturer='me')
    assert 'manufacturer' in prb.annotations
    prb.annotate_contacts(impedance=np.random.rand(24) * 1000)
    assert 'impedance' in prb.contact_annotations
    # wire contacts to shuffled device channels
    channels = np.arange(0, 24, dtype='int')
    np.random.shuffle(channels)
    prb.set_device_channel_indices(channels)
    # contact ids accept both ints and strings
    prb.set_contact_ids(np.arange(24))
    prb.set_contact_ids([f'elec #{e}' for e in range(24)])
    # copy, then move and rotate in the plane
    prb_copy = prb.copy()
    prb.move([20, 50])
    prb.rotate(theta=40, center=[0, 0], axis=None)
    # interpolate per-contact scalars onto images
    samples = np.random.randn(24)
    image, xlims, ylims = prb.to_image(samples, method='cubic')
    image2, xlims, ylims = prb.to_image(samples, method='cubic', num_pixel=16)
    # lift to 3d and rotate about an axis
    prb_3d = prb.to_3d()
    prb_3d.rotate(theta=60, center=[0, 0, 0], axis=[0, 1, 0])
    # 3d -> 2d projection keeps the selected axes
    prb_3d = prb.to_3d()
    prb_2d = prb_3d.to_2d(axes="xz")
    assert np.allclose(prb_2d.contact_positions, prb_3d.contact_positions[:, [0, 2]])
    # shanks are iterable
    for shank in prb.get_shanks():
        pass
    # round trips: dict, numpy (partial and complete), dataframe
    other = Probe.from_dict(prb.to_dict())
    other = Probe.from_numpy(prb.to_numpy(complete=False))
    other2 = Probe.from_numpy(prb.to_numpy(complete=True))
    other_3d = Probe.from_numpy(prb_3d.to_numpy(complete=True))
    other = Probe.from_dataframe(prb.to_dataframe(complete=True))
    other2 = Probe.from_dataframe(prb.to_dataframe(complete=False))
    other_3d = Probe.from_dataframe(prb_3d.to_dataframe(complete=True))
    assert other_3d.ndim == 3
    # slicing with an integer index array
    keep = np.arange(0, 18, 2)
    sub = prb.get_slice(keep)
    assert sub.get_contact_count() == 9
    assert sub.contact_annotations['impedance'].shape == (9, )
    # slicing with a boolean mask
    mask = np.ones(24, dtype='bool')
    mask[::2] = False
    sub = prb.get_slice(mask)
    assert sub.get_contact_count() == 12
    assert sub.contact_annotations['impedance'].shape == (12, )
def test_set_shanks():
    """Check that shank ids round-trip through Probe.set_shank_ids."""
    prb = Probe(ndim=2, si_units='um')
    prb.set_contacts(positions=np.arange(20).reshape(10, 2),
                     shapes='circle',
                     shape_params={'radius': 5})
    # put every contact on its own shank for simplicity
    ids = np.arange(10)
    prb.set_shank_ids(ids)
    # ids are stored as strings internally
    assert all(prb.shank_ids == ids.astype(str))
if __name__ == '__main__':
    # allow running the tests directly as a script (without pytest)
    test_probe()
    test_set_shanks()
| [
"probeinterface.Probe.from_dict",
"numpy.random.randn",
"probeinterface.Probe",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"probeinterface.Probe.from_numpy",
"numpy.arange",
"numpy.random.rand",
"probeinterface.Probe.from_dataframe",
"numpy.random.shuffle"
] | [((119, 135), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (127, 135), True, 'import numpy as np\n'), ((369, 397), 'probeinterface.Probe', 'Probe', ([], {'ndim': '(2)', 'si_units': '"""um"""'}), "(ndim=2, si_units='um')\n", (374, 397), False, 'from probeinterface import Probe\n'), ((1173, 1202), 'numpy.arange', 'np.arange', (['(0)', '(24)'], {'dtype': '"""int"""'}), "(0, 24, dtype='int')\n", (1182, 1202), True, 'import numpy as np\n'), ((1207, 1231), 'numpy.random.shuffle', 'np.random.shuffle', (['chans'], {}), '(chans)\n', (1224, 1231), True, 'import numpy as np\n'), ((1325, 1338), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (1334, 1338), True, 'import numpy as np\n'), ((1637, 1656), 'numpy.random.randn', 'np.random.randn', (['(24)'], {}), '(24)\n', (1652, 1656), True, 'import numpy as np\n'), ((2336, 2414), 'numpy.allclose', 'np.allclose', (['probe_2d.contact_positions', 'probe_3d.contact_positions[:, [0, 2]]'], {}), '(probe_2d.contact_positions, probe_3d.contact_positions[:, [0, 2]])\n', (2347, 2414), True, 'import numpy as np\n'), ((2772, 2790), 'probeinterface.Probe.from_dict', 'Probe.from_dict', (['d'], {}), '(d)\n', (2787, 2790), False, 'from probeinterface import Probe\n'), ((2876, 2897), 'probeinterface.Probe.from_numpy', 'Probe.from_numpy', (['arr'], {}), '(arr)\n', (2892, 2897), False, 'from probeinterface import Probe\n'), ((2951, 2972), 'probeinterface.Probe.from_numpy', 'Probe.from_numpy', (['arr'], {}), '(arr)\n', (2967, 2972), False, 'from probeinterface import Probe\n'), ((3031, 3052), 'probeinterface.Probe.from_numpy', 'Probe.from_numpy', (['arr'], {}), '(arr)\n', (3047, 3052), False, 'from probeinterface import Probe\n'), ((3144, 3168), 'probeinterface.Probe.from_dataframe', 'Probe.from_dataframe', (['df'], {}), '(df)\n', (3164, 3168), False, 'from probeinterface import Probe\n'), ((3226, 3250), 'probeinterface.Probe.from_dataframe', 'Probe.from_dataframe', (['df'], {}), '(df)\n', (3246, 3250), False, 'from 
probeinterface import Probe\n'), ((3334, 3358), 'probeinterface.Probe.from_dataframe', 'Probe.from_dataframe', (['df'], {}), '(df)\n', (3354, 3358), False, 'from probeinterface import Probe\n'), ((3427, 3446), 'numpy.arange', 'np.arange', (['(0)', '(18)', '(2)'], {}), '(0, 18, 2)\n', (3436, 3446), True, 'import numpy as np\n'), ((3840, 3865), 'numpy.ones', 'np.ones', (['(24)'], {'dtype': '"""bool"""'}), "(24, dtype='bool')\n", (3847, 3865), True, 'import numpy as np\n'), ((4179, 4207), 'probeinterface.Probe', 'Probe', ([], {'ndim': '(2)', 'si_units': '"""um"""'}), "(ndim=2, si_units='um')\n", (4184, 4207), False, 'from probeinterface import Probe\n'), ((4432, 4445), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4441, 4445), True, 'import numpy as np\n'), ((1058, 1076), 'numpy.random.rand', 'np.random.rand', (['(24)'], {}), '(24)\n', (1072, 1076), True, 'import numpy as np\n'), ((4255, 4268), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (4264, 4268), True, 'import numpy as np\n')] |
#!/usr/bin/env python
u"""
load_nodal_corrections.py (12/2020)
Calculates the nodal corrections for tidal constituents
Modification of ARGUMENTS fortran subroutine by <NAME> 03/1999
CALLING SEQUENCE:
pu,pf,G = load_nodal_corrections(MJD,constituents)
INPUTS:
MJD: Modified Julian Day of input date
constituents: tidal constituent IDs
OUTPUTS:
pu,pf: nodal corrections for the constituents
G: phase correction in degrees
OPTIONS:
DELTAT: time correction for converting to Ephemeris Time (days)
CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
PROGRAM DEPENDENCIES:
calc_astrol_longitudes.py: computes the basic astronomical mean longitudes
REFERENCES:
<NAME> and <NAME>, "Admiralty Manual of Tides", HMSO, (1941).
<NAME>, "Manual of Harmonic Analysis and Prediction of Tides"
US Coast and Geodetic Survey, Special Publication, 98, (1958).
<NAME> and <NAME>, "The harmonic analysis of tidal model
time series", Advances in Water Resources, 12, (1989).
UPDATE HISTORY:
Updated 12/2020: fix k1 for FES models
Updated 08/2020: change time variable names to not overwrite functions
update nodal corrections for FES models
Updated 07/2020: added function docstrings. add shallow water constituents
Updated 09/2019: added netcdf option to CORRECTIONS option
Updated 08/2018: added correction option ATLAS for localized OTIS solutions
Updated 07/2018: added option to use GSFC GOT nodal corrections
Updated 09/2017: Rewritten in Python
Rewritten in Matlab by <NAME> 01/2003
Written by <NAME> 03/1999
"""
import numpy as np
from pyTMD.calc_astrol_longitudes import calc_astrol_longitudes
def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'):
    """
    Calculates the nodal corrections for tidal constituents

    Arguments
    ---------
    MJD: modified julian day of input date
    constituents: tidal constituent IDs

    Keyword arguments
    -----------------
    DELTAT: time correction for converting to Ephemeris Time (days)
    CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models

    Returns
    -------
    pu,pf: nodal corrections for the constituents
    G: phase correction in degrees

    Raises
    ------
    ValueError: if a requested constituent ID is not supported
    """
    #-- constituents array (not all are included in tidal program)
    cindex = ['sa','ssa','mm','msf','mf','mt','alpha1','2q1','sigma1','q1',
        'rho1','o1','tau1','m1','chi1','pi1','p1','s1','k1','psi1','phi1',
        'theta1','j1','oo1','2n2','mu2','n2','nu2','m2a','m2','m2b','lambda2',
        'l2','t2','s2','r2','k2','eta2','mns2','2sm2','m3','mk3','s3','mn4',
        'm4','ms4','mk4','s4','s5','m6','s6','s7','s8','m8','mks2','msqm','mtm',
        'n4','eps2','z0']
    #-- degrees to radians
    dtr = np.pi/180.0
    #-- set function for astronomical longitudes
    ASTRO5 = True if CORRECTIONS in ('GOT','FES') else False
    #-- convert from Modified Julian Dates into Ephemeris Time
    s,h,p,omega,pp = calc_astrol_longitudes(MJD+DELTAT, ASTRO5=ASTRO5)
    hour = (MJD % 1)*24.0
    t1 = 15.0*hour
    t2 = 30.0*hour
    nt = len(np.atleast_1d(MJD))
    #-- Determine equilibrium arguments
    arg = np.zeros((nt,60))
    arg[:,0] = h - pp #-- Sa
    arg[:,1] = 2.0*h #-- Ssa
    arg[:,2] = s - p #-- Mm
    arg[:,3] = 2.0*s - 2.0*h #-- MSf
    arg[:,4] = 2.0*s #-- Mf
    arg[:,5] = 3.0*s - p #-- Mt
    arg[:,6] = t1 - 5.0*s + 3.0*h + p - 90.0 #-- alpha1
    arg[:,7] = t1 - 4.0*s + h + 2.0*p - 90.0 #-- 2Q1
    arg[:,8] = t1 - 4.0*s + 3.0*h - 90.0 #-- sigma1
    arg[:,9] = t1 - 3.0*s + h + p - 90.0 #-- q1
    arg[:,10] = t1 - 3.0*s + 3.0*h - p - 90.0 #-- rho1
    arg[:,11] = t1 - 2.0*s + h - 90.0 #-- o1
    arg[:,12] = t1 - 2.0*s + 3.0*h + 90.0 #-- tau1
    arg[:,13] = t1 - s + h + 90.0 #-- M1
    arg[:,14] = t1 - s + 3.0*h - p + 90.0 #-- chi1
    arg[:,15] = t1 - 2.0*h + pp - 90.0 #-- pi1
    arg[:,16] = t1 - h - 90.0 #-- p1
    if CORRECTIONS in ('OTIS','ATLAS','netcdf'):
        arg[:,17] = t1 + 90.0 #-- s1
    elif CORRECTIONS in ('GOT','FES'):
        arg[:,17] = t1 + 180.0 #-- s1 (Doodson's phase)
    arg[:,18] = t1 + h + 90.0 #-- k1
    arg[:,19] = t1 + 2.0*h - pp + 90.0 #-- psi1
    arg[:,20] = t1 + 3.0*h + 90.0 #-- phi1
    arg[:,21] = t1 + s - h + p + 90.0 #-- theta1
    arg[:,22] = t1 + s + h - p + 90.0 #-- J1
    arg[:,23] = t1 + 2.0*s + h + 90.0 #-- OO1
    arg[:,24] = t2 - 4.0*s + 2.0*h + 2.0*p #-- 2N2
    arg[:,25] = t2 - 4.0*s + 4.0*h #-- mu2
    arg[:,26] = t2 - 3.0*s + 2.0*h + p #-- n2
    arg[:,27] = t2 - 3.0*s + 4.0*h - p #-- nu2
    arg[:,28] = t2 - 2.0*s + h + pp #-- M2a
    arg[:,29] = t2 - 2.0*s + 2.0*h #-- M2
    arg[:,30] = t2 - 2.0*s + 3.0*h - pp #-- M2b
    arg[:,31] = t2 - s + p + 180.0 #-- lambda2
    arg[:,32] = t2 - s + 2.0*h - p + 180.0 #-- L2
    arg[:,33] = t2 - h + pp #-- T2
    arg[:,34] = t2 #-- S2
    arg[:,35] = t2 + h - pp + 180.0 #-- R2
    arg[:,36] = t2 + 2.0*h #-- K2
    arg[:,37] = t2 + s + 2.0*h - pp #-- eta2
    arg[:,38] = t2 - 5.0*s + 4.0*h + p #-- MNS2
    arg[:,39] = t2 + 2.0*s - 2.0*h #-- 2SM2
    arg[:,40] = 1.5*arg[:,29] #-- M3
    arg[:,41] = arg[:,18] + arg[:,29] #-- MK3
    arg[:,42] = 3.0*t1 #-- S3
    arg[:,43] = arg[:,26] + arg[:,29] #-- MN4
    arg[:,44] = 2.0*arg[:,29] #-- M4
    arg[:,45] = arg[:,29] + arg[:,34] #-- MS4
    arg[:,46] = arg[:,29] + arg[:,36] #-- MK4
    arg[:,47] = 4.0*t1 #-- S4
    arg[:,48] = 5.0*t1 #-- S5
    arg[:,49] = 3.0*arg[:,29] #-- M6
    arg[:,50] = 3.0*t2 #-- S6
    arg[:,51] = 7.0*t1 #-- S7
    arg[:,52] = 4.0*t2 #-- S8
    #-- shallow water constituents
    arg[:,53] = 4.0*arg[:,29] #-- m8
    arg[:,54] = arg[:,29] + arg[:,36] - arg[:,34] #-- mks2
    arg[:,55] = 4.0*s - 2.0*h #-- msqm
    arg[:,56] = 3.0*s - p #-- mtm
    arg[:,57] = 2.0*arg[:,26] #-- n4
    arg[:,58] = t2 - 5.0*s + 4.0*h + p #-- eps2
    #-- mean sea level
    arg[:,59] = 0.0 #-- Z0
    #-- determine nodal corrections f and u
    sinn = np.sin(omega*dtr)
    cosn = np.cos(omega*dtr)
    sin2n = np.sin(2.0*omega*dtr)
    cos2n = np.cos(2.0*omega*dtr)
    sin3n = np.sin(3.0*omega*dtr)
    #-- set nodal corrections
    f = np.zeros((nt,60))
    u = np.zeros((nt,60))
    #-- determine nodal corrections f and u for each model type
    if CORRECTIONS in ('OTIS','ATLAS','netcdf'):
        f[:,0] = 1.0 #-- Sa
        f[:,1] = 1.0 #-- Ssa
        f[:,2] = 1.0 - 0.130*cosn #-- Mm
        f[:,3] = 1.0 #-- MSf
        f[:,4] = 1.043 + 0.414*cosn #-- Mf
        temp1 = (1.0 + 0.203*cosn + 0.040*cos2n)**2
        temp2 = (0.203*sinn + 0.040*sin2n)**2
        f[:,5] = np.sqrt(temp1 + temp2) #-- Mt
        f[:,6] = 1.0 #-- alpha1
        f[:,7] = np.sqrt((1.0 + 0.188*cosn)**2 + (0.188*sinn)**2) #-- 2Q1
        f[:,8] = f[:,7] #-- sigma1
        f[:,9] = f[:,7] #-- q1
        f[:,10] = f[:,7] #-- rho1
        temp1 = (1.0 + 0.189*cosn - 0.0058*cos2n)**2
        temp2 = (0.189*sinn - 0.0058*sin2n)**2
        f[:,11] = np.sqrt(temp1 + temp2) #-- O1
        f[:,12] = 1.0 #-- tau1
        #-- Doodson's
        # Mtmp1 = 2.0*np.cos(p*dtr) + 0.4*np.cos((p-omega)*dtr)
        # Mtmp2 = np.sin(p*dtr) + 0.2*np.sin((p-omega)*dtr)
        #-- Ray's
        Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr)
        Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr)
        f[:,13] = np.sqrt(Mtmp1**2 + Mtmp2**2) #-- M1
        f[:,14] = np.sqrt((1.0+0.221*cosn)**2+(0.221*sinn)**2) #-- chi1
        f[:,15] = 1.0 #-- pi1
        f[:,16] = 1.0 #-- P1
        f[:,17] = 1.0 #-- S1
        temp1 = (1.0 + 0.1158*cosn - 0.0029*cos2n)**2
        temp2 = (0.1554*sinn - 0.0029*sin2n)**2
        f[:,18] = np.sqrt(temp1 + temp2) #-- K1
        f[:,19] = 1.0 #-- psi1
        f[:,20] = 1.0 #-- phi1
        f[:,21] = 1.0 #-- theta1
        f[:,22] = np.sqrt((1.0+0.169*cosn)**2 + (0.227*sinn)**2) #-- J1
        temp1 = (1.0 + 0.640*cosn + 0.134*cos2n)**2
        temp2 = (0.640*sinn + 0.134*sin2n)**2
        f[:,23] = np.sqrt(temp1 + temp2) #-- OO1
        temp1 = (1.0 - 0.03731*cosn + 0.00052*cos2n)**2
        temp2 = (0.03731*sinn - 0.00052*sin2n)**2
        f[:,24] = np.sqrt(temp1 + temp2) #-- 2N2
        f[:,25] = f[:,24] #-- mu2
        f[:,26] = f[:,24] #-- N2
        f[:,27] = f[:,24] #-- nu2
        f[:,28] = 1.0 #-- M2a
        f[:,29] = f[:,24] #-- M2
        f[:,30] = 1.0 #-- M2b
        f[:,31] = 1.0 #-- lambda2
        Ltmp1 = 1.0 - 0.25*np.cos(2*p*dtr) - 0.11*np.cos((2.0*p-omega)*dtr) - 0.04*cosn
        Ltmp2 = 0.25*np.sin(2*p*dtr) + 0.11*np.sin((2.0*p-omega)*dtr) + 0.04*sinn
        f[:,32] = np.sqrt(Ltmp1**2 + Ltmp2**2) #-- L2
        f[:,33] = 1.0 #-- T2
        f[:,34] = 1.0 #-- S2
        f[:,35] = 1.0 #-- R2
        temp1 = (1.0 + 0.2852*cosn + 0.0324*cos2n)**2
        temp2 = (0.3108*sinn + 0.0324*sin2n)**2
        f[:,36] = np.sqrt(temp1 + temp2) #-- K2
        f[:,37] = np.sqrt((1.0 + 0.436*cosn)**2 + (0.436*sinn)**2) #-- eta2
        f[:,38] = f[:,29]**2 #-- MNS2
        f[:,39] = f[:,29] #-- 2SM2
        f[:,40] = 1.0 #-- M3 (wrong)
        f[:,41] = f[:,18]*f[:,29] #-- MK3
        f[:,42] = 1.0 #-- S3
        f[:,43] = f[:,29]**2 #-- MN4
        f[:,44] = f[:,43] #-- M4
        f[:,45] = f[:,43] #-- MS4
        f[:,46] = f[:,29]*f[:,36] #-- MK4
        f[:,47] = 1.0 #-- S4
        f[:,48] = 1.0 #-- S5
        f[:,49] = f[:,29]**3 #-- M6
        f[:,50] = 1.0 #-- S6
        f[:,51] = 1.0 #-- S7
        f[:,52] = 1.0 #-- S8
        #-- shallow water constituents
        f[:,53] = f[:,29]**4 #-- m8
        f[:,54] = f[:,29]*f[:,36] #-- mks2
        f[:,55] = f[:,4] #-- msqm
        f[:,56] = f[:,4] #-- mtm
        f[:,57] = f[:,29]**2 #-- n4
        f[:,58] = f[:,29] #-- eps2
        #-- mean sea level
        f[:,59] = 1.0 #-- Z0
        u[:,0] = 0.0 #-- Sa
        u[:,1] = 0.0 #-- Ssa
        u[:,2] = 0.0 #-- Mm
        u[:,3] = 0.0 #-- MSf
        u[:,4] = -23.7*sinn + 2.7*sin2n - 0.4*sin3n #-- Mf
        temp1 = -(0.203*sinn + 0.040*sin2n)
        temp2 = (1.0 + 0.203*cosn + 0.040*cos2n)
        u[:,5] = np.arctan(temp1/temp2)/dtr #-- Mt
        u[:,6] = 0.0 #-- alpha1
        u[:,7] = np.arctan(0.189*sinn/(1.0 + 0.189*cosn))/dtr #-- 2Q1
        u[:,8] = u[:,7] #-- sigma1
        u[:,9] = u[:,7] #-- q1
        u[:,10] = u[:,7] #-- rho1
        u[:,11] = 10.8*sinn - 1.3*sin2n + 0.2*sin3n #-- O1
        u[:,12] = 0.0 #-- tau1
        u[:,13] = np.arctan2(Mtmp2,Mtmp1)/dtr #-- M1
        u[:,14] = np.arctan(-0.221*sinn/(1.0+0.221*cosn))/dtr #-- chi1
        u[:,15] = 0.0 #-- pi1
        u[:,16] = 0.0 #-- P1
        u[:,17] = 0.0 #-- S1
        temp1 = (-0.1554*sinn + 0.0029*sin2n)
        temp2 = (1.0 + 0.1158*cosn - 0.0029*cos2n)
        u[:,18] = np.arctan(temp1/temp2)/dtr #-- K1
        u[:,19] = 0.0 #-- psi1
        u[:,20] = 0.0 #-- phi1
        u[:,21] = 0.0 #-- theta1
        u[:,22] = np.arctan(-0.227*sinn/(1.0+0.169*cosn))/dtr #-- J1
        temp1 = -(0.640*sinn + 0.134*sin2n)
        temp2 = (1.0 + 0.640*cosn + 0.134*cos2n)
        u[:,23] = np.arctan(temp1/temp2)/dtr #-- OO1
        temp1 = (-0.03731*sinn + 0.00052*sin2n)
        temp2 = (1.0 - 0.03731*cosn + 0.00052*cos2n)
        u[:,24] = np.arctan(temp1/temp2)/dtr #-- 2N2
        u[:,25] = u[:,24] #-- mu2
        u[:,26] = u[:,24] #-- N2
        u[:,27] = u[:,24] #-- nu2
        u[:,28] = 0.0 #-- M2a
        u[:,29] = u[:,24] #-- M2
        u[:,30] = 0.0 #-- M2b
        u[:,31] = 0.0 #-- lambda2
        u[:,32] = np.arctan(-Ltmp2/Ltmp1)/dtr #-- L2
        u[:,33] = 0.0 #-- T2
        u[:,34] = 0.0 #-- S2
        u[:,35] = 0.0 #-- R2
        temp1 = -(0.3108*sinn+0.0324*sin2n)
        temp2 = (1.0 + 0.2852*cosn + 0.0324*cos2n)
        u[:,36] = np.arctan(temp1/temp2)/dtr #-- K2
        u[:,37] = np.arctan(-0.436*sinn/(1.0 + 0.436*cosn))/dtr #-- eta2
        u[:,38] = u[:,29]*2.0 #-- MNS2
        u[:,39] = u[:,29] #-- 2SM2
        u[:,40] = 1.50*u[:,29] #-- M3
        u[:,41] = u[:,29] + u[:,18] #-- MK3
        u[:,42] = 0.0 #-- S3
        u[:,43] = 2.0*u[:,29] #-- MN4
        u[:,44] = u[:,43] #-- M4
        u[:,45] = u[:,29] #-- MS4
        u[:,46] = u[:,29] + u[:,36] #-- MK4
        u[:,47] = 0.0 #-- S4
        u[:,48] = 0.0 #-- S5
        u[:,49] = 3.0*u[:,29] #-- M6
        u[:,50] = 0.0 #-- S6
        u[:,51] = 0.0 #-- S7
        u[:,52] = 0.0 #-- S8
        #-- mean sea level
        u[:,59] = 0.0 #-- Z0
    elif CORRECTIONS in ('FES',):
        #-- additional astronomical terms for FES models
        II = np.arccos(0.913694997 - 0.035692561*np.cos(omega*dtr))
        at1 = np.arctan(1.01883*np.tan(omega*dtr/2.0))
        at2 = np.arctan(0.64412*np.tan(omega*dtr/2.0))
        xi = -at1 - at2 + omega*dtr
        xi[xi > np.pi] -= 2.0*np.pi
        nu = at1 - at2
        I2 = np.tan(II/2.0)
        Ra1 = np.sqrt(1.0 - 12.0*(I2**2)*np.cos(2.0*(p - xi)) + 36.0*(I2**4))
        P2 = np.sin(2.0*(p - xi))
        Q2 = 1.0/(6.0*(I2**2)) - np.cos(2.0*(p - xi))
        R = np.arctan(P2/Q2)
        P_prime = np.sin(2.0*II)*np.sin(nu)
        Q_prime = np.sin(2.0*II)*np.cos(nu) + 0.3347
        nu_prime = np.arctan(P_prime/Q_prime)
        P_sec = (np.sin(II)**2)*np.sin(2.0*nu)
        Q_sec = (np.sin(II)**2)*np.cos(2.0*nu) + 0.0727
        nu_sec = 0.5*np.arctan(P_sec/Q_sec)
        f[:,0] = 1.0 #-- Sa
        f[:,1] = 1.0 #-- Ssa
        f[:,2] = (2.0/3.0 - np.power(np.sin(II),2.0))/0.5021 #-- Mm
        f[:,3] = 1.0 #-- MSf
        f[:,4] = np.power(np.sin(II),2.0)/0.1578 #-- Mf
        f[:,7] = np.sin(II)*(np.cos(II/2.0)**2)/0.38 #-- 2Q1
        f[:,8] = f[:,7] #-- sigma1
        f[:,9] = f[:,7] #-- q1
        f[:,10] = f[:,7] #-- rho1
        f[:,11] = f[:,7] #-- O1
        #-- Ray's
        Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr)
        Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr)
        f[:,13] = np.sqrt(Mtmp1**2 + Mtmp2**2) #-- M1
        f[:,14] = np.sin(2.0*II) / 0.7214 #-- chi1
        f[:,15] = 1.0 #-- pi1
        f[:,16] = 1.0 #-- P1
        f[:,17] = 1.0 #-- S1
        temp1 = 0.8965*np.power(np.sin(2.0*II),2.0)
        temp2 = 0.6001*np.sin(2.0*II)*np.cos(nu)
        f[:,18] = np.sqrt(temp1 + temp2 + 0.1006) #-- K1
        f[:,19] = 1.0 #-- psi1
        f[:,20] = 1.0 #-- phi1
        f[:,21] = f[:,14] #-- theta1
        f[:,22] = f[:,14] #-- J1
        f[:,23] = np.sin(II)*np.power(np.sin(II/2.0),2.0)/0.01640 #-- OO1
        f[:,24] = np.power(np.cos(II/2.0),4.0)/0.9154 #-- 2N2
        f[:,25] = f[:,24] #-- mu2
        f[:,26] = f[:,24] #-- N2
        f[:,27] = f[:,24] #-- nu2
        f[:,28] = 1.0 #-- M2a
        f[:,29] = f[:,24] #-- M2
        f[:,30] = 1.0 #-- M2b
        f[:,31] = f[:,29] #-- lambda2
        f[:,32] = f[:,29]*Ra1 #-- L2
        f[:,33] = 1.0 #-- T2
        f[:,34] = 1.0 #-- S2
        f[:,35] = 1.0 #-- R2
        temp1 = 19.0444 * np.power(np.sin(II),4.0)
        temp2 = 2.7702 * np.power(np.sin(II),2.0) * np.cos(2.0*nu)
        f[:,36] = np.sqrt(temp1 + temp2 + 0.0981) #-- K2
        f[:,37] = np.power(np.sin(II),2.0)/0.1565 #-- eta2
        f[:,38] = f[:,29]**2 #-- MNS2
        f[:,39] = f[:,29] #-- 2SM2
        f[:,40] = np.power(np.cos(II/2.0), 6.0) / 0.8758 #-- M3
        f[:,41] = f[:,18]*f[:,29] #-- MK3
        f[:,42] = 1.0 #-- S3
        f[:,43] = f[:,29]**2 #-- MN4
        f[:,44] = f[:,43] #-- M4
        f[:,45] = f[:,29] #-- MS4
        f[:,46] = f[:,29]*f[:,36] #-- MK4
        f[:,47] = 1.0 #-- S4
        f[:,48] = 1.0 #-- S5
        f[:,49] = f[:,29]**3 #-- M6
        f[:,50] = 1.0 #-- S6
        f[:,51] = 1.0 #-- S7
        f[:,52] = 1.0 #-- S8
        #-- shallow water constituents
        f[:,53] = f[:,29]**4 #-- m8
        f[:,54] = f[:,29]*f[:,36] #-- mks2
        f[:,55] = f[:,4] #-- msqm
        f[:,56] = f[:,4] #-- mtm
        f[:,57] = f[:,29]**2 #-- n4
        f[:,58] = f[:,29] #-- eps2
        #-- mean sea level
        f[:,59] = 1.0 #-- Z0
        u[:,0] = 0.0 #-- Sa
        u[:,1] = 0.0 #-- Ssa
        u[:,2] = 0.0 #-- Mm
        u[:,3] = (2.0*xi - 2.0*nu)/dtr #-- MSf
        u[:,4] = -2.0*xi/dtr #-- Mf
        u[:,7] = (2.0*xi - nu)/dtr #-- 2Q1
        u[:,8] = u[:,7] #-- sigma1
        u[:,9] = u[:,7] #-- q1
        u[:,10] = u[:,7] #-- rho1
        u[:,11] = u[:,7] #-- O1
        u[:,13] = np.arctan2(Mtmp2,Mtmp1)/dtr #-- M1
        u[:,14] = -nu/dtr #-- chi1
        u[:,15] = 0.0 #-- pi1
        u[:,16] = 0.0 #-- P1
        u[:,17] = 0.0 #-- S1
        u[:,18] = -nu_prime/dtr #-- K1
        u[:,19] = 0.0 #-- psi1
        u[:,20] = 0.0 #-- phi1
        u[:,21] = -nu/dtr #-- theta1
        u[:,22] = u[:,21] #-- J1
        u[:,23] = (-2.0*xi - nu)/dtr #-- OO1
        u[:,24] = (2.0*xi - 2.0*nu)/dtr #-- 2N2
        u[:,25] = u[:,24] #-- mu2
        u[:,26] = u[:,24] #-- N2
        u[:,27] = u[:,24] #-- nu2
        u[:,29] = u[:,24] #-- M2
        u[:,31] = (2.0*xi - 2.0*nu)/dtr #-- lambda2
        u[:,32] = (2.0*xi - 2.0*nu - R)/dtr #-- L2
        u[:,33] = 0.0 #-- T2
        u[:,34] = 0.0 #-- S2
        u[:,35] = 0.0 #-- R2
        u[:,36] = -2.0*nu_sec/dtr #-- K2
        u[:,37] = -2.0*nu/dtr #-- eta2
        u[:,38] = (4.0*xi - 4.0*nu)/dtr #-- mns2
        u[:,39] = (2.0*xi - 2.0*nu)/dtr #-- 2SM2
        u[:,40] = (3.0*xi - 3.0*nu)/dtr #-- M3
        u[:,41] = (2.0*xi - 2.0*nu - 2.0*nu_prime)/dtr #-- MK3
        u[:,42] = 0.0 #-- S3
        u[:,43] = (4.0*xi - 4.0*nu)/dtr #-- MN4
        u[:,44] = (4.0*xi - 4.0*nu)/dtr #-- M4
        u[:,45] = (2.0*xi - 2.0*nu)/dtr #-- MS4
        u[:,46] = (2.0*xi - 2.0*nu - 2.0*nu_sec)/dtr #-- MK4
        u[:,47] = 0.0 #-- S4
        u[:,48] = 0.0 #-- S5
        u[:,49] = (6.0*xi - 6.0*nu)/dtr #-- M6
        u[:,50] = 0.0 #-- S6
        u[:,51] = 0.0 #-- S7
        u[:,52] = 0.0 #-- S8
        #-- shallow water constituents
        u[:,53] = (8.0*xi - 8.0*nu)/dtr #-- m8
        u[:,54] = (2.0*xi - 2.0*nu - 2.0*nu_sec)/dtr #-- mks2
        u[:,55] = u[:,4] #-- msqm
        u[:,56] = u[:,4] #-- mtm
        u[:,57] = (4.0*xi - 4.0*nu)/dtr #-- n4
        u[:,58] = u[:,29] #-- eps2
        #-- mean sea level
        u[:,59] = 0.0 #-- Z0
    elif CORRECTIONS in ('GOT',):
        f[:,9] = 1.009 + 0.187*cosn - 0.015*cos2n#-- Q1
        f[:,11] = f[:,9]#-- O1
        f[:,16] = 1.0 #-- P1
        f[:,17] = 1.0 #-- S1
        f[:,18] = 1.006 + 0.115*cosn - 0.009*cos2n#-- K1
        f[:,26] = 1.000 - 0.037*cosn#-- N2
        f[:,29] = f[:,26]#-- M2
        f[:,34] = 1.0 #-- S2
        f[:,36] = 1.024 + 0.286*cosn + 0.008*cos2n#-- K2
        f[:,44] = f[:,29]**2#-- M4
        u[:,9] = 10.8*sinn - 1.3*sin2n#-- Q1
        u[:,11] = u[:,9]#-- O1
        u[:,16] = 0.0 #-- P1
        u[:,17] = 0.0 #-- S1
        u[:,18] = -8.9*sinn + 0.7*sin2n#-- K1
        u[:,26] = -2.1*sinn#-- N2
        u[:,29] = u[:,26]#-- M2
        u[:,34] = 0.0 #-- S2
        u[:,36] = -17.7*sinn + 0.7*sin2n#-- K2
        u[:,44] = -4.2*sinn#-- M4
    #-- take pu,pf,G for the set of given constituents
    nc = len(constituents)
    pu = np.zeros((nt,nc))
    pf = np.zeros((nt,nc))
    G = np.zeros((nt,nc))
    for i,c in enumerate(constituents):
        #-- map between given constituents and supported in tidal program
        #-- raise a descriptive error for unsupported constituent IDs
        #-- (the previous tuple-unpack produced an opaque unpacking error)
        try:
            j = cindex.index(c)
        except ValueError:
            raise ValueError('Unsupported constituent: {0}'.format(c)) from None
        pu[:,i] = u[:,j]*dtr
        pf[:,i] = f[:,j]
        G[:,i] = arg[:,j]
    #-- return values as tuple
    return (pu,pf,G)
| [
"numpy.arctan2",
"numpy.zeros",
"numpy.sin",
"numpy.tan",
"numpy.cos",
"numpy.arctan",
"numpy.atleast_1d",
"pyTMD.calc_astrol_longitudes.calc_astrol_longitudes",
"numpy.sqrt"
] | [((3157, 3208), 'pyTMD.calc_astrol_longitudes.calc_astrol_longitudes', 'calc_astrol_longitudes', (['(MJD + DELTAT)'], {'ASTRO5': 'ASTRO5'}), '(MJD + DELTAT, ASTRO5=ASTRO5)\n', (3179, 3208), False, 'from pyTMD.calc_astrol_longitudes import calc_astrol_longitudes\n'), ((3355, 3373), 'numpy.zeros', 'np.zeros', (['(nt, 60)'], {}), '((nt, 60))\n', (3363, 3373), True, 'import numpy as np\n'), ((6104, 6123), 'numpy.sin', 'np.sin', (['(omega * dtr)'], {}), '(omega * dtr)\n', (6110, 6123), True, 'import numpy as np\n'), ((6133, 6152), 'numpy.cos', 'np.cos', (['(omega * dtr)'], {}), '(omega * dtr)\n', (6139, 6152), True, 'import numpy as np\n'), ((6163, 6188), 'numpy.sin', 'np.sin', (['(2.0 * omega * dtr)'], {}), '(2.0 * omega * dtr)\n', (6169, 6188), True, 'import numpy as np\n'), ((6197, 6222), 'numpy.cos', 'np.cos', (['(2.0 * omega * dtr)'], {}), '(2.0 * omega * dtr)\n', (6203, 6222), True, 'import numpy as np\n'), ((6231, 6256), 'numpy.sin', 'np.sin', (['(3.0 * omega * dtr)'], {}), '(3.0 * omega * dtr)\n', (6237, 6256), True, 'import numpy as np\n'), ((6292, 6310), 'numpy.zeros', 'np.zeros', (['(nt, 60)'], {}), '((nt, 60))\n', (6300, 6310), True, 'import numpy as np\n'), ((6318, 6336), 'numpy.zeros', 'np.zeros', (['(nt, 60)'], {}), '((nt, 60))\n', (6326, 6336), True, 'import numpy as np\n'), ((19000, 19018), 'numpy.zeros', 'np.zeros', (['(nt, nc)'], {}), '((nt, nc))\n', (19008, 19018), True, 'import numpy as np\n'), ((19027, 19045), 'numpy.zeros', 'np.zeros', (['(nt, nc)'], {}), '((nt, nc))\n', (19035, 19045), True, 'import numpy as np\n'), ((19053, 19071), 'numpy.zeros', 'np.zeros', (['(nt, nc)'], {}), '((nt, nc))\n', (19061, 19071), True, 'import numpy as np\n'), ((3284, 3302), 'numpy.atleast_1d', 'np.atleast_1d', (['MJD'], {}), '(MJD)\n', (3297, 3302), True, 'import numpy as np\n'), ((6734, 6756), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2)'], {}), '(temp1 + temp2)\n', (6741, 6756), True, 'import numpy as np\n'), ((6813, 6869), 'numpy.sqrt', 'np.sqrt', (['((1.0 + 
0.188 * cosn) ** 2 + (0.188 * sinn) ** 2)'], {}), '((1.0 + 0.188 * cosn) ** 2 + (0.188 * sinn) ** 2)\n', (6820, 6869), True, 'import numpy as np\n'), ((7088, 7110), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2)'], {}), '(temp1 + temp2)\n', (7095, 7110), True, 'import numpy as np\n'), ((7461, 7493), 'numpy.sqrt', 'np.sqrt', (['(Mtmp1 ** 2 + Mtmp2 ** 2)'], {}), '(Mtmp1 ** 2 + Mtmp2 ** 2)\n', (7468, 7493), True, 'import numpy as np\n'), ((7515, 7571), 'numpy.sqrt', 'np.sqrt', (['((1.0 + 0.221 * cosn) ** 2 + (0.221 * sinn) ** 2)'], {}), '((1.0 + 0.221 * cosn) ** 2 + (0.221 * sinn) ** 2)\n', (7522, 7571), True, 'import numpy as np\n'), ((7777, 7799), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2)'], {}), '(temp1 + temp2)\n', (7784, 7799), True, 'import numpy as np\n'), ((7920, 7976), 'numpy.sqrt', 'np.sqrt', (['((1.0 + 0.169 * cosn) ** 2 + (0.227 * sinn) ** 2)'], {}), '((1.0 + 0.169 * cosn) ** 2 + (0.227 * sinn) ** 2)\n', (7927, 7976), True, 'import numpy as np\n'), ((8090, 8112), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2)'], {}), '(temp1 + temp2)\n', (8097, 8112), True, 'import numpy as np\n'), ((8245, 8267), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2)'], {}), '(temp1 + temp2)\n', (8252, 8267), True, 'import numpy as np\n'), ((8692, 8724), 'numpy.sqrt', 'np.sqrt', (['(Ltmp1 ** 2 + Ltmp2 ** 2)'], {}), '(Ltmp1 ** 2 + Ltmp2 ** 2)\n', (8699, 8724), True, 'import numpy as np\n'), ((8935, 8957), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2)'], {}), '(temp1 + temp2)\n', (8942, 8957), True, 'import numpy as np\n'), ((8983, 9039), 'numpy.sqrt', 'np.sqrt', (['((1.0 + 0.436 * cosn) ** 2 + (0.436 * sinn) ** 2)'], {}), '((1.0 + 0.436 * cosn) ** 2 + (0.436 * sinn) ** 2)\n', (8990, 9039), True, 'import numpy as np\n'), ((10145, 10169), 'numpy.arctan', 'np.arctan', (['(temp1 / temp2)'], {}), '(temp1 / temp2)\n', (10154, 10169), True, 'import numpy as np\n'), ((10228, 10274), 'numpy.arctan', 'np.arctan', (['(0.189 * sinn / (1.0 + 0.189 * cosn))'], {}), '(0.189 * sinn / (1.0 + 0.189 * 
cosn))\n', (10237, 10274), True, 'import numpy as np\n'), ((10489, 10513), 'numpy.arctan2', 'np.arctan2', (['Mtmp2', 'Mtmp1'], {}), '(Mtmp2, Mtmp1)\n', (10499, 10513), True, 'import numpy as np\n'), ((10542, 10589), 'numpy.arctan', 'np.arctan', (['(-0.221 * sinn / (1.0 + 0.221 * cosn))'], {}), '(-0.221 * sinn / (1.0 + 0.221 * cosn))\n', (10551, 10589), True, 'import numpy as np\n'), ((10798, 10822), 'numpy.arctan', 'np.arctan', (['(temp1 / temp2)'], {}), '(temp1 / temp2)\n', (10807, 10822), True, 'import numpy as np\n'), ((10945, 10992), 'numpy.arctan', 'np.arctan', (['(-0.227 * sinn / (1.0 + 0.169 * cosn))'], {}), '(-0.227 * sinn / (1.0 + 0.169 * cosn))\n', (10954, 10992), True, 'import numpy as np\n'), ((11107, 11131), 'numpy.arctan', 'np.arctan', (['(temp1 / temp2)'], {}), '(temp1 / temp2)\n', (11116, 11131), True, 'import numpy as np\n'), ((11261, 11285), 'numpy.arctan', 'np.arctan', (['(temp1 / temp2)'], {}), '(temp1 / temp2)\n', (11270, 11285), True, 'import numpy as np\n'), ((11542, 11567), 'numpy.arctan', 'np.arctan', (['(-Ltmp2 / Ltmp1)'], {}), '(-Ltmp2 / Ltmp1)\n', (11551, 11567), True, 'import numpy as np\n'), ((11777, 11801), 'numpy.arctan', 'np.arctan', (['(temp1 / temp2)'], {}), '(temp1 / temp2)\n', (11786, 11801), True, 'import numpy as np\n'), ((11829, 11876), 'numpy.arctan', 'np.arctan', (['(-0.436 * sinn / (1.0 + 0.436 * cosn))'], {}), '(-0.436 * sinn / (1.0 + 0.436 * cosn))\n', (11838, 11876), True, 'import numpy as np\n'), ((12834, 12850), 'numpy.tan', 'np.tan', (['(II / 2.0)'], {}), '(II / 2.0)\n', (12840, 12850), True, 'import numpy as np\n'), ((12940, 12962), 'numpy.sin', 'np.sin', (['(2.0 * (p - xi))'], {}), '(2.0 * (p - xi))\n', (12946, 12962), True, 'import numpy as np\n'), ((13027, 13045), 'numpy.arctan', 'np.arctan', (['(P2 / Q2)'], {}), '(P2 / Q2)\n', (13036, 13045), True, 'import numpy as np\n'), ((13160, 13188), 'numpy.arctan', 'np.arctan', (['(P_prime / Q_prime)'], {}), '(P_prime / Q_prime)\n', (13169, 13188), True, 'import numpy as 
np\n'), ((13905, 13937), 'numpy.sqrt', 'np.sqrt', (['(Mtmp1 ** 2 + Mtmp2 ** 2)'], {}), '(Mtmp1 ** 2 + Mtmp2 ** 2)\n', (13912, 13937), True, 'import numpy as np\n'), ((14199, 14230), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2 + 0.1006)'], {}), '(temp1 + temp2 + 0.1006)\n', (14206, 14230), True, 'import numpy as np\n'), ((14998, 15029), 'numpy.sqrt', 'np.sqrt', (['(temp1 + temp2 + 0.0981)'], {}), '(temp1 + temp2 + 0.0981)\n', (15005, 15029), True, 'import numpy as np\n'), ((7334, 7349), 'numpy.cos', 'np.cos', (['(p * dtr)'], {}), '(p * dtr)\n', (7340, 7349), True, 'import numpy as np\n'), ((7356, 7381), 'numpy.cos', 'np.cos', (['((p - omega) * dtr)'], {}), '((p - omega) * dtr)\n', (7362, 7381), True, 'import numpy as np\n'), ((7399, 7414), 'numpy.sin', 'np.sin', (['(p * dtr)'], {}), '(p * dtr)\n', (7405, 7414), True, 'import numpy as np\n'), ((7421, 7446), 'numpy.sin', 'np.sin', (['((p - omega) * dtr)'], {}), '((p - omega) * dtr)\n', (7427, 7446), True, 'import numpy as np\n'), ((12994, 13016), 'numpy.cos', 'np.cos', (['(2.0 * (p - xi))'], {}), '(2.0 * (p - xi))\n', (13000, 13016), True, 'import numpy as np\n'), ((13062, 13078), 'numpy.sin', 'np.sin', (['(2.0 * II)'], {}), '(2.0 * II)\n', (13068, 13078), True, 'import numpy as np\n'), ((13077, 13087), 'numpy.sin', 'np.sin', (['nu'], {}), '(nu)\n', (13083, 13087), True, 'import numpy as np\n'), ((13219, 13235), 'numpy.sin', 'np.sin', (['(2.0 * nu)'], {}), '(2.0 * nu)\n', (13225, 13235), True, 'import numpy as np\n'), ((13311, 13335), 'numpy.arctan', 'np.arctan', (['(P_sec / Q_sec)'], {}), '(P_sec / Q_sec)\n', (13320, 13335), True, 'import numpy as np\n'), ((13959, 13975), 'numpy.sin', 'np.sin', (['(2.0 * II)'], {}), '(2.0 * II)\n', (13965, 13975), True, 'import numpy as np\n'), ((14170, 14180), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (14176, 14180), True, 'import numpy as np\n'), ((14965, 14981), 'numpy.cos', 'np.cos', (['(2.0 * nu)'], {}), '(2.0 * nu)\n', (14971, 14981), True, 'import numpy as np\n'), ((16305, 
16329), 'numpy.arctan2', 'np.arctan2', (['Mtmp2', 'Mtmp1'], {}), '(Mtmp2, Mtmp1)\n', (16315, 16329), True, 'import numpy as np\n'), ((8554, 8585), 'numpy.cos', 'np.cos', (['((2.0 * p - omega) * dtr)'], {}), '((2.0 * p - omega) * dtr)\n', (8560, 8585), True, 'import numpy as np\n'), ((8613, 8632), 'numpy.sin', 'np.sin', (['(2 * p * dtr)'], {}), '(2 * p * dtr)\n', (8619, 8632), True, 'import numpy as np\n'), ((8636, 8667), 'numpy.sin', 'np.sin', (['((2.0 * p - omega) * dtr)'], {}), '((2.0 * p - omega) * dtr)\n', (8642, 8667), True, 'import numpy as np\n'), ((12648, 12673), 'numpy.tan', 'np.tan', (['(omega * dtr / 2.0)'], {}), '(omega * dtr / 2.0)\n', (12654, 12673), True, 'import numpy as np\n'), ((12703, 12728), 'numpy.tan', 'np.tan', (['(omega * dtr / 2.0)'], {}), '(omega * dtr / 2.0)\n', (12709, 12728), True, 'import numpy as np\n'), ((13106, 13122), 'numpy.sin', 'np.sin', (['(2.0 * II)'], {}), '(2.0 * II)\n', (13112, 13122), True, 'import numpy as np\n'), ((13121, 13131), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (13127, 13131), True, 'import numpy as np\n'), ((13204, 13214), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (13210, 13214), True, 'import numpy as np\n'), ((13266, 13282), 'numpy.cos', 'np.cos', (['(2.0 * nu)'], {}), '(2.0 * nu)\n', (13272, 13282), True, 'import numpy as np\n'), ((13515, 13525), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (13521, 13525), True, 'import numpy as np\n'), ((13563, 13573), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (13569, 13573), True, 'import numpy as np\n'), ((13778, 13793), 'numpy.cos', 'np.cos', (['(p * dtr)'], {}), '(p * dtr)\n', (13784, 13793), True, 'import numpy as np\n'), ((13800, 13825), 'numpy.cos', 'np.cos', (['((p - omega) * dtr)'], {}), '((p - omega) * dtr)\n', (13806, 13825), True, 'import numpy as np\n'), ((13843, 13858), 'numpy.sin', 'np.sin', (['(p * dtr)'], {}), '(p * dtr)\n', (13849, 13858), True, 'import numpy as np\n'), ((13865, 13890), 'numpy.sin', 'np.sin', (['((p - omega) * dtr)'], 
{}), '((p - omega) * dtr)\n', (13871, 13890), True, 'import numpy as np\n'), ((14112, 14128), 'numpy.sin', 'np.sin', (['(2.0 * II)'], {}), '(2.0 * II)\n', (14118, 14128), True, 'import numpy as np\n'), ((14155, 14171), 'numpy.sin', 'np.sin', (['(2.0 * II)'], {}), '(2.0 * II)\n', (14161, 14171), True, 'import numpy as np\n'), ((14388, 14398), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (14394, 14398), True, 'import numpy as np\n'), ((14471, 14487), 'numpy.cos', 'np.cos', (['(II / 2.0)'], {}), '(II / 2.0)\n', (14477, 14487), True, 'import numpy as np\n'), ((14897, 14907), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (14903, 14907), True, 'import numpy as np\n'), ((15064, 15074), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (15070, 15074), True, 'import numpy as np\n'), ((15196, 15212), 'numpy.cos', 'np.cos', (['(II / 2.0)'], {}), '(II / 2.0)\n', (15202, 15212), True, 'import numpy as np\n'), ((8531, 8550), 'numpy.cos', 'np.cos', (['(2 * p * dtr)'], {}), '(2 * p * dtr)\n', (8537, 8550), True, 'import numpy as np\n'), ((12597, 12616), 'numpy.cos', 'np.cos', (['(omega * dtr)'], {}), '(omega * dtr)\n', (12603, 12616), True, 'import numpy as np\n'), ((13251, 13261), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (13257, 13261), True, 'import numpy as np\n'), ((13429, 13439), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (13435, 13439), True, 'import numpy as np\n'), ((13575, 13591), 'numpy.cos', 'np.cos', (['(II / 2.0)'], {}), '(II / 2.0)\n', (13581, 13591), True, 'import numpy as np\n'), ((14408, 14424), 'numpy.sin', 'np.sin', (['(II / 2.0)'], {}), '(II / 2.0)\n', (14414, 14424), True, 'import numpy as np\n'), ((14947, 14957), 'numpy.sin', 'np.sin', (['II'], {}), '(II)\n', (14953, 14957), True, 'import numpy as np\n'), ((12890, 12912), 'numpy.cos', 'np.cos', (['(2.0 * (p - xi))'], {}), '(2.0 * (p - xi))\n', (12896, 12912), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned
board0 = np.zeros(shape=(3, 3))
board1 = np.array([[-1, 0, 1], [1, 0, 0], [1, -1, -1]])
board2 = np.array([[1, 0, 1], [0, 0, 0], [0, -1, -1]])
board3 = np.array([[1, -1, -1], [-1, 1, 1], [1, -1, -1]])
board4 = np.array([[1, 0, 0], [0, 0, -1], [0, 0, 0]])
board5 = np.array([[1, 1, -1], [0, 0, -1], [0, 0, 0]])
board6 = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
"""
board0:
array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
board1:
array([[-1, 0, 1],
[ 1, 0, 0],
[ 1, -1, -1]])
board2:
array([[ 1, 0, 1],
[ 0, 0, 0],
[ 0, -1, -1]])
board3:
array([[ 1, -1, -1],
[-1, 1, 1],
[ 1, -1, -1]])
board4:
array([[ 1, 0, 0],
[ 0, 0, -1],
[ 0, 0, 0]])
board5:
array([[ 1, 1, -1],
[ 0, 0, -1],
[ 0, 0, 0]])
board6:
array([[ 0, 0, 0],
[ 0, 1, 0],
[ 0, 0, 0]])
"""
@pytest.mark.parametrize(
    "board, expected",
    [
        (board0, False),
        (board1, False),
        (board2, False),
        (board3, True),
    ],
)
def test_check_if_board_is_full(board, expected):
    # Only board3 has every cell occupied.
    is_full = check_if_board_is_full(board, 3)
    assert is_full == expected
@pytest.mark.parametrize(
    "board, expected",
    [
        (np.array([[-1, 0, 1], [1, -1, 0], [1, -1, -1]]), -1),
        (np.array([[-1, 0, 1], [1, 1, 0], [1, -1, -1]]), 1),
    ],
)
def test_get_winner_when_game_is_decided(board, expected):
    # Each fixture contains a completed line for exactly one player.
    winner = get_winner(board)
    assert winner == expected
@pytest.mark.parametrize(
    "board, expected",
    [
        (board1, None),
        (board3, None),
    ],
)
def test_get_winner_when_game_is_not_decided(board, expected):
    # No completed line on these boards, so there is no winner.
    assert expected is get_winner(board)
@pytest.mark.parametrize(
    "board, player, expected",
    [
        (board0, 1, 0),
        (board0, -1, 0),
        (board6, -1, 0),
        (board1, 1, 1),
        (board1, -1, 1),
        (board2, 1, 1),
        (board2, -1, 1),
        (board4, 1, 1),
        (board5, 1, -1),
        (board6, 1, 1),
    ],
)
def test_negamax_whether_predicts_result(board, player, expected):
    # Note: the score is reported from the perspective of the player about to
    # move, so e.g. `(board1, -1, 1)` means player "-1" is expected to win.
    outcome = negamax(board, player)
    assert outcome['score'] == expected
@pytest.mark.parametrize(
    "board, player, expected",
    [
        (board0, 1, 0),
        (board0, -1, 0),
        (board6, -1, 0),
        (board1, 1, 1),
        (board1, -1, 1),
        (board2, 1, 1),
        (board2, -1, 1),
        (board4, 1, 1),
        (board5, 1, -1),
    ],
)
def test_negamax_alpha_beta_pruned_whether_predicts_result(board, player, expected):
    # Note: the score is reported from the perspective of the player about to
    # move, so e.g. `(board1, -1, 1)` means player "-1" is expected to win.
    outcome = negamax_alpha_beta_pruned(board, player, -np.inf, np.inf)
    assert outcome['score'] == expected
@pytest.mark.parametrize(
    "board, player, expected",
    [
        (board1, 1, [(1, 1)]),
        (board2, 1, [(0, 1)]),
        (board2, -1, [(2, 0), (0, 1)]),
    ],
)
def test_negamax_plays_proper_move(board, player, expected):
    # Several moves can be equally good, so the chosen move only has to be
    # one of the accepted candidates.
    chosen = negamax(board, player)['move']
    assert chosen in expected
@pytest.mark.parametrize(
    "board, player, expected",
    [
        (board1, 1, [(1, 1)]),
        (board2, 1, [(0, 1)]),
        (board2, -1, [(2, 0), (0, 1)]),
    ],
)
def test_negamax_alpha_beta_pruned_plays_proper_move(board, player, expected):
    # Several moves can be equally good, so the chosen move only has to be
    # one of the accepted candidates.
    chosen = negamax_alpha_beta_pruned(board, player, -np.inf, np.inf)['move']
    assert chosen in expected
| [
"scripts.utils.negamax_alpha_beta_pruned",
"numpy.zeros",
"scripts.utils.check_if_board_is_full",
"numpy.array",
"scripts.utils.negamax",
"pytest.mark.parametrize",
"scripts.utils.get_winner"
] | [((141, 163), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, 3)'}), '(shape=(3, 3))\n', (149, 163), True, 'import numpy as np\n'), ((173, 219), 'numpy.array', 'np.array', (['[[-1, 0, 1], [1, 0, 0], [1, -1, -1]]'], {}), '([[-1, 0, 1], [1, 0, 0], [1, -1, -1]])\n', (181, 219), True, 'import numpy as np\n'), ((229, 274), 'numpy.array', 'np.array', (['[[1, 0, 1], [0, 0, 0], [0, -1, -1]]'], {}), '([[1, 0, 1], [0, 0, 0], [0, -1, -1]])\n', (237, 274), True, 'import numpy as np\n'), ((284, 332), 'numpy.array', 'np.array', (['[[1, -1, -1], [-1, 1, 1], [1, -1, -1]]'], {}), '([[1, -1, -1], [-1, 1, 1], [1, -1, -1]])\n', (292, 332), True, 'import numpy as np\n'), ((342, 386), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, -1], [0, 0, 0]]'], {}), '([[1, 0, 0], [0, 0, -1], [0, 0, 0]])\n', (350, 386), True, 'import numpy as np\n'), ((396, 441), 'numpy.array', 'np.array', (['[[1, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[1, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (404, 441), True, 'import numpy as np\n'), ((451, 494), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n', (459, 494), True, 'import numpy as np\n'), ((1016, 1132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""board, expected"""', '[(board0, False), (board1, False), (board2, False), (board3, True)]'], {}), "('board, expected', [(board0, False), (board1, False\n ), (board2, False), (board3, True)])\n", (1039, 1132), False, 'import pytest\n'), ((1523, 1599), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""board, expected"""', '[(board1, None), (board3, None)]'], {}), "('board, expected', [(board1, None), (board3, None)])\n", (1546, 1599), False, 'import pytest\n'), ((1718, 1944), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""board, player, expected"""', '[(board0, 1, 0), (board0, -1, 0), (board6, -1, 0), (board1, 1, 1), (board1,\n -1, 1), (board2, 1, 1), (board2, -1, 1), (board4, 1, 1), (board5, 1, -1\n ), (board6, 1, 
1)]'], {}), "('board, player, expected', [(board0, 1, 0), (board0,\n -1, 0), (board6, -1, 0), (board1, 1, 1), (board1, -1, 1), (board2, 1, 1\n ), (board2, -1, 1), (board4, 1, 1), (board5, 1, -1), (board6, 1, 1)])\n", (1741, 1944), False, 'import pytest\n'), ((2272, 2482), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""board, player, expected"""', '[(board0, 1, 0), (board0, -1, 0), (board6, -1, 0), (board1, 1, 1), (board1,\n -1, 1), (board2, 1, 1), (board2, -1, 1), (board4, 1, 1), (board5, 1, -1)]'], {}), "('board, player, expected', [(board0, 1, 0), (board0,\n -1, 0), (board6, -1, 0), (board1, 1, 1), (board1, -1, 1), (board2, 1, 1\n ), (board2, -1, 1), (board4, 1, 1), (board5, 1, -1)])\n", (2295, 2482), False, 'import pytest\n'), ((2877, 3011), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""board, player, expected"""', '[(board1, 1, [(1, 1)]), (board2, 1, [(0, 1)]), (board2, -1, [(2, 0), (0, 1)])]'], {}), "('board, player, expected', [(board1, 1, [(1, 1)]),\n (board2, 1, [(0, 1)]), (board2, -1, [(2, 0), (0, 1)])])\n", (2900, 3011), False, 'import pytest\n'), ((3141, 3275), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""board, player, expected"""', '[(board1, 1, [(1, 1)]), (board2, 1, [(0, 1)]), (board2, -1, [(2, 0), (0, 1)])]'], {}), "('board, player, expected', [(board1, 1, [(1, 1)]),\n (board2, 1, [(0, 1)]), (board2, -1, [(2, 0), (0, 1)])])\n", (3164, 3275), False, 'import pytest\n'), ((1208, 1240), 'scripts.utils.check_if_board_is_full', 'check_if_board_is_full', (['board', '(3)'], {}), '(board, 3)\n', (1230, 1240), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n'), ((1490, 1507), 'scripts.utils.get_winner', 'get_winner', (['board'], {}), '(board)\n', (1500, 1507), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n'), ((1685, 1702), 'scripts.utils.get_winner', 'get_winner', (['board'], {}), '(board)\n', (1695, 
1702), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n'), ((1306, 1353), 'numpy.array', 'np.array', (['[[-1, 0, 1], [1, -1, 0], [1, -1, -1]]'], {}), '([[-1, 0, 1], [1, -1, 0], [1, -1, -1]])\n', (1314, 1353), True, 'import numpy as np\n'), ((1365, 1411), 'numpy.array', 'np.array', (['[[-1, 0, 1], [1, 1, 0], [1, -1, -1]]'], {}), '([[-1, 0, 1], [1, 1, 0], [1, -1, -1]])\n', (1373, 1411), True, 'import numpy as np\n'), ((2225, 2247), 'scripts.utils.negamax', 'negamax', (['board', 'player'], {}), '(board, player)\n', (2232, 2247), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n'), ((2795, 2852), 'scripts.utils.negamax_alpha_beta_pruned', 'negamax_alpha_beta_pruned', (['board', 'player', '(-np.inf)', 'np.inf'], {}), '(board, player, -np.inf, np.inf)\n', (2820, 2852), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n'), ((3095, 3117), 'scripts.utils.negamax', 'negamax', (['board', 'player'], {}), '(board, player)\n', (3102, 3117), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n'), ((3377, 3434), 'scripts.utils.negamax_alpha_beta_pruned', 'negamax_alpha_beta_pruned', (['board', 'player', '(-np.inf)', 'np.inf'], {}), '(board, player, -np.inf, np.inf)\n', (3402, 3434), False, 'from scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n')] |
#################################################################################################################
# ewstools
# Description: Python package for computing, analysing and visualising
# early warning signals (EWS) in time-series data
# Author: <NAME>
# Web: http://www.math.uwaterloo.ca/~tbury/
# Code repo: https://github.com/ThomasMBury/ewstools
# Documentation: https://ewstools.readthedocs.io/
#
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME> http://www.math.uwaterloo.ca/~tbury/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################################################
#---------------------------------
# Import relevant packages
#--------------------------------
# For numeric computation and DataFrames
import numpy as np
import pandas as pd
# To compute power spectrum using Welch's method
from scipy import signal
import scipy.linalg
# For fitting power spectrum models and computing AIC weights
from lmfit import Model
def pspec_welch(yVals,
                dt,
                ham_length=40,
                ham_offset=0.5,
                w_cutoff=1,
                scaling='spectrum'):
    '''
    Compute the power spectrum of a stationary, evenly sampled time-series
    with Welch's method (spectra over overlapping Hamming windows, averaged).

    Args
    ----
    yVals: array of floats
        Time-series values.
    dt: float
        Time between consecutive measurements.
    ham_length: int
        Hamming window length in data points; a value in (0, 1] is treated
        as a proportion of the series length.
    ham_offset: float
        Offset between successive windows, as a proportion of ham_length.
    w_cutoff: float
        Cutoff frequency, as a proportion of the maximum frequency present
        in the empirical spectrum.
    scaling: {'spectrum', 'density'}
        'spectrum' for the power spectrum, 'density' for the power spectral
        density (normalised so the area under it is one).

    Returns
    -------
    pd.Series:
        Power indexed by (angular) frequency.
    '''
    # Sampling frequency from the measurement spacing
    samp_freq = 1 / dt
    n_obs = len(yVals)
    # Interpret a fractional ham_length as a proportion of the series length
    if 0 < ham_length <= 1:
        ham_length = n_obs * ham_length
    # The window cannot be longer than the series itself
    if ham_length >= n_obs:
        ham_length = n_obs
    # Offset between windows, in data points
    offset_points = int(ham_offset * ham_length)
    # Two-sided Welch periodogram (scipy returns frequencies and powers)
    freqs, powers = signal.welch(yVals,
                                 samp_freq,
                                 nperseg=ham_length,
                                 noverlap=offset_points,
                                 return_onesided=False,
                                 scaling=scaling)
    # Index by angular frequency (scale by 2*pi) and sort ascending
    spectrum = pd.Series(powers, index=2 * np.pi * freqs, name='Power spectrum')
    spectrum.index.name = 'Frequency'
    spectrum.sort_index(inplace=True)
    # Mirror the most negative frequency onto the positive side (symmetry)
    spectrum.at[-min(spectrum.index)] = spectrum.iat[0]
    # Keep only frequencies within the cutoff
    wmax = w_cutoff * max(spectrum.index)
    return spectrum[-wmax:wmax]
#------------Functional forms of power spectra to fit------------#
def psd_fold(w, sigma, lam):
    '''
    Analytical approximation for the power spectrum prior to a Fold
    bifurcation: a Lorentzian of half-width |lam| and noise amplitude sigma.
    '''
    norm = sigma ** 2 / (2 * np.pi)
    return norm / (w ** 2 + lam ** 2)
def psd_flip(w, sigma, r):
    '''
    Analytical approximation for the power spectrum prior to a Flip
    bifurcation, with lag-1 autocorrelation r and noise amplitude sigma.
    '''
    denom = 1 + r ** 2 - 2 * r * np.cos(w)
    return (sigma ** 2 / (2 * np.pi)) / denom
def psd_hopf(w, sigma, mu, w0):
    '''
    Analytical approximation for the power spectrum prior to a Hopf
    bifurcation: two Lorentzian peaks centred at +/- w0 with width |mu|.
    '''
    peak_plus = 1 / ((w + w0) ** 2 + mu ** 2)
    peak_minus = 1 / ((w - w0) ** 2 + mu ** 2)
    return sigma ** 2 / (4 * np.pi) * (peak_plus + peak_minus)
def psd_null(w, sigma):
    '''
    Power spectrum of white noise (flat in w).
    '''
    # w**0 evaluates to 1 and broadcasts over array-valued w,
    # keeping the output shaped like the input frequencies.
    return w ** 0 * sigma ** 2 / (2 * np.pi)
#-------Obtain 'best guess' intitialisation parameters for optimisation------%
def shopf_init(smax, stot, wdom):
    '''
    Compute 'best guess' initialisation values for sigma, mu and w0 when
    fitting the Hopf model to an empirical power spectrum.

    Args
    ----
    smax: float
        Maximum power in the power spectrum.
    stot: float
        Total power in the power spectrum.
    wdom: float
        Frequency with the highest power.

    Return
    ------
    list of floats:
        Initialisation parameters [sigma, mu, w0].
    '''

    def _alpha(s, t, w):
        # Composite term appearing in the closed-form inversion
        inner = np.sqrt(
            64 * (np.pi ** 4) * (w ** 6) * (s ** 6)
            - 13 * (np.pi ** 2) * (w ** 4) * (s ** 4) * (t ** 2)
            + 2 * (w ** 2) * (s ** 2) * (t ** 4))
        return (t ** 3
                + 9 * (np.pi ** 2) * (w ** 2) * (s ** 2) * t
                + 3 * np.sqrt(3) * np.pi * inner)

    # Cube root of alpha is used twice in the mu expression
    cube_root = _alpha(smax, stot, wdom) ** (1 / 3)
    # Initialisation for mu
    mu = -(stot
           + cube_root
           + (stot ** 2 - 12 * (np.pi ** 2) * (wdom ** 2) * (smax ** 2)) / cube_root
           ) / (3 * np.pi * smax)
    # Initialisation for sigma (mu is negative, so the radicand is positive)
    sigma = np.sqrt(-2 * mu * stot)
    # Dominant frequency is the best guess for w0
    return [sigma, mu, wdom]
def sfold_init(smax, stot):
    '''
    Compute 'best guess' initialisation values for sigma and lamda when
    fitting the Fold model to an empirical power spectrum.

    Args
    --------------
    smax: float
        Maximum power in the power spectrum.
    stot: float
        Total power in the power spectrum.

    Return
    -----------------
    list of floats:
        Initialisation parameters [sigma, lambda].
    '''
    # Invert the Lorentzian peak height and area for the two parameters
    lam_guess = -stot / (np.pi * smax)
    sig_guess = np.sqrt(2 * stot ** 2 / (np.pi * smax))
    return [sig_guess, lam_guess]
def sflip_init(smax, stot):
    '''
    Compute 'best guess' initialisation values for sigma and r when
    fitting the Flip model to an empirical power spectrum.

    Args
    --------------
    smax: float
        Maximum power in the power spectrum.
    stot: float
        Total power in the power spectrum.

    Return
    -----------------
    list of floats:
        Initialisation parameters [sigma, r].
    '''
    # Lag-1 autocorrelation guess from peak height relative to total power
    peak_area = 2 * np.pi * smax
    r0 = (stot - peak_area) / (stot + peak_area)
    # Noise amplitude guess consistent with r0 and the total power
    return [np.sqrt(stot * (1 - r0 ** 2)), r0]
def snull_init(stot):
    '''
    Compute the 'best guess' initialisation value for sigma when fitting
    the Null (white-noise) model to an empirical power spectrum.

    Args
    --------------
    stot: float
        Total power in the power spectrum.

    Return
    -----------------
    list of floats:
        Initialisation parameters [sigma].
    '''
    # For white noise the total power equals sigma squared
    return [np.sqrt(stot)]
#---------Run optimisation to compute best fits-----------#
# Fold fit
def fit_fold(pspec, init):
    '''
    Fit the Fold power spectrum model to pspec and compute the AIC score.
    Optimisation is performed with the LMFIT package.

    Args
    --------------
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency.
    init: list of floats
        Initial parameter guesses [sigma_init, lambda_init].

    Returns
    ----------------
    list:
        [aic, result] where aic is the AIC score of the fit and result is
        an LMFIT handle with further information on the fit.
    '''
    # LMFIT works on plain lists of x (frequency) and y (power) values
    w_data = pspec.index.tolist()
    s_data = pspec.tolist()
    sigma_init, lambda_init = init
    fold_model = Model(psd_fold)
    # Constraint S(wMax) < psi_fold * S(0), enforced via the bound on lam
    psi_fold = 0.5
    w_max = max(w_data)
    # sigma: positive, with a generous upper bound to avoid runaway values
    fold_model.set_param_hint('sigma', value=sigma_init, min=0, max=10 * sigma_init)
    # lam: negative, bounded below by the spectral-shape constraint
    fold_model.set_param_hint('lam',
                              value=lambda_init,
                              min=-np.sqrt(psi_fold / (1 - psi_fold)) * w_max,
                              max=0)
    params = fold_model.make_params()
    # Fit the model to the empirical spectrum
    fit = fold_model.fit(s_data, params, w=w_data)
    return [fit.aic, fit]
# Fold fit
def fit_flip(pspec, init):
    '''
    Fit the Flip power spectrum model to pspec and compute the AIC score.
    Optimisation is performed with the LMFIT package.

    Args
    --------------
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency.
    init: list of floats
        Initial parameter guesses [sigma_init, r_init].

    Returns
    ----------------
    list:
        [aic, result] where aic is the AIC score of the fit and result is
        an LMFIT handle with further information on the fit.
    '''
    # LMFIT works on plain lists of x (frequency) and y (power) values
    w_data = pspec.index.tolist()
    s_data = pspec.tolist()
    sigma_init, r_init = init
    flip_model = Model(psd_flip)
    # sigma: positive, with a generous upper bound to avoid runaway values
    flip_model.set_param_hint('sigma', value=sigma_init, min=0, max=10 * sigma_init)
    # r: constrained to (-1, 0), the regime approaching a Flip bifurcation
    flip_model.set_param_hint('r', value=r_init, min=-1, max=0)
    params = flip_model.make_params()
    # Fit the model to the empirical spectrum
    fit = flip_model.fit(s_data, params, w=w_data)
    return [fit.aic, fit]
# Function to fit Hopf model to empirical specrum with specified initial parameter guess
def fit_hopf(pspec, init):
    '''
    Fit the Hopf power spectrum model to pspec and compute AIC score.
    Uses the package LMFIT for optimisation.
    Args
    --------------
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency
    init: list of floats
        Initial parameter guesses of the form [sigma_init, mu_init, w0_init]
    Returns
    ----------------
    list:
        Form [aic, result] where aic is the AIC score for the model fit,
        and result is a handle that contains further information on the fit.
    '''
    # Put frequency values and power values as a list to use LMFIT
    freq_vals = pspec.index.tolist()
    power_vals = pspec.tolist()
    # Assign labels to initialisation values
    sigma_init, mu_init, w0_init = init
    # If any labels are nan, resort to default values
    if np.isnan(sigma_init) or np.isnan(mu_init) or np.isnan(w0_init):
        sigma_init, mu_init, w0_init = [1,-0.1,1]
    # Constraint parameter
    psi_hopf = 0.2
    # Compute initialisation value for the dummy variable delta (direct map with w0)
    # It must be positive to adhere to constraint - thus if negative set to 0.
    # NOTE(review): the floor 0.0001 (rather than 0) keeps delta strictly
    # positive so the optimiser does not start exactly on the bound.
    delta_init = max(
            w0_init + (mu_init/(2*np.sqrt(psi_hopf)))*np.sqrt(4-3*psi_hopf + np.sqrt(psi_hopf**2-16*psi_hopf+16)),
            0.0001)
    # Assign model object
    model = Model(psd_hopf)
    ## Set initialisations parameters in model attributes
    # Sigma must be positive, and set a (high) upper bound to avoid runaway computation
    model.set_param_hint('sigma', value=sigma_init, min=0)
    # Psi is a fixed parameter (not used in optimisation)
    model.set_param_hint('psi', value=psi_hopf, vary=False)
    # Mu must be negative
    model.set_param_hint('mu', value=mu_init, max=0, vary=True)
    # Delta is a dummy parameter, satisfying d = w0 - wThresh (see paper for wThresh). It is allowed to vary, in place of w0.
    model.set_param_hint('delta', value = delta_init, min=0, vary=True)
    # w0 is a fixed parameter dependent on delta (w0 = delta + wThresh)
    # The expr string mirrors the delta_init formula above; it is evaluated
    # by LMFIT's own expression parser, hence sqrt rather than np.sqrt.
    model.set_param_hint('w0',expr='delta - (mu/(2*sqrt(psi)))*sqrt(4-3*psi + sqrt(psi**2-16*psi+16))',max=2.5,vary=False)
    # Assign initial parameter values and constraints
    params = model.make_params()
    # Fit model to the empircal spectrum
    result = model.fit(power_vals, params, w=freq_vals)
    # Compute AIC score
    aic = result.aic
    # print('hopf aic is {}'.format(aic))
    # Export AIC score and model fit
    return [aic, result]
# Function to fit Null model to empirical specrum with specified initial parameter guess
def fit_null(pspec, init):
    '''
    Fit the Null (white-noise) power spectrum model to pspec and compute
    the AIC score. Optimisation is performed with the LMFIT package.

    Args
    --------------
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency.
    init: list of floats
        Initial parameter guesses [sigma_init].

    Returns
    ----------------
    list:
        [aic, result] where aic is the AIC score of the fit and result is
        an LMFIT handle with further information on the fit.
    '''
    # LMFIT works on plain lists of x (frequency) and y (power) values
    w_data = pspec.index.tolist()
    s_data = pspec.tolist()
    sigma_init = init[0]
    null_model = Model(psd_null)
    # sigma: positive, with a generous upper bound to avoid runaway values
    null_model.set_param_hint('sigma', value=sigma_init, vary=True, min=0, max=10 * sigma_init)
    params = null_model.make_params()
    # Fit the model to the empirical spectrum
    fit = null_model.fit(s_data, params, w=w_data)
    return [fit.aic, fit]
def aic_weights(aic_scores):
    """
    Convert AIC scores into normalised Akaike weights.

    Args
    ----------------
    aic_scores: np.array
        An array of AIC scores

    Returns
    -----------------
    np.array
        Array of the corresponding AIC weights (they sum to 1)
    """
    # Score differences relative to the best (lowest-AIC) model
    delta = aic_scores - min(aic_scores)
    # Relative likelihood of each model given its score difference
    rel_likelihood = np.exp(-(1/2)*delta)
    # Normalise so the weights sum to one
    return rel_likelihood/sum(rel_likelihood)
#-----------Compute spectral metrics (EWS) from power spectrum------#
def pspec_metrics(pspec,
                  ews = ['smax','cf','aic'],
                  aic = ['Fold','Hopf','Null'],
                  sweep = False):
    '''
    Compute the metrics associated with pspec that can be
    used as EWS.

    Args
    -------------------
    pspec: pd.Series
        Power spectrum as a Series indexed by frequency
    ews: list of {'smax', 'cf', 'aic'}
        EWS to be computed. Options include peak in the power spectrum ('smax'),
        coherence factor ('cf'), AIC weights ('aic').
    aic: AIC weights to compute. Any subset of ['Fold','Flip','Hopf','Null'].
    sweep: bool
        If 'True', sweep over a range of intialisation
        parameters when optimising to compute AIC scores, at the expense of
        longer computation. If 'False', intialisation parameter is taken as the
        'best guess'.

    Return
    -------------------
    dict:
        A dictionary of spectral EWS obtained from pspec
    '''
    # Initialise a dictionary for EWS
    spec_ews = {}
    ## Compute Smax
    if 'smax' in ews:
        smax = max(pspec)
        # add to DataFrame
        spec_ews['Smax'] = smax
    ## Compute the coherence factor
    if 'cf' in ews:
        # frequency at which peak occurs
        w_peak = abs(pspec.idxmax())
        # power of peak frequency
        power_peak = pspec.max()
        # compute the first frequency from -w_peak at which power<power_peak/2
        # (the string sentinel 'None' flags "no such frequency found")
        w_half = next( (w for w in pspec[-w_peak:].index if pspec.loc[w] < power_peak/2 ), 'None')
        # if there was no such frequency, or if peak crosses zero frequency,
        # set w_peak = 0 (makes CF=0)
        if w_half == 'None' or w_half > 0:
            w_peak = 0
        else:
            # double the difference between w_half and -w_peak to get the width of the peak
            w_width = 2*(w_half - (-w_peak))
        # compute coherence factor (height/relative width); w_width is only
        # defined when w_peak != 0, which the ternary guard guarantees
        coher_factor = power_peak/(w_width/w_peak) if w_peak != 0 else 0
        # add to dataframe
        spec_ews['Coherence factor'] = coher_factor
    ## Compute AIC weights of fitted analytical forms
    if 'aic' in ews:
        # Compute the empirical metrics that allow us to choose sensible initialisation parameters
        # Peak in power spectrum
        smax = pspec.max()
        # Area underneath power spectrum (~ variance)
        stot = pspec.sum()*(pspec.index[1]-pspec.index[0])
        # Dominant frequency (take positive value)
        wdom = abs(pspec.idxmax())
        ## Create array of initialisation parmaeters
        # Sweep values (as proportion of baseline guess) if sweep = True
        sweep_vals = np.array([0.5,1,1.5]) if sweep else np.array([1])
        # Baseline parameter initialisations (computed using empirical spectrum)
        # Sfold
        [sigma_init_fold, lambda_init] = sfold_init(smax,stot)
        # Sflip
        [sigma_init_flip, r_init] = sflip_init(smax,stot)
        # Shopf
        [sigma_init_hopf, mu_init, w0_init] = shopf_init(smax,stot,wdom)
        # Snull
        [sigma_init_null] = snull_init(stot)
        # Arrays of initial values
        init_fold_array = {'sigma': sweep_vals*sigma_init_fold,
                           'lambda': sweep_vals*lambda_init}
        # r parameter cannot go below -1
        r_sweep_vals = [0.5*r_init,r_init,0.5*r_init+0.5] if sweep else [r_init]
        init_flip_array = {'sigma': sweep_vals*sigma_init_flip,
                           'r': r_sweep_vals}
        init_hopf_array = {'sigma': sweep_vals*sigma_init_hopf,
                           'mu': sweep_vals*mu_init,
                           'w0': sweep_vals*w0_init}
        init_null_array = {'sigma': sweep_vals*sigma_init_null}
        ## Compute AIC values and fits
        ## Fold
        # Initialise list to store AIC and model fits
        fold_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_fold_array['sigma'])):
            for j in range(len(init_fold_array['lambda'])):
                # Initial parameter guess
                init_fold = [init_fold_array['sigma'][i],init_fold_array['lambda'][j]]
                # Compute fold fit and AIC score
                [aic_temp, model_temp] = fit_fold(pspec, init_fold)
                # Store in list
                fold_aic_fits.append([aic_temp, model_temp])
        # Put list into array (object dtype: column 0 scores, column 1 fits)
        array_temp = np.array(fold_aic_fits)
        # Pick out the best model (lowest AIC)
        [aic_fold, model_fold] = array_temp[array_temp[:,0].argmin()]
        ## Flip
        # Initialise list to store AIC and model fits
        flip_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_flip_array['sigma'])):
            for j in range(len(init_flip_array['r'])):
                # Initial parameter guess
                init_flip = [init_flip_array['sigma'][i],init_flip_array['r'][j]]
                # Compute fold fit and AIC score
                [aic_temp, model_temp] = fit_flip(pspec, init_flip)
                # Store in list
                flip_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(flip_aic_fits)
        # Pick out the best model
        [aic_flip, model_flip] = array_temp[array_temp[:,0].argmin()]
        ## Hopf
        # Initialise list to store AIC and model fits
        hopf_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_hopf_array['sigma'])):
            for j in range(len(init_hopf_array['mu'])):
                for k in range(len(init_hopf_array['w0'])):
                    # Initial parameter guess
                    init_hopf = [init_hopf_array['sigma'][i],init_hopf_array['mu'][j],init_hopf_array['w0'][k]]
                    # Compute fold fit and AIC score
                    [aic_temp, model_temp] = fit_hopf(pspec, init_hopf)
                    # Store in list
                    hopf_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(hopf_aic_fits)
        # Pick out the best model
        [aic_hopf, model_hopf] = array_temp[array_temp[:,0].argmin()]
        ## Null
        # Initialise list to store AIC and model fits
        null_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_null_array['sigma'])):
            # Initial parameter guess
            init_null = [init_null_array['sigma'][i]]
            # Compute fold fit and AIC score
            [aic_temp, model_temp] = fit_null(pspec, init_null)
            # Store in list
            null_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(null_aic_fits)
        # Pick out the best model
        [aic_null, model_null] = array_temp[array_temp[:,0].argmin()]
        # Compute chosen AIC weights from the AIC scores
        aic_scores = {}
        if 'Fold' in aic:
            aic_scores['Fold']=aic_fold
        if 'Flip' in aic:
            aic_scores['Flip']=aic_flip
        if 'Hopf' in aic:
            aic_scores['Hopf']=aic_hopf
        if 'Null' in aic:
            aic_scores['Null']=aic_null
        aicw = aic_weights(np.array([aic_scores[x] for x in aic]))
        aic_dict = dict(zip(aic,aicw))
        # Add to Dataframe
        if 'Fold' in aic:
            spec_ews['AIC fold'] = aic_dict['Fold']
        if 'Flip' in aic:
            spec_ews['AIC flip'] = aic_dict['Flip']
        if 'Hopf' in aic:
            spec_ews['AIC hopf'] = aic_dict['Hopf']
        if 'Null' in aic:
            spec_ews['AIC null'] = aic_dict['Null']
        # Add fitted parameter values to DataFrame
        # NOTE(review): assumes psd_fold names its decay parameter 'lam'
        # (other code here uses 'lambda' for the init dict) -- confirm
        # against the psd_fold definition.
        spec_ews['Params fold'] = dict((k, model_fold.values[k]) for k in ('sigma','lam')) # don't include dummy params
        spec_ews['Params flip'] = dict((k, model_flip.values[k]) for k in ('sigma','r'))
        spec_ews['Params hopf'] = dict((k, model_hopf.values[k]) for k in ('sigma','mu','w0','delta','psi'))
        spec_ews['Params null'] = model_null.values
    # Return DataFrame of metrics
    return spec_ews
#------------------------
## Function to compute lag-1 autocovariance matrix
def compute_autocov(df_in):
    """
    Compute the lag-1 autocovariance matrix of the n time series in df_in.

    Uses the definition
        phi_ij = < X_i(t+1) X_j(t) >
    for each element of the matrix phi (not symmetric in general).

    Args
    -------------------
    df_in: DataFrame with n columns indexed by time

    Return
    -------------------
    np.array:
        the n x n lag-1 autocovariance matrix
    """
    cols = df_in.columns
    n_vars = len(cols)

    def _lagged_cov(series_a, series_b):
        """Covariance of series_a shifted forward by one step against series_b.

        Note this does not commute (a<->b) in general.
        """
        shifted = series_a.shift(1)
        # Pair the shifted and unshifted series so pandas drops NaN rows
        paired = pd.concat([shifted, series_b], axis=1)
        return paired.cov().iloc[0, 1]

    # Row-major list of all n*n matrix entries
    entries = [
        _lagged_cov(df_in[cols[i]], df_in[cols[j]])
        for i in range(n_vars)
        for j in range(n_vars)
    ]
    return np.array(entries).reshape(n_vars, n_vars)
# NOTE(review): the string below is a leftover duplicate of
# compute_autocov's docstring. As a bare module-level expression
# statement it has no runtime effect and could be removed.
'''
Computes the autocovariance (lag-1) matrix of n
time series provided in df_in.
Using the definition
phi_ij = < X_i(t+1) X_j(t) >
for each element of the autocovariance matrix phi.

Args
-------------------
df_in: DataFrame with n columns indexed by time

Return
-------------------
np.array:
    autocovariance matrix
'''
#---------------------------------------
## Function to do Jacobian and eval reconstruction
def eval_recon(df_in):
    """
    Reconstruct an estimate of the Jacobian matrix from stationary
    time-series data and return its eigen-decomposition alongside the
    matrix itself.

    Args
    -------------------
    df_in: DataFrame with two columns indexed by time

    Return
    -------------------
    dict
        Consists of
        - 'Eigenvalues': np.array of eigenvalues
        - 'Eigenvectors': np.array of eigenvectors
        - 'Jacobian': pd.DataFrame of Jacobian entries
    """
    # Sampling interval between consecutive observations (currently unused)
    dt = df_in.index[1] - df_in.index[0]
    # Lag-1 autocovariance and equal-time covariance matrices
    autocov_matrix = compute_autocov(df_in)
    cov_matrix = df_in.cov()
    # Discrete-time Jacobian estimate (formula in Williamson (2015));
    # requires inverting the covariance matrix
    jacobian = np.matmul(autocov_matrix, np.linalg.inv(cov_matrix))
    # Label rows/columns so downstream code can refer to variables by name
    jacobian_df = pd.DataFrame(jacobian, columns=df_in.columns,
                               index=df_in.columns)
    eigenvalues, eigenvectors = np.linalg.eig(jacobian)
    return {'Eigenvalues': eigenvalues,
            'Eigenvectors': eigenvectors,
            'Jacobian': jacobian_df}
| [
"pandas.DataFrame",
"scipy.signal.welch",
"numpy.linalg.eig",
"numpy.isnan",
"numpy.array",
"pandas.Series",
"numpy.exp",
"numpy.linalg.inv",
"numpy.cos",
"numpy.sqrt",
"pandas.concat",
"lmfit.Model"
] | [((4133, 4248), 'scipy.signal.welch', 'signal.welch', (['yVals', 'fs'], {'nperseg': 'ham_length', 'noverlap': 'ham_offset_points', 'return_onesided': '(False)', 'scaling': 'scaling'}), '(yVals, fs, nperseg=ham_length, noverlap=ham_offset_points,\n return_onesided=False, scaling=scaling)\n', (4145, 4248), False, 'from scipy import signal\n'), ((4495, 4573), 'pandas.Series', 'pd.Series', (['pspec_raw[1]'], {'index': '(2 * np.pi * pspec_raw[0])', 'name': '"""Power spectrum"""'}), "(pspec_raw[1], index=2 * np.pi * pspec_raw[0], name='Power spectrum')\n", (4504, 4573), True, 'import pandas as pd\n'), ((7039, 7062), 'numpy.sqrt', 'np.sqrt', (['(-2 * mu * stot)'], {}), '(-2 * mu * stot)\n', (7046, 7062), True, 'import numpy as np\n'), ((7696, 7735), 'numpy.sqrt', 'np.sqrt', (['(2 * stot ** 2 / (np.pi * smax))'], {}), '(2 * stot ** 2 / (np.pi * smax))\n', (7703, 7735), True, 'import numpy as np\n'), ((8436, 8464), 'numpy.sqrt', 'np.sqrt', (['(stot * (1 - r ** 2))'], {}), '(stot * (1 - r ** 2))\n', (8443, 8464), True, 'import numpy as np\n'), ((8937, 8950), 'numpy.sqrt', 'np.sqrt', (['stot'], {}), '(stot)\n', (8944, 8950), True, 'import numpy as np\n'), ((9869, 9884), 'lmfit.Model', 'Model', (['psd_fold'], {}), '(psd_fold)\n', (9874, 9884), False, 'from lmfit import Model\n'), ((11347, 11362), 'lmfit.Model', 'Model', (['psd_flip'], {}), '(psd_flip)\n', (11352, 11362), False, 'from lmfit import Model\n'), ((13443, 13458), 'lmfit.Model', 'Model', (['psd_hopf'], {}), '(psd_hopf)\n', (13448, 13458), False, 'from lmfit import Model\n'), ((15487, 15502), 'lmfit.Model', 'Model', (['psd_null'], {}), '(psd_null)\n', (15492, 15502), False, 'from lmfit import Model\n'), ((16454, 16481), 'numpy.exp', 'np.exp', (['(-(1 / 2) * aic_diff)'], {}), '(-(1 / 2) * aic_diff)\n', (16460, 16481), True, 'import numpy as np\n'), ((28714, 28775), 'pandas.DataFrame', 'pd.DataFrame', (['jac'], {'columns': 'df_in.columns', 'index': 'df_in.columns'}), '(jac, columns=df_in.columns, 
index=df_in.columns)\n', (28726, 28775), True, 'import pandas as pd\n'), ((28845, 28863), 'numpy.linalg.eig', 'np.linalg.eig', (['jac'], {}), '(jac)\n', (28858, 28863), True, 'import numpy as np\n'), ((12907, 12927), 'numpy.isnan', 'np.isnan', (['sigma_init'], {}), '(sigma_init)\n', (12915, 12927), True, 'import numpy as np\n'), ((12931, 12948), 'numpy.isnan', 'np.isnan', (['mu_init'], {}), '(mu_init)\n', (12939, 12948), True, 'import numpy as np\n'), ((12952, 12969), 'numpy.isnan', 'np.isnan', (['w0_init'], {}), '(w0_init)\n', (12960, 12969), True, 'import numpy as np\n'), ((21227, 21250), 'numpy.array', 'np.array', (['fold_aic_fits'], {}), '(fold_aic_fits)\n', (21235, 21250), True, 'import numpy as np\n'), ((22073, 22096), 'numpy.array', 'np.array', (['flip_aic_fits'], {}), '(flip_aic_fits)\n', (22081, 22096), True, 'import numpy as np\n'), ((23052, 23075), 'numpy.array', 'np.array', (['hopf_aic_fits'], {}), '(hopf_aic_fits)\n', (23060, 23075), True, 'import numpy as np\n'), ((23827, 23850), 'numpy.array', 'np.array', (['null_aic_fits'], {}), '(null_aic_fits)\n', (23835, 23850), True, 'import numpy as np\n'), ((26452, 26483), 'pandas.concat', 'pd.concat', (['[a_shift, b]'], {'axis': '(1)'}), '([a_shift, b], axis=1)\n', (26461, 26483), True, 'import pandas as pd\n'), ((28609, 28630), 'numpy.linalg.inv', 'np.linalg.inv', (['ar_cov'], {}), '(ar_cov)\n', (28622, 28630), True, 'import numpy as np\n'), ((19393, 19416), 'numpy.array', 'np.array', (['[0.5, 1, 1.5]'], {}), '([0.5, 1, 1.5])\n', (19401, 19416), True, 'import numpy as np\n'), ((19429, 19442), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (19437, 19442), True, 'import numpy as np\n'), ((24356, 24394), 'numpy.array', 'np.array', (['[aic_scores[x] for x in aic]'], {}), '([aic_scores[x] for x in aic])\n', (24364, 24394), True, 'import numpy as np\n'), ((27101, 27124), 'numpy.array', 'np.array', (['list_elements'], {}), '(list_elements)\n', (27109, 27124), True, 'import numpy as np\n'), ((6584, 6730), 
'numpy.sqrt', 'np.sqrt', (['(64 * np.pi ** 4 * wdom ** 6 * smax ** 6 - 13 * np.pi ** 2 * wdom ** 4 * \n smax ** 4 * stot ** 2 + 2 * wdom ** 2 * smax ** 2 * stot ** 4)'], {}), '(64 * np.pi ** 4 * wdom ** 6 * smax ** 6 - 13 * np.pi ** 2 * wdom **\n 4 * smax ** 4 * stot ** 2 + 2 * wdom ** 2 * smax ** 2 * stot ** 4)\n', (6591, 6730), True, 'import numpy as np\n'), ((5445, 5454), 'numpy.cos', 'np.cos', (['w'], {}), '(w)\n', (5451, 5454), True, 'import numpy as np\n'), ((10170, 10204), 'numpy.sqrt', 'np.sqrt', (['(psi_fold / (1 - psi_fold))'], {}), '(psi_fold / (1 - psi_fold))\n', (10177, 10204), True, 'import numpy as np\n'), ((6567, 6577), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (6574, 6577), True, 'import numpy as np\n'), ((13297, 13314), 'numpy.sqrt', 'np.sqrt', (['psi_hopf'], {}), '(psi_hopf)\n', (13304, 13314), True, 'import numpy as np\n'), ((13340, 13383), 'numpy.sqrt', 'np.sqrt', (['(psi_hopf ** 2 - 16 * psi_hopf + 16)'], {}), '(psi_hopf ** 2 - 16 * psi_hopf + 16)\n', (13347, 13383), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def amplify(x):
    """Exaggerate the curvature of a closed polyline coordinate list.

    Solves an over-determined least-squares system that scales each
    successive difference by 1.9 (curvature amplification) while lightly
    (weight 0.3) anchoring every point to its original value.

    Returns the solution as a list of single-element lists (one per point).
    """
    n = len(x)
    design = np.matrix(np.zeros((2*n, n)))
    target = np.matrix(np.zeros((2*n, 1)))
    for i in range(n):
        nxt = (i + 1) % n  # wrap around: the curve is closed
        # Curvature rows: amplified difference to the next point
        design[i, i] = 1.
        design[i, nxt] = -1.
        target[i, 0] = (x[i] - x[nxt]) * 1.9
        # Fidelity rows: weak pull towards the original coordinate
        design[n + i, i] = .3
        target[n + i, 0] = x[i] * .3
    # Normal-equation solution of the least-squares problem
    return (np.linalg.inv(design.T * design) * design.T * target).tolist()
# Closed outline coordinates of the demo shape
x = [100,100,97,93,91,87,84,83,85,87,88,89,90,90,90,88,87,86,84,82,80,
     77,75,72,69,66,62,58,54,47,42,38,34,32,28,24,22,20,17,15,13,12,9,
     7,8,9,8,6,0,0,2,0,0,2,3,2,0,0,1,4,8,11,14,19,24,27,25,23,21,19]
y = [0,25,27,28,30,34,37,41,44,47,51,54,59,64,66,70,74,78,80,83,86,90,93,
     95,96,98,99,99,100,99,99,99,98,98,96,94,93,91,90,87,85,79,75,70,65,
     62,60,58,52,49,46,44,41,37,34,30,27,20,17,15,16,17,17,19,18,14,11,6,4,1]
# Original outline, dashed green; append the first point to close the loop
plt.plot(x+[x[0]], y+[y[0]], 'g--')
# Amplified outline (note: amplify returns column vectors, i.e. a list of
# one-element lists, which matplotlib accepts as (n, 1) data)
x = amplify(x)
y = amplify(y)
plt.plot(x+[x[0]], y+[y[0]], 'k-', linewidth=3)
plt.axis('off')
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.linalg.inv",
"matplotlib.pyplot.gca"
] | [((865, 904), 'matplotlib.pyplot.plot', 'plt.plot', (['(x + [x[0]])', '(y + [y[0]])', '"""g--"""'], {}), "(x + [x[0]], y + [y[0]], 'g--')\n", (873, 904), True, 'import matplotlib.pyplot as plt\n'), ((933, 984), 'matplotlib.pyplot.plot', 'plt.plot', (['(x + [x[0]])', '(y + [y[0]])', '"""k-"""'], {'linewidth': '(3)'}), "(x + [x[0]], y + [y[0]], 'k-', linewidth=3)\n", (941, 984), True, 'import matplotlib.pyplot as plt\n'), ((981, 996), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (989, 996), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1053, 1055), True, 'import matplotlib.pyplot as plt\n'), ((101, 121), 'numpy.zeros', 'np.zeros', (['(2 * n, n)'], {}), '((2 * n, n))\n', (109, 121), True, 'import numpy as np\n'), ((138, 158), 'numpy.zeros', 'np.zeros', (['(2 * n, 1)'], {}), '((2 * n, 1))\n', (146, 158), True, 'import numpy as np\n'), ((997, 1006), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1004, 1006), True, 'import matplotlib.pyplot as plt\n'), ((390, 412), 'numpy.linalg.inv', 'np.linalg.inv', (['(A.T * A)'], {}), '(A.T * A)\n', (403, 412), True, 'import numpy as np\n')] |
import numpy as np
from numpy.lib.index_tricks import index_exp
def create_burrow(lines):
    """Build a rectangular 2-D numpy character grid from ragged input lines.

    Every line is right-padded with spaces up to the length of the longest
    line so all rows have equal width.

    Args:
        lines: iterable of strings (rows of the burrow map).

    Returns:
        np.ndarray of single characters, shape (len(lines), longest_line).
    """
    longest = max(len(line) for line in lines)
    # str.ljust pads with spaces to the target width, replacing the
    # original manual character-append loop (no-op for full-length lines)
    normalised_lines = [line.ljust(longest) for line in lines]
    return np.array([list(line) for line in normalised_lines])
def find_amphopods(burrow):
    """Return the (row, col) positions of every amphipod letter (A-D) in the grid."""
    return [idx for idx, cell in np.ndenumerate(burrow) if cell in "ABCD"]
def find_rooms(burrow):
    """Locate side rooms: vertical pairs of cells both occupied by amphipods.

    A room is recorded as [(x, y), (x+1, y)] whenever the cell at (x, y)
    and the cell directly below it both hold an amphipod letter.
    """
    occupants = "ABCD"
    rooms = []
    for idx, cell in np.ndenumerate(burrow):
        if cell not in occupants:
            continue
        row, col = idx
        if burrow[row + 1][col] in occupants:
            rooms.append([(row, col), (row + 1, col)])
    return rooms
def find_target_room(amphopod, rooms):
    """Map an amphipod letter ('A'-'D') to its destination room by position.

    Assumes rooms is ordered left-to-right so room k belongs to the k-th
    amphipod type.
    """
    return rooms["ABCD".index(amphopod)]
def find_possible_moves(burrow, position, searched=None):
    """Flood-fill the open cells ('.') reachable from position.

    Recursively explores the four orthogonal neighbours, accumulating
    visited cells in `searched` (shared across the recursive calls) and
    returning the set of reachable empty-cell coordinates.

    NOTE(review): negative indices are not rejected, so numpy wrap-around
    applies at the top/left edges; the surrounding wall characters are
    presumably relied upon to stop the search -- confirm against the maps.
    """
    if not searched:
        searched = set()
    searched.add(position)
    row, col = position
    neighbours = {(row - 1, col), (row + 1, col),
                  (row, col - 1), (row, col + 1)}
    reachable = set()
    for cell in neighbours - searched:
        r, c = cell
        try:
            content = burrow[r][c]
        except IndexError:
            # Stepped off the bottom/right edge of the grid
            continue
        if content == ".":
            reachable.add(cell)
            reachable |= find_possible_moves(burrow, cell, searched)
    return reachable
def is_in_room(position, rooms):
    """Return True if position lies inside any of the given rooms."""
    return any(position in room for room in rooms)
def is_in_hallway(position):
    """Return True for row-1 cells with column in [1, 11] (the hallway strip)."""
    row, col = position
    return row == 1 and 1 <= col <= 11
def is_outside_room(position, rooms):
    """Return True if position is a hallway cell directly above a room entrance."""
    if not is_in_hallway(position):
        return False
    # Compare against the column of each room's top (entrance) cell
    entrance_columns = {room[0][1] for room in rooms}
    return position[1] in entrance_columns
def find_energy_costs(burrow):
    # NOTE(review): work in progress -- this function currently only
    # filters out illegal moves; it never computes, accumulates, or
    # returns any energy cost (implicitly returns None).
    rooms = find_rooms(burrow)
    positions = find_amphopods(burrow)
    for position in positions:
        x, y = position
        amphopod = burrow[x][y]
        # The room this amphipod ultimately needs to reach
        target_room = find_target_room(amphopod, rooms)
        possible_moves = find_possible_moves(burrow, position)
        for move in possible_moves:
            if is_outside_room(move, rooms):
                continue # can't move to position directly outside room
            if is_in_hallway(position) and move not in target_room:
                continue # can't move from hallway into a room which isn't the target room
def main():
    # Read the puzzle map, stripping trailing newlines from each row
    with open("test.txt") as f:
        lines = [l.replace("\n", "") for l in f.readlines()]
    burrow = create_burrow(lines)
    print(burrow)
    # NOTE(review): rooms is computed but never used below
    rooms = find_rooms(burrow)
    print(find_possible_moves(burrow, (3, 9)))


if __name__ == "__main__":
    main()
| [
"numpy.ndenumerate"
] | [((539, 561), 'numpy.ndenumerate', 'np.ndenumerate', (['burrow'], {}), '(burrow)\n', (553, 561), True, 'import numpy as np\n'), ((751, 773), 'numpy.ndenumerate', 'np.ndenumerate', (['burrow'], {}), '(burrow)\n', (765, 773), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Number of measurement scenarios plotted
N = 4
# NOTE(review): the *1 tuples (5 values each) are an unused alternative
# dataset; only the *2 tuples (4 values) are plotted below.
samplerate1 = (16.474, 13.585, 5.42, 16.138, 7.455)
minstrel1 = (12.653, 10.208, 7.587, 10.867, 8.430)
minproved1 = (17.037, 14.879, 11.107, 15.846, 12.162)
samplerate2 = (13.107, 9.688, 7.982, 13.894)
minstrel2 = (11.575, 10.837, 8.320, 11.729)
minproved2 =(16.869, 15.156, 12.570, 16.292)
ind = np.arange(N)  # the x locations for the groups
width = 0.25      # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
# Three bar series side by side per scenario, offset by one bar width each
sr1 = ax.bar(ind, samplerate2, width, color='r')
mn1 = ax.bar(ind+width, minstrel2, width, color='b')
mp1 = ax.bar(ind+2*width, minproved2, width, color='y')
# add some axis labels, limits, and a legend
ax.set_ylim([0,20])
ax.set_xlim([-0.5, 6])
ax.set_ylabel('Throughput in Mbps')
# Centre the tick labels under the middle bar of each group
ax.set_xticks(ind+width+width)
ax.set_xticklabels( ('clear', 'moving', 'corner', 'interference') )
ax.legend( (mn1[0], sr1[0], mp1[0]), ('Minstrel', 'SampleRate', 'Minproved') )
def autolabel(rects):
    """Write each bar's height (rounded to one decimal) above the bar.

    Uses the module-level axes object `ax`; labels are rotated 60 degrees
    for readability.
    """
    for bar in rects:
        value = bar.get_height()
        x_centre = bar.get_x() + bar.get_width()/2.
        label = '%r' % (round(value, 1))
        # Place the text just above the top of the bar
        ax.text(x_centre, 1.05*value, label,
                ha='center', va='bottom', rotation=60)
# Annotate every bar series with its value, then display the chart
autolabel(mn1)
autolabel(sr1)
autolabel(mp1)
plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((359, 371), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (368, 371), True, 'import numpy as np\n'), ((456, 468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (466, 468), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1234), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1232, 1234), True, 'import matplotlib.pyplot as plt\n')] |
# coding: utf-8
# In[1]:
"""
Todo: combine read_lines, load_pickle, etc... to one single function load_file(),
and use if statement to see which suffix the file has. Also keep an optional param
suffix=None just in case we want to force it to load with a certain format
"""
from random import shuffle
import numpy as np
# In[2]:
def to_str(batch_examples):
    """Render token-id turns as space-separated strings.

    batch_examples is a batch of dialogue examples; each example is a
    list of turns and each turn a list of tokens. Every turn becomes a
    single string of its tokens joined by spaces.
    """
    def _turn_to_str(turn):
        return " ".join(str(token) for token in turn)

    return [[_turn_to_str(turn) for turn in example]
            for example in batch_examples]
# In[3]:
class DataGenerator(object):
    """Yields batches of dialogue examples for training/testing.

    Walks a sliding window of `max_dialogue_length` turns over each
    dialogue (truncating over-long turns) and yields string-rendered
    batches until the dialogues are exhausted, then yields None.
    """

    def __init__(self,
                 norm_dialogues,
                 adv_dialogues=None,
                 feed_both_examples=False,
                 is_training=True,
                 batch_size=192,
                 max_dialogue_length=3):
        """
        Args:
            norm_dialogues: normal dialogues; each dialogue is a list of
                turns, each turn a list of tokens.
            adv_dialogues: optional adversarial counterparts, parallel
                to norm_dialogues.
            feed_both_examples: if True, each batch is the concatenation
                of normal + adversarial examples (batch_size must be even
                and adv_dialogues must be provided).
            is_training: training mode shuffles dialogues and applies a
                90th-percentile turn-length threshold.
            batch_size: number of examples yielded per batch.
            max_dialogue_length: number of turns per example window.
        """
        self.norm_dialogues = norm_dialogues
        self.adv_dialogues = adv_dialogues
        self.feed_both_examples = feed_both_examples
        self.is_training = is_training
        if self.feed_both_examples:  # feeding normal + adversarial examples together
            assert batch_size % 2 == 0
            # Bug fix: the original asserted on the misspelled (and always-bound)
            # name 'norm_dilogues', raising NameError when this branch ran; the
            # meaningful requirement is that adversarial dialogues are supplied.
            assert adv_dialogues is not None, "Error: feeding both examples, need adv dialogues too."
            self.batch_size = batch_size // 2  # because we concatenate pos + neg examples in each batch
        else:
            self.batch_size = batch_size
        self.max_dialogue_length = max_dialogue_length

    def batch_generator(self):
        """Generator over batches.

        Each yield is a tuple:
            (dialogue_indices, start_turn_indices,
             batch_examples_as_strings, turn_lengths)
        A final `None` is yielded when the dialogues run out.
        """
        print(f"There are {len(self.norm_dialogues)} dialogues.")

        turn_lengths_lst = [
            len(turn)
            for dialogue in self.norm_dialogues
            for turn in dialogue]

        if not self.is_training:
            print("We are testing ==> no length threshold is applied.")

        # Training truncates extreme turns at the 90th length percentile
        length_threshold = int(np.percentile(turn_lengths_lst, 90)) if self.is_training else max(turn_lengths_lst)
        print("Length threshold:", length_threshold)
        print("All turns longer than this will be truncated to this length.")

        # Truncate based on length threshold, dropping too-short dialogues
        norm_dialogues = [
            [turn[(-length_threshold):]  # only keeping the last length_threshold tokens
             for turn in dialogue]
            for dialogue in self.norm_dialogues
            if len(dialogue) >= self.max_dialogue_length]
        # Bug fix: the guard used to test self.norm_dialogues (always provided),
        # which crashed iterating over None when adv_dialogues was omitted.
        if self.adv_dialogues is not None:
            adv_dialogues = [
                [turn[(-length_threshold):]  # only keeping the last length_threshold tokens
                 for turn in dialogue]
                for dialogue in self.adv_dialogues
                if len(dialogue) >= self.max_dialogue_length]

        num_dialogues = len(norm_dialogues)
        print(f"There are {num_dialogues} dialogues left.")
        assert num_dialogues >= self.batch_size, "Error: Number of dialogues less than batch_size"

        if self.is_training:  # only shuffle dataset if we are training
            if self.adv_dialogues is None:
                shuffle(norm_dialogues)
            else:
                # Keep normal/adversarial dialogues aligned while shuffling
                zipped = list(zip(norm_dialogues, adv_dialogues))
                shuffle(zipped)
                norm_dialogues, adv_dialogues = list(zip(*zipped))

        dialogue_indices = list(range(self.batch_size))  # initialize dialogue indices
        next_dialogue_index = self.batch_size  # index of the next unread dialogue
        start_turn_indices = [0] * self.batch_size  # track which turn we are at

        while True:
            norm_batch_examples = [
                norm_dialogues[dialogue_index][start_turn_index: (start_turn_index + self.max_dialogue_length)]
                for (dialogue_index, start_turn_index)
                in zip(dialogue_indices, start_turn_indices)]

            if self.adv_dialogues is not None:
                # Adversarial context turns, but keep the normal target turn
                adv_batch_examples = [
                    (adv_dialogues[dialogue_index][start_turn_index: (start_turn_index + self.max_dialogue_length - 1)]
                     + norm_dialogues[dialogue_index][(start_turn_index + self.max_dialogue_length - 1): (start_turn_index + self.max_dialogue_length)])
                    for (dialogue_index, start_turn_index)
                    in zip(dialogue_indices, start_turn_indices)]
            else:
                # Bug fix: adv_batch_examples was previously undefined when
                # adv_dialogues was None, raising NameError below; fall back
                # to the normal examples in that case.
                adv_batch_examples = norm_batch_examples

            if self.feed_both_examples:
                feed_dialogue_indices = dialogue_indices + dialogue_indices
                feed_start_turn_indices = start_turn_indices + start_turn_indices
                feed_batch_examples = norm_batch_examples + adv_batch_examples
            else:
                feed_dialogue_indices = dialogue_indices
                feed_start_turn_indices = start_turn_indices
                feed_batch_examples = adv_batch_examples

            turn_lengths_lst = [
                [len(turn) for turn in example]
                for example in feed_batch_examples]

            yield (feed_dialogue_indices,
                   feed_start_turn_indices,
                   to_str(feed_batch_examples),
                   turn_lengths_lst)

            for i in range(self.batch_size):
                start_turn_indices[i] += 1  # move on to the next example
                # If we've finished the current dialogue
                if start_turn_indices[i] + self.max_dialogue_length > len(norm_dialogues[dialogue_indices[i]]):
                    dialogue_indices[i] = next_dialogue_index  # move on to the next dialogue
                    start_turn_indices[i] = 0  # reset example index
                    next_dialogue_index += 1
                    if next_dialogue_index >= num_dialogues:
                        """todo: let the remaining dialogues finish when out of new dialogues"""
                        yield None
                        return
| [
"numpy.percentile",
"random.shuffle"
] | [((1876, 1911), 'numpy.percentile', 'np.percentile', (['turn_lengths_lst', '(90)'], {}), '(turn_lengths_lst, 90)\n', (1889, 1911), True, 'import numpy as np\n'), ((3090, 3113), 'random.shuffle', 'shuffle', (['norm_dialogues'], {}), '(norm_dialogues)\n', (3097, 3113), False, 'from random import shuffle\n'), ((3214, 3229), 'random.shuffle', 'shuffle', (['zipped'], {}), '(zipped)\n', (3221, 3229), False, 'from random import shuffle\n')] |
#!/usr/bin/env python
# coding: utf-8
# **Chapter 14 – Deep Computer Vision Using Convolutional Neural Networks**
# _This notebook contains all the sample code in chapter 14._
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/14_deep_computer_vision_with_cnns.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# </table>
# # Setup
# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
# In[1]:
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)

# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"

# Detect whether we are running inside Google Colab: the
# %tensorflow_version magic only exists there.
try:
    # %tensorflow_version only exists in Colab.
    get_ipython().run_line_magic('tensorflow_version', '2.x')
    IS_COLAB = True
except Exception:
    IS_COLAB = False

# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"

# Warn (but do not fail) when no GPU is available
if not tf.test.is_gpu_available():
    print("No GPU was detected. CNNs can be very slow without a GPU.")
    if IS_COLAB:
        print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)

# To plot pretty figures
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "cnn"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH.

    Args:
        fig_id: base file name (without extension).
        tight_layout: apply plt.tight_layout() before saving.
        fig_extension: image format and file suffix.
        resolution: output DPI.
    """
    print("Saving figure", fig_id)
    target = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# A couple utility functions to plot grayscale and RGB images:
# In[2]:
def plot_image(image):
    """Display a grayscale image with the axes hidden."""
    plt.imshow(image, interpolation="nearest", cmap="gray")
    plt.axis("off")
def plot_color_image(image):
    """Display a colour image with the axes hidden."""
    plt.imshow(image, interpolation="nearest")
    plt.axis("off")
# # What is a Convolution?
# In[3]:
# NOTE(review): numpy is re-imported here -- a notebook-cell artifact,
# harmless but redundant with the earlier import.
import numpy as np
from sklearn.datasets import load_sample_image

# Load sample images, scaling pixel values to [0, 1]
china = load_sample_image("china.jpg") / 255
flower = load_sample_image("flower.jpg") / 255
images = np.array([china, flower])
batch_size, height, width, channels = images.shape

# Create 2 filters (7x7, one output map each)
filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters[:, 3, :, 0] = 1  # vertical line
filters[3, :, :, 1] = 1  # horizontal line

# Convolve both images with both filters, preserving spatial size
outputs = tf.nn.conv2d(images, filters, strides=1, padding="SAME")
plt.imshow(outputs[0, :, :, 1], cmap="gray") # plot 1st image's 2nd feature map
plt.axis("off") # Not shown in the book
plt.show()

# In[4]:
# Show every image/feature-map combination in a 2x2 grid
for image_index in (0, 1):
    for feature_map_index in (0, 1):
        plt.subplot(2, 2, image_index * 2 + feature_map_index + 1)
        plot_image(outputs[image_index, :, :, feature_map_index])
plt.show()
# In[5]:
def crop(images, rows=(150, 220), cols=(130, 250)):
    """Crop a rectangular window out of an image array.

    The defaults reproduce the original hard-coded region of interest;
    pass other (start, stop) pairs to crop a different window, so the
    helper generalises beyond the book's sample images.

    Args:
        images: array indexable as images[r0:r1, c0:c1].
        rows: (start, stop) row bounds of the crop window.
        cols: (start, stop) column bounds of the crop window.

    Returns:
        The cropped view images[rows[0]:rows[1], cols[0]:cols[1]].
    """
    return images[rows[0]:rows[1], cols[0]:cols[1]]
# In[6]:
# Plot and save the cropped original image
plot_image(crop(images[0, :, :, 0]))
save_fig("china_original", tight_layout=False)
plt.show()

# Plot and save the two filtered feature maps, cropped the same way
for feature_map_index, filename in enumerate(["china_vertical", "china_horizontal"]):
    plot_image(crop(outputs[0, :, :, feature_map_index]))
    save_fig(filename, tight_layout=False)
    plt.show()

# In[7]:
# Visualise the two filters themselves (first input channel)
plot_image(filters[:, :, 0, 0])
plt.show()
plot_image(filters[:, :, 0, 1])
plt.show()

# ## Convolutional Layer
# Using `keras.layers.Conv2D()`:
# In[8]:
conv = keras.layers.Conv2D(filters=32, kernel_size=3, strides=1,
                           padding="SAME", activation="relu")

# In[9]:
plot_image(crop(outputs[0, :, :, 0]))
plt.show()

# ## VALID vs SAME padding
# In[10]:
def feature_map_size(input_size, kernel_size, strides=1, padding="SAME"):
    """Output size of a convolution along one spatial dimension.

    With "SAME" padding only the stride shrinks the map; any other value
    is treated as "VALID" (no padding), so the kernel extent also reduces
    the output size.
    """
    effective = input_size - (1 if padding == "SAME" else kernel_size)
    return effective // strides + 1
# In[11]:
def pad_before_and_padded_size(input_size, kernel_size, strides=1):
    """Leading padding amount and total padded size for SAME convolution.

    Returns (pad_before, padded_size) such that a VALID convolution over
    the padded input reproduces the SAME-padding output size.
    """
    out_size = feature_map_size(input_size, kernel_size, strides)
    # Total extent the kernel sweep needs, never smaller than the input
    total = max((out_size - 1) * strides + kernel_size, input_size)
    leading = (total - input_size) // 2
    return leading, total
# In[12]:
def manual_same_padding(images, kernel_size, strides=1):
if kernel_size == 1:
return images.astype(np.float32)
batch_size, height, width, channels = images.shape
top_pad, padded_height = pad_before_and_padded_size(height, kernel_size, strides)
left_pad, padded_width = pad_before_and_padded_size(width, kernel_size, strides)
padded_shape = [batch_size, padded_height, padded_width, channels]
padded_images = np.zeros(padded_shape, dtype=np.float32)
padded_images[:, top_pad:height+top_pad, left_pad:width+left_pad, :] = images
return padded_images
# Using `"SAME"` padding is equivalent to padding manually using `manual_same_padding()` then using `"VALID"` padding (confusingly, `"VALID"` padding means no padding at all):
# In[13]:
# Demonstrate that "SAME" padding == manual padding followed by "VALID".
kernel_size = 7
strides = 2
conv_valid = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding="VALID")
conv_same = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding="SAME")
valid_output = conv_valid(manual_same_padding(images, kernel_size, strides))
# Need to call build() so conv_same's weights get created
conv_same.build(tf.TensorShape(images.shape))
# Copy the weights from conv_valid to conv_same
conv_same.set_weights(conv_valid.get_weights())
same_output = conv_same(images.astype(np.float32))
assert np.allclose(valid_output.numpy(), same_output.numpy())
# # Pooling layer
# ## Max pooling
# In[14]:
max_pool = keras.layers.MaxPool2D(pool_size=2)
# In[15]:
# Pool the cropped images (crop() keeps the channel axis here).
cropped_images = np.array([crop(image) for image in images])
output = max_pool(cropped_images)
# In[16]:
# Side-by-side input/output figure; output panel is half-width (2:1 ratio).
fig = plt.figure(figsize=(12, 8))
gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[2, 1])
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_title("Input", fontsize=14)
ax1.imshow(cropped_images[0]) # plot the 1st image
ax1.axis("off")
ax2 = fig.add_subplot(gs[0, 1])
ax2.set_title("Output", fontsize=14)
ax2.imshow(output[0]) # plot the output for the 1st image
ax2.axis("off")
save_fig("china_max_pooling")
plt.show()
# ## Depth-wise pooling
# In[17]:
class DepthMaxPool(keras.layers.Layer):
    """Depthwise max pooling: pools along the channel axis (last axis).

    Keras has no built-in depthwise pooling layer, so this wraps
    tf.nn.max_pool with a kernel spanning only the 4th (channel) dimension.

    Args:
        pool_size: number of consecutive channels reduced to one maximum.
        strides: channel step between pooling windows; defaults to
            pool_size (non-overlapping windows).
        padding: "VALID" or "SAME", forwarded to tf.nn.max_pool.
    """
    def __init__(self, pool_size, strides=None, padding="VALID", **kwargs):
        super().__init__(**kwargs)
        if strides is None:
            strides = pool_size
        self.pool_size = pool_size
        self.strides = strides
        self.padding = padding
    def call(self, inputs):
        return tf.nn.max_pool(inputs,
                              ksize=(1, 1, 1, self.pool_size),
                              # BUG FIX: the original passed self.pool_size
                              # here as well, silently ignoring the strides
                              # argument stored in __init__. Default behavior
                              # (strides=None) is unchanged.
                              strides=(1, 1, 1, self.strides),
                              padding=self.padding)
# In[18]:
# Depthwise pooling over groups of 3 channels.
depth_pool = DepthMaxPool(3)
with tf.device("/cpu:0"): # there is no GPU-kernel yet
    depth_output = depth_pool(cropped_images)
depth_output.shape
# Or just use a `Lambda` layer:
# In[19]:
depth_pool = keras.layers.Lambda(lambda X: tf.nn.max_pool(
    X, ksize=(1, 1, 1, 3), strides=(1, 1, 1, 3), padding="VALID"))
with tf.device("/cpu:0"): # there is no GPU-kernel yet
    depth_output = depth_pool(cropped_images)
depth_output.shape
# In[20]:
plt.figure(figsize=(12, 8))
plt.subplot(1, 2, 1)
plt.title("Input", fontsize=14)
plot_color_image(cropped_images[0]) # plot the 1st image
plt.subplot(1, 2, 2)
plt.title("Output", fontsize=14)
plot_image(depth_output[0, ..., 0]) # plot the output for the 1st image
plt.axis("off")
plt.show()
# ## Average pooling
# In[21]:
avg_pool = keras.layers.AvgPool2D(pool_size=2)
# In[22]:
output_avg = avg_pool(cropped_images)
# In[23]:
fig = plt.figure(figsize=(12, 8))
gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[2, 1])
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_title("Input", fontsize=14)
ax1.imshow(cropped_images[0]) # plot the 1st image
ax1.axis("off")
ax2 = fig.add_subplot(gs[0, 1])
ax2.set_title("Output", fontsize=14)
ax2.imshow(output_avg[0]) # plot the output for the 1st image
ax2.axis("off")
plt.show()
# ## Global Average Pooling
# In[24]:
# Global average pooling collapses each feature map to a single mean value.
global_avg_pool = keras.layers.GlobalAvgPool2D()
global_avg_pool(cropped_images)
# In[25]:
# Equivalent formulation: mean over the two spatial axes.
output_global_avg2 = keras.layers.Lambda(lambda X: tf.reduce_mean(X, axis=[1, 2]))
output_global_avg2(cropped_images)
# # Tackling Fashion MNIST With a CNN
# In[26]:
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
# Hold out the last 5000 training images for validation.
X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]
y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]
# Standardize with training-set statistics (1e-7 avoids division by zero),
# then add a trailing channel axis for the conv layers.
X_mean = X_train.mean(axis=0, keepdims=True)
X_std = X_train.std(axis=0, keepdims=True) + 1e-7
X_train = (X_train - X_mean) / X_std
X_valid = (X_valid - X_mean) / X_std
X_test = (X_test - X_mean) / X_std
X_train = X_train[..., np.newaxis]
X_valid = X_valid[..., np.newaxis]
X_test = X_test[..., np.newaxis]
# In[27]:
from functools import partial
# Conv2D with this notebook's default hyperparameters pre-filled.
DefaultConv2D = partial(keras.layers.Conv2D,
                        kernel_size=3, activation='relu', padding="SAME")
model = keras.models.Sequential([
    DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]),
    keras.layers.MaxPooling2D(pool_size=2),
    DefaultConv2D(filters=128),
    DefaultConv2D(filters=128),
    keras.layers.MaxPooling2D(pool_size=2),
    DefaultConv2D(filters=256),
    DefaultConv2D(filters=256),
    keras.layers.MaxPooling2D(pool_size=2),
    keras.layers.Flatten(),
    keras.layers.Dense(units=128, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(units=64, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(units=10, activation='softmax'),
])
# In[28]:
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
# NOTE(review): validation_data is passed as a list; recent Keras versions
# expect a tuple (X_valid, y_valid) — confirm against the TF version in use.
history = model.fit(X_train, y_train, epochs=10, validation_data=[X_valid, y_valid])
score = model.evaluate(X_test, y_test)
X_new = X_test[:10] # pretend we have new images
y_pred = model.predict(X_new)
# ## ResNet-34
# In[29]:
# Rebind DefaultConv2D for ResNet-style convolutions (no bias: the
# following BatchNormalization layers supply the offset).
DefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, strides=1,
                        padding="SAME", use_bias=False)
class ResidualUnit(keras.layers.Layer):
    """ResNet residual unit: two conv+BN stages plus a skip connection.

    When strides > 1 the spatial size shrinks, so the skip path projects
    the input with a strided 1x1 convolution (plus BN) to match shapes.
    """
    def __init__(self, filters, strides=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        self.activation = keras.activations.get(activation)
        self.main_layers = [
            DefaultConv2D(filters, strides=strides),
            keras.layers.BatchNormalization(),
            self.activation,
            DefaultConv2D(filters),
            keras.layers.BatchNormalization(),
        ]
        # Identity skip unless the unit downsamples.
        self.skip_layers = [] if strides <= 1 else [
            DefaultConv2D(filters, kernel_size=1, strides=strides),
            keras.layers.BatchNormalization(),
        ]
    def call(self, inputs):
        main = inputs
        for layer in self.main_layers:
            main = layer(main)
        skip = inputs
        for layer in self.skip_layers:
            skip = layer(skip)
        # Add the skip connection before the final activation.
        return self.activation(main + skip)
# In[30]:
# Build ResNet-34: stem (7x7 conv, BN, ReLU, max pool), 3+4+6+3 residual
# units with 64/128/256/512 filters, then global average pooling + softmax.
model = keras.models.Sequential()
model.add(DefaultConv2D(64, kernel_size=7, strides=2,
                        input_shape=[224, 224, 3]))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("relu"))
model.add(keras.layers.MaxPool2D(pool_size=3, strides=2, padding="SAME"))
prev_filters = 64
for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:
    # Downsample (strides=2) whenever the filter count increases.
    strides = 1 if filters == prev_filters else 2
    model.add(ResidualUnit(filters, strides=strides))
    prev_filters = filters
model.add(keras.layers.GlobalAvgPool2D())
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(10, activation="softmax"))
# In[31]:
model.summary()
# ## Using a Pretrained Model
# In[32]:
model = keras.applications.resnet50.ResNet50(weights="imagenet")
# In[33]:
# ResNet50 expects 224x224 inputs; compare several resizing strategies.
images_resized = tf.image.resize(images, [224, 224])
plot_color_image(images_resized[0])
plt.show()
# In[34]:
images_resized = tf.image.resize_with_pad(images, 224, 224, antialias=True)
plot_color_image(images_resized[0])
# In[35]:
images_resized = tf.image.resize_with_crop_or_pad(images, 224, 224)
plot_color_image(images_resized[0])
plt.show()
# In[36]:
# Hand-picked normalized boxes [y1, x1, y2, x2] framing each subject.
china_box = [0, 0.03, 1, 0.68]
flower_box = [0.19, 0.26, 0.86, 0.7]
images_resized = tf.image.crop_and_resize(images, [china_box, flower_box], [0, 1], [224, 224])
plot_color_image(images_resized[0])
plt.show()
plot_color_image(images_resized[1])
plt.show()
# In[37]:
# Undo the earlier /255 scaling: preprocess_input expects 0-255 pixels.
inputs = keras.applications.resnet50.preprocess_input(images_resized * 255)
Y_proba = model.predict(inputs)
# In[38]:
Y_proba.shape
# In[39]:
top_K = keras.applications.resnet50.decode_predictions(Y_proba, top=3)
for image_index in range(len(images)):
    print("Image #{}".format(image_index))
    for class_id, name, y_proba in top_K[image_index]:
        print("  {} - {:12s} {:.2f}%".format(class_id, name, y_proba * 100))
    print()
# ## Pretrained Models for Transfer Learning
# In[40]:
import tensorflow_datasets as tfds
dataset, info = tfds.load("tf_flowers", as_supervised=True, with_info=True)
# In[41]:
info.splits
# In[42]:
info.splits["train"]
# In[43]:
class_names = info.features["label"].names
class_names
# In[44]:
n_classes = info.features["label"].num_classes
# In[45]:
dataset_size = info.splits["train"].num_examples
dataset_size
# In[46]:
# 10% test / 15% validation / 75% train.
# NOTE(review): tfds.Split.TRAIN.subsplit was removed in TFDS 3.x; newer
# code uses split strings like "train[:10%]" — confirm the TFDS version.
test_split, valid_split, train_split = tfds.Split.TRAIN.subsplit([10, 15, 75])
test_set_raw = tfds.load("tf_flowers", split=test_split, as_supervised=True)
valid_set_raw = tfds.load("tf_flowers", split=valid_split, as_supervised=True)
train_set_raw = tfds.load("tf_flowers", split=train_split, as_supervised=True)
# In[47]:
# Show the first 9 training images with their class names.
plt.figure(figsize=(12, 10))
index = 0
for image, label in train_set_raw.take(9):
    index += 1
    plt.subplot(3, 3, index)
    plt.imshow(image)
    plt.title("Class: {}".format(class_names[label]))
    plt.axis("off")
plt.show()
# Basic preprocessing:
# In[48]:
def preprocess(image, label):
    """Resize an image to 224x224 and apply Xception's preprocessing."""
    image_224 = tf.image.resize(image, [224, 224])
    return keras.applications.xception.preprocess_input(image_224), label
# Slightly fancier preprocessing (but you could add much more data augmentation):
# In[49]:
def central_crop(image):
    """Crop the larger dimension toward the center.

    Trims a quarter of the height/width excess from the top/left and the
    rest from the bottom/right (a deliberately off-center crop).
    """
    dims = tf.shape(image)
    height, width = dims[0], dims[1]
    shortest = tf.reduce_min([height, width])
    top = (height - shortest) // 4
    left = (width - shortest) // 4
    return image[top:height - top, left:width - left]
def random_crop(image):
    """Take a random square crop covering 90% of the shorter side."""
    dims = tf.shape(image)
    crop_size = tf.reduce_min([dims[0], dims[1]]) * 90 // 100
    return tf.image.random_crop(image, [crop_size, crop_size, 3])
def preprocess(image, label, randomize=False):
    """Crop, resize to 224x224 and apply Xception's preprocessing.

    With randomize=True (training-time augmentation) the crop is random
    and a random horizontal flip is applied; otherwise a central crop
    is used. Shadows the simpler preprocess() defined earlier.
    """
    if randomize:
        image = random_crop(image)
        image = tf.image.random_flip_left_right(image)
    else:
        image = central_crop(image)
    image_224 = tf.image.resize(image, [224, 224])
    return keras.applications.xception.preprocess_input(image_224), label
batch_size = 32
# Training pipeline shuffles and repeats with augmentation; validation and
# test pipelines only apply the deterministic central-crop preprocessing.
train_set = train_set_raw.shuffle(1000).repeat()
train_set = train_set.map(partial(preprocess, randomize=True)).batch(batch_size).prefetch(1)
valid_set = valid_set_raw.map(preprocess).batch(batch_size).prefetch(1)
test_set = test_set_raw.map(preprocess).batch(batch_size).prefetch(1)
# In[50]:
# Inspect one training batch ( /2 + 0.5 undoes Xception's [-1, 1] scaling).
plt.figure(figsize=(12, 12))
for X_batch, y_batch in train_set.take(1):
    for index in range(9):
        plt.subplot(3, 3, index + 1)
        plt.imshow(X_batch[index] / 2 + 0.5)
        plt.title("Class: {}".format(class_names[y_batch[index]]))
        plt.axis("off")
plt.show()
# In[51]:
plt.figure(figsize=(12, 12))
for X_batch, y_batch in test_set.take(1):
    for index in range(9):
        plt.subplot(3, 3, index + 1)
        plt.imshow(X_batch[index] / 2 + 0.5)
        plt.title("Class: {}".format(class_names[y_batch[index]]))
        plt.axis("off")
plt.show()
# In[52]:
# Transfer learning: Xception body (no top) + global pooling + new softmax.
base_model = keras.applications.xception.Xception(weights="imagenet",
                                                  include_top=False)
avg = keras.layers.GlobalAveragePooling2D()(base_model.output)
output = keras.layers.Dense(n_classes, activation="softmax")(avg)
model = keras.models.Model(inputs=base_model.input, outputs=output)
# In[53]:
for index, layer in enumerate(base_model.layers):
    print(index, layer.name)
# In[54]:
# Phase 1: freeze the pretrained layers and train only the new head.
for layer in base_model.layers:
    layer.trainable = False
# NOTE(review): lr= is a deprecated alias of learning_rate= (used below in
# the fine-tuning phase) — confirm against the installed Keras version.
optimizer = keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer,
              metrics=["accuracy"])
history = model.fit(train_set,
                    steps_per_epoch=int(0.75 * dataset_size / batch_size),
                    validation_data=valid_set,
                    validation_steps=int(0.15 * dataset_size / batch_size),
                    epochs=5)
# In[55]:
# Phase 2: unfreeze everything and fine-tune with a much lower learning rate.
for layer in base_model.layers:
    layer.trainable = True
optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9,
                                 nesterov=True, decay=0.001)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer,
              metrics=["accuracy"])
history = model.fit(train_set,
                    steps_per_epoch=int(0.75 * dataset_size / batch_size),
                    validation_data=valid_set,
                    validation_steps=int(0.15 * dataset_size / batch_size),
                    epochs=40)
# # Classification and Localization
# In[56]:
# Two-headed model: class probabilities plus 4 bounding-box coordinates.
base_model = keras.applications.xception.Xception(weights="imagenet",
                                                  include_top=False)
avg = keras.layers.GlobalAveragePooling2D()(base_model.output)
class_output = keras.layers.Dense(n_classes, activation="softmax")(avg)
loc_output = keras.layers.Dense(4)(avg)
model = keras.models.Model(inputs=base_model.input,
                           outputs=[class_output, loc_output])
model.compile(loss=["sparse_categorical_crossentropy", "mse"],
              loss_weights=[0.8, 0.2], # depends on what you care most about
              optimizer=optimizer, metrics=["accuracy"])
# In[57]:
def add_random_bounding_boxes(images, labels):
    """Attach a random (meaningless) bounding-box target per image.

    Demo-only: provides a second target so the two-headed model can train.
    """
    n_images = tf.shape(images)[0]
    boxes = tf.random.uniform([n_images, 4])
    return images, (labels, boxes)
# Tiny demo set: 5 batches repeated twice, with fake random box targets.
fake_train_set = train_set.take(5).repeat(2).map(add_random_bounding_boxes)
# In[58]:
model.fit(fake_train_set, steps_per_epoch=5, epochs=2)
# ### Mean Average Precision (mAP)
# In[59]:
def maximum_precisions(precisions):
    """Running maximum of precisions scanned right-to-left.

    This is the interpolated precision used when computing mAP: each
    recall level gets the best precision achievable at that recall or higher.
    """
    running_max_reversed = np.maximum.accumulate(precisions[::-1])
    return running_max_reversed[::-1]
# In[60]:
# Plot raw vs interpolated precision and the resulting mAP line.
recalls = np.linspace(0, 1, 11)
precisions = [0.91, 0.94, 0.96, 0.94, 0.95, 0.92, 0.80, 0.60, 0.45, 0.20, 0.10]
max_precisions = maximum_precisions(precisions)
mAP = max_precisions.mean()
plt.plot(recalls, precisions, "ro--", label="Precision")
plt.plot(recalls, max_precisions, "bo-", label="Max Precision")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.plot([0, 1], [mAP, mAP], "g:", linewidth=3, label="mAP")
plt.grid(True)
plt.axis([0, 1, 0, 1])
plt.legend(loc="lower center", fontsize=14)
plt.show()
# Transpose convolutions:
# In[61]:
tf.random.set_seed(42)
X = images_resized.numpy()
conv_transpose = keras.layers.Conv2DTranspose(filters=5, kernel_size=3, strides=2, padding="VALID")
output = conv_transpose(X)
output.shape
# In[62]:
def normalize(X):
    """Linearly rescale X so its values span the [0, 1] range."""
    low = tf.reduce_min(X)
    high = tf.reduce_max(X)
    return (X - low) / (high - low)
# Show the input next to the transposed-convolution output
# (first 3 of the 5 output channels, normalized for display).
fig = plt.figure(figsize=(12, 8))
gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[1, 2])
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_title("Input", fontsize=14)
ax1.imshow(X[0]) # plot the 1st image
ax1.axis("off")
ax2 = fig.add_subplot(gs[0, 1])
ax2.set_title("Output", fontsize=14)
ax2.imshow(normalize(output[0, ..., :3]), interpolation="bicubic") # plot the output for the 1st image
ax2.axis("off")
plt.show()
# In[63]:
def upscale_images(images, stride, kernel_size):
    """Insert zeros between pixels and zero-pad the borders of NHWC images.

    This reproduces the input expansion a transposed convolution performs
    internally: original pixels land at offset kernel_size - 1, spaced
    `stride` apart, inside an all-zero canvas.
    """
    n, h, w, c = images.shape
    out_h = (h - 1) * stride + 2 * kernel_size - 1
    out_w = (w - 1) * stride + 2 * kernel_size - 1
    upscaled = np.zeros((n, out_h, out_w, c))
    rows = slice(kernel_size - 1, (h - 1) * stride + kernel_size, stride)
    cols = slice(kernel_size - 1, (w - 1) * stride + kernel_size, stride)
    upscaled[:, rows, cols, :] = images
    return upscaled
# In[64]:
# Emulate the transposed convolution by hand: upscale the input, flip the
# learned kernels spatially, swap their in/out channel axes, then run a
# regular VALID convolution.
upscaled = upscale_images(X, stride=2, kernel_size=3)
weights, biases = conv_transpose.weights
reversed_filters = np.flip(weights.numpy(), axis=[0, 1])
reversed_filters = np.transpose(reversed_filters, [0, 1, 3, 2])
manual_output = tf.nn.conv2d(upscaled, reversed_filters, strides=1, padding="VALID")
# In[65]:
def normalize(X):
    """Rescale X to [0, 1] (redefinition of the helper above, unchanged)."""
    minimum = tf.reduce_min(X)
    span = tf.reduce_max(X) - minimum
    return (X - minimum) / span
# Compare input, zero-inserted upscaled image, and manual conv output.
fig = plt.figure(figsize=(12, 8))
gs = mpl.gridspec.GridSpec(nrows=1, ncols=3, width_ratios=[1, 2, 2])
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_title("Input", fontsize=14)
ax1.imshow(X[0]) # plot the 1st image
ax1.axis("off")
ax2 = fig.add_subplot(gs[0, 1])
ax2.set_title("Upscaled", fontsize=14)
ax2.imshow(upscaled[0], interpolation="bicubic")
ax2.axis("off")
ax3 = fig.add_subplot(gs[0, 2])
ax3.set_title("Output", fontsize=14)
ax3.imshow(normalize(manual_output[0, ..., :3]), interpolation="bicubic") # plot the output for the 1st image
ax3.axis("off")
plt.show()
# In[66]:
# Sanity check: the manual emulation matches Conv2DTranspose's output.
np.allclose(output, manual_output.numpy(), atol=1e-7)
# # Exercises
# ## 1. to 8.
# See appendix A.
# ## 9. High Accuracy CNN for MNIST
# Exercise: Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST.
# In[ ]:
# In[ ]:
# In[ ]:
# ## 10. Use transfer learning for large image classification
#
# ### 10.1)
# Create a training set containing at least 100 images per class. For example, you could classify your own pictures based on the location (beach, mountain, city, etc.), or alternatively you can just use an existing dataset (e.g., from TensorFlow Datasets).
# In[ ]:
# In[ ]:
# In[ ]:
# ### 10.2)
# Split it into a training set, a validation set and a test set.
# In[ ]:
# In[ ]:
# In[ ]:
# ### 10.3)
# Build the input pipeline, including the appropriate preprocessing operations, and optionally add data augmentation.
# In[ ]:
# In[ ]:
# In[ ]:
# ### 10.4)
# Fine-tune a pretrained model on this dataset.
# In[ ]:
# In[ ]:
# In[ ]:
# ## 11.
# Exercise: Go through TensorFlow's [DeepDream tutorial](https://goo.gl/4b2s6g). It is a fun way to familiarize yourself with various ways of visualizing the patterns learned by a CNN, and to generate art using Deep Learning.
#
# Simply download the notebook and follow its instructions. For extra fun, you can produce a series of images, by repeatedly zooming in and running the DeepDream algorithm: using a tool such as [ffmpeg](https://ffmpeg.org/) you can then create a video from these images. For example, here is a [DeepDream video](https://www.youtube.com/watch?v=l6i_fDg30p0) I made... as you will see, it quickly turns into a nightmare. ;-) You can find hundreds of [similar videos](https://www.youtube.com/results?search_query=+deepdream) (often much more artistic) on the web.
# In[ ]:
| [
"tensorflow.random.set_seed",
"matplotlib.pyplot.title",
"matplotlib.rc",
"tensorflow.image.resize_with_crop_or_pad",
"numpy.random.seed",
"tensorflow_datasets.load",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.MaxPooling2D",
"tensor... | [((1579, 1597), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1593, 1597), True, 'import numpy as np\n'), ((1598, 1620), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (1616, 1620), True, 'import tensorflow as tf\n'), ((1757, 1785), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {'labelsize': '(14)'}), "('axes', labelsize=14)\n", (1763, 1785), True, 'import matplotlib as mpl\n'), ((1786, 1815), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {'labelsize': '(12)'}), "('xtick', labelsize=12)\n", (1792, 1815), True, 'import matplotlib as mpl\n'), ((1816, 1845), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {'labelsize': '(12)'}), "('ytick', labelsize=12)\n", (1822, 1845), True, 'import matplotlib as mpl\n'), ((1931, 1983), 'os.path.join', 'os.path.join', (['PROJECT_ROOT_DIR', '"""images"""', 'CHAPTER_ID'], {}), "(PROJECT_ROOT_DIR, 'images', CHAPTER_ID)\n", (1943, 1983), False, 'import os\n'), ((1984, 2023), 'os.makedirs', 'os.makedirs', (['IMAGES_PATH'], {'exist_ok': '(True)'}), '(IMAGES_PATH, exist_ok=True)\n', (1995, 2023), False, 'import os\n'), ((2820, 2845), 'numpy.array', 'np.array', (['[china, flower]'], {}), '([china, flower])\n', (2828, 2845), True, 'import numpy as np\n'), ((2927, 2980), 'numpy.zeros', 'np.zeros', ([], {'shape': '(7, 7, channels, 2)', 'dtype': 'np.float32'}), '(shape=(7, 7, channels, 2), dtype=np.float32)\n', (2935, 2980), True, 'import numpy as np\n'), ((3076, 3132), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['images', 'filters'], {'strides': '(1)', 'padding': '"""SAME"""'}), "(images, filters, strides=1, padding='SAME')\n", (3088, 3132), True, 'import tensorflow as tf\n'), ((3134, 3178), 'matplotlib.pyplot.imshow', 'plt.imshow', (['outputs[0, :, :, 1]'], {'cmap': '"""gray"""'}), "(outputs[0, :, :, 1], cmap='gray')\n", (3144, 3178), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3229), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3222, 3229), True, 
'import matplotlib.pyplot as plt\n'), ((3254, 3264), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3262, 3264), True, 'import matplotlib.pyplot as plt\n'), ((3476, 3486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3484, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3651, 3661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3659, 3661), True, 'import matplotlib.pyplot as plt\n'), ((3910, 3920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3918, 3920), True, 'import matplotlib.pyplot as plt\n'), ((3953, 3963), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3961, 3963), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4140), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, strides=1, padding='SAME',\n activation='relu')\n", (4063, 4140), False, 'from tensorflow import keras\n'), ((4215, 4225), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4223, 4225), True, 'import matplotlib.pyplot as plt\n'), ((5626, 5719), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(1)', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': '"""VALID"""'}), "(filters=1, kernel_size=kernel_size, strides=strides,\n padding='VALID')\n", (5645, 5719), False, 'from tensorflow import keras\n'), ((5728, 5820), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(1)', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': '"""SAME"""'}), "(filters=1, kernel_size=kernel_size, strides=strides,\n padding='SAME')\n", (5747, 5820), False, 'from tensorflow import keras\n'), ((6274, 6309), 'tensorflow.keras.layers.MaxPool2D', 'keras.layers.MaxPool2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (6296, 6309), False, 'from tensorflow import keras\n'), ((6439, 6466), 'matplotlib.pyplot.figure', 'plt.figure', 
([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6449, 6466), True, 'import matplotlib.pyplot as plt\n'), ((6472, 6532), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(2)', 'width_ratios': '[2, 1]'}), '(nrows=1, ncols=2, width_ratios=[2, 1])\n', (6493, 6532), True, 'import matplotlib as mpl\n'), ((6844, 6854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6852, 6854), True, 'import matplotlib.pyplot as plt\n'), ((7918, 7945), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (7928, 7945), True, 'import matplotlib.pyplot as plt\n'), ((7946, 7966), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (7957, 7966), True, 'import matplotlib.pyplot as plt\n'), ((7967, 7998), 'matplotlib.pyplot.title', 'plt.title', (['"""Input"""'], {'fontsize': '(14)'}), "('Input', fontsize=14)\n", (7976, 7998), True, 'import matplotlib.pyplot as plt\n'), ((8057, 8077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (8068, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8078, 8110), 'matplotlib.pyplot.title', 'plt.title', (['"""Output"""'], {'fontsize': '(14)'}), "('Output', fontsize=14)\n", (8087, 8110), True, 'import matplotlib.pyplot as plt\n'), ((8184, 8199), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8192, 8199), True, 'import matplotlib.pyplot as plt\n'), ((8200, 8210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8208, 8210), True, 'import matplotlib.pyplot as plt\n'), ((8258, 8293), 'tensorflow.keras.layers.AvgPool2D', 'keras.layers.AvgPool2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (8280, 8293), False, 'from tensorflow import keras\n'), ((8366, 8393), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (8376, 8393), True, 'import matplotlib.pyplot as plt\n'), ((8399, 8459), 'matplotlib.gridspec.GridSpec', 
'mpl.gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(2)', 'width_ratios': '[2, 1]'}), '(nrows=1, ncols=2, width_ratios=[2, 1])\n', (8420, 8459), True, 'import matplotlib as mpl\n'), ((8745, 8755), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8753, 8755), True, 'import matplotlib.pyplot as plt\n'), ((8817, 8847), 'tensorflow.keras.layers.GlobalAvgPool2D', 'keras.layers.GlobalAvgPool2D', ([], {}), '()\n', (8845, 8847), False, 'from tensorflow import keras\n'), ((9114, 9154), 'tensorflow.keras.datasets.fashion_mnist.load_data', 'keras.datasets.fashion_mnist.load_data', ([], {}), '()\n', (9152, 9154), False, 'from tensorflow import keras\n'), ((9649, 9727), 'functools.partial', 'partial', (['keras.layers.Conv2D'], {'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""SAME"""'}), "(keras.layers.Conv2D, kernel_size=3, activation='relu', padding='SAME')\n", (9656, 9727), False, 'from functools import partial\n'), ((10732, 10822), 'functools.partial', 'partial', (['keras.layers.Conv2D'], {'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'use_bias': '(False)'}), "(keras.layers.Conv2D, kernel_size=3, strides=1, padding='SAME',\n use_bias=False)\n", (10739, 10822), False, 'from functools import partial\n'), ((11778, 11803), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (11801, 11803), False, 'from tensorflow import keras\n'), ((12497, 12553), 'tensorflow.keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (12533, 12553), False, 'from tensorflow import keras\n'), ((12585, 12620), 'tensorflow.image.resize', 'tf.image.resize', (['images', '[224, 224]'], {}), '(images, [224, 224])\n', (12600, 12620), True, 'import tensorflow as tf\n'), ((12657, 12667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12665, 12667), True, 'import matplotlib.pyplot as plt\n'), ((12699, 12757), 
'tensorflow.image.resize_with_pad', 'tf.image.resize_with_pad', (['images', '(224)', '(224)'], {'antialias': '(True)'}), '(images, 224, 224, antialias=True)\n', (12723, 12757), True, 'import tensorflow as tf\n'), ((12825, 12875), 'tensorflow.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['images', '(224)', '(224)'], {}), '(images, 224, 224)\n', (12857, 12875), True, 'import tensorflow as tf\n'), ((12912, 12922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12920, 12922), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13099), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['images', '[china_box, flower_box]', '[0, 1]', '[224, 224]'], {}), '(images, [china_box, flower_box], [0, 1], [224, 224])\n', (13046, 13099), True, 'import tensorflow as tf\n'), ((13136, 13146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13144, 13146), True, 'import matplotlib.pyplot as plt\n'), ((13183, 13193), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13191, 13193), True, 'import matplotlib.pyplot as plt\n'), ((13217, 13283), 'tensorflow.keras.applications.resnet50.preprocess_input', 'keras.applications.resnet50.preprocess_input', (['(images_resized * 255)'], {}), '(images_resized * 255)\n', (13261, 13283), False, 'from tensorflow import keras\n'), ((13366, 13428), 'tensorflow.keras.applications.resnet50.decode_predictions', 'keras.applications.resnet50.decode_predictions', (['Y_proba'], {'top': '(3)'}), '(Y_proba, top=3)\n', (13412, 13428), False, 'from tensorflow import keras\n'), ((13767, 13826), 'tensorflow_datasets.load', 'tfds.load', (['"""tf_flowers"""'], {'as_supervised': '(True)', 'with_info': '(True)'}), "('tf_flowers', as_supervised=True, with_info=True)\n", (13776, 13826), True, 'import tensorflow_datasets as tfds\n'), ((14147, 14186), 'tensorflow_datasets.Split.TRAIN.subsplit', 'tfds.Split.TRAIN.subsplit', (['[10, 15, 75]'], {}), '([10, 15, 75])\n', (14172, 14186), True, 'import 
tensorflow_datasets as tfds\n'), ((14203, 14264), 'tensorflow_datasets.load', 'tfds.load', (['"""tf_flowers"""'], {'split': 'test_split', 'as_supervised': '(True)'}), "('tf_flowers', split=test_split, as_supervised=True)\n", (14212, 14264), True, 'import tensorflow_datasets as tfds\n'), ((14281, 14343), 'tensorflow_datasets.load', 'tfds.load', (['"""tf_flowers"""'], {'split': 'valid_split', 'as_supervised': '(True)'}), "('tf_flowers', split=valid_split, as_supervised=True)\n", (14290, 14343), True, 'import tensorflow_datasets as tfds\n'), ((14360, 14422), 'tensorflow_datasets.load', 'tfds.load', (['"""tf_flowers"""'], {'split': 'train_split', 'as_supervised': '(True)'}), "('tf_flowers', split=train_split, as_supervised=True)\n", (14369, 14422), True, 'import tensorflow_datasets as tfds\n'), ((14437, 14465), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (14447, 14465), True, 'import matplotlib.pyplot as plt\n'), ((14660, 14670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14668, 14670), True, 'import matplotlib.pyplot as plt\n'), ((16219, 16247), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (16229, 16247), True, 'import matplotlib.pyplot as plt\n'), ((16492, 16502), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16500, 16502), True, 'import matplotlib.pyplot as plt\n'), ((16517, 16545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (16527, 16545), True, 'import matplotlib.pyplot as plt\n'), ((16789, 16799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16797, 16799), True, 'import matplotlib.pyplot as plt\n'), ((16827, 16902), 'tensorflow.keras.applications.xception.Xception', 'keras.applications.xception.Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (16863, 16902), False, 'from tensorflow import keras\n'), 
((17090, 17149), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'base_model.input', 'outputs': 'output'}), '(inputs=base_model.input, outputs=output)\n', (17108, 17149), False, 'from tensorflow import keras\n'), ((17330, 17384), 'tensorflow.keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.2)', 'momentum': '(0.9)', 'decay': '(0.01)'}), '(lr=0.2, momentum=0.9, decay=0.01)\n', (17350, 17384), False, 'from tensorflow import keras\n'), ((17841, 17928), 'tensorflow.keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'learning_rate': '(0.01)', 'momentum': '(0.9)', 'nesterov': '(True)', 'decay': '(0.001)'}), '(learning_rate=0.01, momentum=0.9, nesterov=True, decay\n =0.001)\n', (17861, 17928), False, 'from tensorflow import keras\n'), ((18392, 18467), 'tensorflow.keras.applications.xception.Xception', 'keras.applications.xception.Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (18428, 18467), False, 'from tensorflow import keras\n'), ((18701, 18780), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'base_model.input', 'outputs': '[class_output, loc_output]'}), '(inputs=base_model.input, outputs=[class_output, loc_output])\n', (18719, 18780), False, 'from tensorflow import keras\n'), ((19488, 19509), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (19499, 19509), True, 'import numpy as np\n'), ((19667, 19723), 'matplotlib.pyplot.plot', 'plt.plot', (['recalls', 'precisions', '"""ro--"""'], {'label': '"""Precision"""'}), "(recalls, precisions, 'ro--', label='Precision')\n", (19675, 19723), True, 'import matplotlib.pyplot as plt\n'), ((19724, 19787), 'matplotlib.pyplot.plot', 'plt.plot', (['recalls', 'max_precisions', '"""bo-"""'], {'label': '"""Max Precision"""'}), "(recalls, max_precisions, 'bo-', label='Max Precision')\n", (19732, 19787), True, 'import matplotlib.pyplot as plt\n'), ((19788, 19808), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (19798, 19808), True, 'import matplotlib.pyplot as plt\n'), ((19809, 19832), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (19819, 19832), True, 'import matplotlib.pyplot as plt\n'), ((19833, 19893), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[mAP, mAP]', '"""g:"""'], {'linewidth': '(3)', 'label': '"""mAP"""'}), "([0, 1], [mAP, mAP], 'g:', linewidth=3, label='mAP')\n", (19841, 19893), True, 'import matplotlib.pyplot as plt\n'), ((19894, 19908), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (19902, 19908), True, 'import matplotlib.pyplot as plt\n'), ((19909, 19931), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (19917, 19931), True, 'import matplotlib.pyplot as plt\n'), ((19932, 19975), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""', 'fontsize': '(14)'}), "(loc='lower center', fontsize=14)\n", (19942, 19975), True, 'import matplotlib.pyplot as plt\n'), ((19976, 19986), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19984, 19986), True, 'import matplotlib.pyplot as plt\n'), ((20028, 20050), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (20046, 20050), True, 'import tensorflow as tf\n'), ((20096, 20183), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', ([], {'filters': '(5)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""VALID"""'}), "(filters=5, kernel_size=3, strides=2, padding=\n 'VALID')\n", (20124, 20183), False, 'from tensorflow import keras\n'), ((20332, 20359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (20342, 20359), True, 'import matplotlib.pyplot as plt\n'), ((20365, 20425), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(2)', 'width_ratios': '[1, 2]'}), '(nrows=1, ncols=2, 
width_ratios=[1, 2])\n', (20386, 20425), True, 'import matplotlib as mpl\n'), ((20739, 20749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20747, 20749), True, 'import matplotlib.pyplot as plt\n'), ((21471, 21515), 'numpy.transpose', 'np.transpose', (['reversed_filters', '[0, 1, 3, 2]'], {}), '(reversed_filters, [0, 1, 3, 2])\n', (21483, 21515), True, 'import numpy as np\n'), ((21532, 21600), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['upscaled', 'reversed_filters'], {'strides': '(1)', 'padding': '"""VALID"""'}), "(upscaled, reversed_filters, strides=1, padding='VALID')\n", (21544, 21600), True, 'import tensorflow as tf\n'), ((21714, 21741), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (21724, 21741), True, 'import matplotlib.pyplot as plt\n'), ((21747, 21810), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(3)', 'width_ratios': '[1, 2, 2]'}), '(nrows=1, ncols=3, width_ratios=[1, 2, 2])\n', (21768, 21810), True, 'import matplotlib as mpl\n'), ((22267, 22277), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22275, 22277), True, 'import matplotlib.pyplot as plt\n'), ((1276, 1302), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (1300, 1302), True, 'import tensorflow as tf\n'), ((2114, 2169), 'os.path.join', 'os.path.join', (['IMAGES_PATH', "(fig_id + '.' + fig_extension)"], {}), "(IMAGES_PATH, fig_id + '.' 
+ fig_extension)\n", (2126, 2169), False, 'import os\n'), ((2257, 2312), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': 'fig_extension', 'dpi': 'resolution'}), '(path, format=fig_extension, dpi=resolution)\n', (2268, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2417, 2472), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(image, cmap='gray', interpolation='nearest')\n", (2427, 2472), True, 'import matplotlib.pyplot as plt\n'), ((2477, 2492), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2485, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2569), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'interpolation': '"""nearest"""'}), "(image, interpolation='nearest')\n", (2537, 2569), True, 'import matplotlib.pyplot as plt\n'), ((2574, 2589), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2582, 2589), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2757), 'sklearn.datasets.load_sample_image', 'load_sample_image', (['"""china.jpg"""'], {}), "('china.jpg')\n", (2744, 2757), False, 'from sklearn.datasets import load_sample_image\n'), ((2773, 2804), 'sklearn.datasets.load_sample_image', 'load_sample_image', (['"""flower.jpg"""'], {}), "('flower.jpg')\n", (2790, 2804), False, 'from sklearn.datasets import load_sample_image\n'), ((3854, 3864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3862, 3864), True, 'import matplotlib.pyplot as plt\n'), ((5245, 5285), 'numpy.zeros', 'np.zeros', (['padded_shape'], {'dtype': 'np.float32'}), '(padded_shape, dtype=np.float32)\n', (5253, 5285), True, 'import numpy as np\n'), ((5970, 5998), 'tensorflow.TensorShape', 'tf.TensorShape', (['images.shape'], {}), '(images.shape)\n', (5984, 5998), True, 'import tensorflow as tf\n'), ((7496, 7515), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7505, 7515), True, 'import tensorflow as tf\n'), 
((7789, 7808), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7798, 7808), True, 'import tensorflow as tf\n'), ((11920, 11953), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (11951, 11953), False, 'from tensorflow import keras\n'), ((11965, 11996), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (11988, 11996), False, 'from tensorflow import keras\n'), ((12008, 12070), 'tensorflow.keras.layers.MaxPool2D', 'keras.layers.MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(pool_size=3, strides=2, padding='SAME')\n", (12030, 12070), False, 'from tensorflow import keras\n'), ((12292, 12322), 'tensorflow.keras.layers.GlobalAvgPool2D', 'keras.layers.GlobalAvgPool2D', ([], {}), '()\n', (12320, 12322), False, 'from tensorflow import keras\n'), ((12334, 12356), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (12354, 12356), False, 'from tensorflow import keras\n'), ((12368, 12412), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (12386, 12412), False, 'from tensorflow import keras\n'), ((14538, 14562), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', 'index'], {}), '(3, 3, index)\n', (14549, 14562), True, 'import matplotlib.pyplot as plt\n'), ((14567, 14584), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (14577, 14584), True, 'import matplotlib.pyplot as plt\n'), ((14643, 14658), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (14651, 14658), True, 'import matplotlib.pyplot as plt\n'), ((14759, 14793), 'tensorflow.image.resize', 'tf.image.resize', (['image', '[224, 224]'], {}), '(image, [224, 224])\n', (14774, 14793), True, 'import tensorflow as tf\n'), ((14812, 14871), 'tensorflow.keras.applications.xception.preprocess_input', 
'keras.applications.xception.preprocess_input', (['resized_image'], {}), '(resized_image)\n', (14856, 14871), False, 'from tensorflow import keras\n'), ((15036, 15051), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (15044, 15051), True, 'import tensorflow as tf\n'), ((15066, 15101), 'tensorflow.reduce_min', 'tf.reduce_min', (['[shape[0], shape[1]]'], {}), '([shape[0], shape[1]])\n', (15079, 15101), True, 'import tensorflow as tf\n'), ((15359, 15374), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (15367, 15374), True, 'import tensorflow as tf\n'), ((15448, 15498), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '[min_dim, min_dim, 3]'], {}), '(image, [min_dim, min_dim, 3])\n', (15468, 15498), True, 'import tensorflow as tf\n'), ((15753, 15795), 'tensorflow.image.resize', 'tf.image.resize', (['cropped_image', '[224, 224]'], {}), '(cropped_image, [224, 224])\n', (15768, 15795), True, 'import tensorflow as tf\n'), ((15814, 15873), 'tensorflow.keras.applications.xception.preprocess_input', 'keras.applications.xception.preprocess_input', (['resized_image'], {}), '(resized_image)\n', (15858, 15873), False, 'from tensorflow import keras\n'), ((16959, 16996), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (16994, 16996), False, 'from tensorflow import keras\n'), ((17025, 17076), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (17043, 17076), False, 'from tensorflow import keras\n'), ((18524, 18561), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (18559, 18561), False, 'from tensorflow import keras\n'), ((18596, 18647), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (18614, 18647), False, 'from tensorflow import 
keras\n'), ((18666, 18687), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(4)'], {}), '(4)\n', (18684, 18687), False, 'from tensorflow import keras\n'), ((20883, 21008), 'numpy.zeros', 'np.zeros', (['(batch_size, (height - 1) * stride + 2 * kernel_size - 1, (width - 1) *\n stride + 2 * kernel_size - 1, channels)'], {}), '((batch_size, (height - 1) * stride + 2 * kernel_size - 1, (width -\n 1) * stride + 2 * kernel_size - 1, channels))\n', (20891, 21008), True, 'import numpy as np\n'), ((2234, 2252), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2250, 2252), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3408), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(image_index * 2 + feature_map_index + 1)'], {}), '(2, 2, image_index * 2 + feature_map_index + 1)\n', (3361, 3408), True, 'import matplotlib.pyplot as plt\n'), ((7245, 7361), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['inputs'], {'ksize': '(1, 1, 1, self.pool_size)', 'strides': '(1, 1, 1, self.pool_size)', 'padding': 'self.padding'}), '(inputs, ksize=(1, 1, 1, self.pool_size), strides=(1, 1, 1,\n self.pool_size), padding=self.padding)\n', (7259, 7361), True, 'import tensorflow as tf\n'), ((7701, 7777), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['X'], {'ksize': '(1, 1, 1, 3)', 'strides': '(1, 1, 1, 3)', 'padding': '"""VALID"""'}), "(X, ksize=(1, 1, 1, 3), strides=(1, 1, 1, 3), padding='VALID')\n", (7715, 7777), True, 'import tensorflow as tf\n'), ((8945, 8975), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['X'], {'axis': '[1, 2]'}), '(X, axis=[1, 2])\n', (8959, 8975), True, 'import tensorflow as tf\n'), ((9862, 9900), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (9887, 9900), False, 'from tensorflow import keras\n'), ((9970, 10008), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (9995, 10008), False, 'from 
tensorflow import keras\n'), ((10078, 10116), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (10103, 10116), False, 'from tensorflow import keras\n'), ((10122, 10144), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (10142, 10144), False, 'from tensorflow import keras\n'), ((10150, 10198), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(128)', 'activation': '"""relu"""'}), "(units=128, activation='relu')\n", (10168, 10198), False, 'from tensorflow import keras\n'), ((10204, 10229), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (10224, 10229), False, 'from tensorflow import keras\n'), ((10235, 10282), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(64)', 'activation': '"""relu"""'}), "(units=64, activation='relu')\n", (10253, 10282), False, 'from tensorflow import keras\n'), ((10288, 10313), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (10308, 10313), False, 'from tensorflow import keras\n'), ((10319, 10369), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (10337, 10369), False, 'from tensorflow import keras\n'), ((11018, 11051), 'tensorflow.keras.activations.get', 'keras.activations.get', (['activation'], {}), '(activation)\n', (11039, 11051), False, 'from tensorflow import keras\n'), ((15632, 15678), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['cropped_image'], {}), '(cropped_image)\n', (15663, 15678), True, 'import tensorflow as tf\n'), ((16326, 16354), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(index + 1)'], {}), '(3, 3, index + 1)\n', (16337, 16354), True, 'import matplotlib.pyplot as plt\n'), ((16363, 16399), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(X_batch[index] / 2 + 0.5)'], 
{}), '(X_batch[index] / 2 + 0.5)\n', (16373, 16399), True, 'import matplotlib.pyplot as plt\n'), ((16475, 16490), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16483, 16490), True, 'import matplotlib.pyplot as plt\n'), ((16623, 16651), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(index + 1)'], {}), '(3, 3, index + 1)\n', (16634, 16651), True, 'import matplotlib.pyplot as plt\n'), ((16660, 16696), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(X_batch[index] / 2 + 0.5)'], {}), '(X_batch[index] / 2 + 0.5)\n', (16670, 16696), True, 'import matplotlib.pyplot as plt\n'), ((16772, 16787), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16780, 16787), True, 'import matplotlib.pyplot as plt\n'), ((11146, 11179), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (11177, 11179), False, 'from tensorflow import keras\n'), ((11258, 11291), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (11289, 11291), False, 'from tensorflow import keras\n'), ((15389, 15424), 'tensorflow.reduce_min', 'tf.reduce_min', (['[shape[0], shape[1]]'], {}), '([shape[0], shape[1]])\n', (15402, 15424), True, 'import tensorflow as tf\n'), ((19442, 19461), 'numpy.flip', 'np.flip', (['precisions'], {}), '(precisions)\n', (19449, 19461), True, 'import numpy as np\n'), ((20267, 20283), 'tensorflow.reduce_min', 'tf.reduce_min', (['X'], {}), '(X)\n', (20280, 20283), True, 'import tensorflow as tf\n'), ((20288, 20304), 'tensorflow.reduce_max', 'tf.reduce_max', (['X'], {}), '(X)\n', (20301, 20304), True, 'import tensorflow as tf\n'), ((20307, 20323), 'tensorflow.reduce_min', 'tf.reduce_min', (['X'], {}), '(X)\n', (20320, 20323), True, 'import tensorflow as tf\n'), ((21649, 21665), 'tensorflow.reduce_min', 'tf.reduce_min', (['X'], {}), '(X)\n', (21662, 21665), True, 'import tensorflow as tf\n'), ((21670, 21686), 'tensorflow.reduce_max', 
'tf.reduce_max', (['X'], {}), '(X)\n', (21683, 21686), True, 'import tensorflow as tf\n'), ((21689, 21705), 'tensorflow.reduce_min', 'tf.reduce_min', (['X'], {}), '(X)\n', (21702, 21705), True, 'import tensorflow as tf\n'), ((11468, 11501), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (11499, 11501), False, 'from tensorflow import keras\n'), ((19103, 19119), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (19111, 19119), True, 'import tensorflow as tf\n'), ((15996, 16031), 'functools.partial', 'partial', (['preprocess'], {'randomize': '(True)'}), '(preprocess, randomize=True)\n', (16003, 16031), False, 'from functools import partial\n')] |
import os
from typing import Dict, List, Tuple
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)+'/'+'..'))
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from common.PER import PrioritizedReplayBuffer
from common.experience_replay import ReplayBuffer
from common.network import LinearNetwork
from common.network import LinearDuelingNetwork
from common.network import NoisyLinearNetwork
from common.network import NoisyLinearDuelingNetwork
from common.arguments import get_args
from tqdm import tqdm
# Parse command-line hyper-parameters once at import time; this module-level
# `config` is also what the training script passes into DQNAgent.
config = get_args()
class DQNAgent(object):
    """Deep Q-Network agent with optional Rainbow-style extensions.

    Constructor flags toggle the extensions independently:
      * double  -- Double DQN target (online net selects, target net evaluates)
      * PER     -- prioritized experience replay with importance sampling
      * dueling -- dueling network architecture
      * noisy   -- NoisyNet exploration layers (disables epsilon-greedy)
    """

    def __init__(
        self,
        env: "gym.Env",
        config,
        double: bool = False,
        PER: bool = False,
        dueling: bool = False,
        noisy: bool = False,
        opt: str = 'Adam',
    ):
        """Build replay memory, online/target networks and the optimizer.

        Args:
            env: environment with a flat observation vector and a discrete
                action space.
            config: parsed hyper-parameters (batch_size, epsilon schedule,
                gamma, memory_size, target_update, beta, prior_eps, alpha).
            double, PER, dueling, noisy: enable the matching extension.
            opt: optimizer name, 'Adam' (default) or 'RMSprop'.

        Raises:
            ValueError: if ``opt`` names an unsupported optimizer.
        """
        self.obs_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.n
        self.env = env
        self.batch_size = config.batch_size
        self.epsilon = config.max_epsilon
        self.epsilon_decay = config.epsilon_decay
        self.max_epsilon = config.max_epsilon
        self.min_epsilon = config.min_epsilon
        self.target_update = config.target_update
        self.gamma = config.gamma
        # Extension flags.
        self.PER = PER
        self.double = double
        self.dueling = dueling
        self.noisy = noisy
        # PER hyper-parameters; beta is annealed toward 1.0 during training.
        self.beta = config.beta
        self.prior_eps = config.prior_eps
        if not self.PER:
            self.memory = ReplayBuffer(self.obs_dim, config.memory_size, config.batch_size)
        else:
            self.memory = PrioritizedReplayBuffer(self.obs_dim, config.memory_size, config.batch_size, config.alpha)
        # device: cpu / gpu
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        print(self.device)
        if self.double:
            print("Double")
        if self.PER:
            print("PER")
        print("DQN")
        # Online network (trained) and target network (periodically synced).
        if not self.dueling and not self.noisy:
            self.dqn = LinearNetwork(self.obs_dim, self.action_dim).to(self.device)
            self.dqn_target = LinearNetwork(self.obs_dim, self.action_dim).to(self.device)
        elif self.dueling and not self.noisy:
            print("Dueling")
            self.dqn = LinearDuelingNetwork(self.obs_dim, self.action_dim).to(self.device)
            self.dqn_target = LinearDuelingNetwork(self.obs_dim, self.action_dim).to(self.device)
        elif not self.dueling and self.noisy:
            print("Noisy")
            self.dqn = NoisyLinearNetwork(self.obs_dim, self.action_dim).to(self.device)
            self.dqn_target = NoisyLinearNetwork(self.obs_dim, self.action_dim).to(self.device)
        else:  # dueling and noisy
            print("Dueling")
            print("Noisy")
            self.dqn = NoisyLinearDuelingNetwork(self.obs_dim, self.action_dim).to(self.device)
            self.dqn_target = NoisyLinearDuelingNetwork(self.obs_dim, self.action_dim).to(self.device)
        self.dqn_target.load_state_dict(self.dqn.state_dict())
        # The target network is never trained directly; keep it in eval mode
        # so dropout / batch-norm layers (if any) behave deterministically.
        self.dqn_target.eval()
        # optimizer
        if opt == 'RMSprop':
            self.optimizer = optim.RMSprop(self.dqn.parameters(), lr=2e-4, momentum=5e-2)
        elif opt == 'Adam':
            self.optimizer = optim.Adam(self.dqn.parameters())
        else:
            # Fail fast instead of leaving the agent without an optimizer.
            raise ValueError("unsupported optimizer: %r" % (opt,))
        # Partial transition [state, action]; completed in step().
        self.transition = list()
        # mode: train / test
        self.is_test = False

    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action epsilon-greedily (always greedy when noisy=True)."""
        if self.epsilon > np.random.random() and not self.noisy:
            # Exploration: a plain action sampled from the environment.
            # This is NOT a tensor, so it must not be detached (the previous
            # code called .detach() on it unconditionally and crashed here).
            selected_action = self.env.action_space.sample()
        else:
            selected_action = self.dqn(torch.FloatTensor(state).to(self.device)).argmax()
            # Only the network output is a tensor; detach it to numpy.
            selected_action = selected_action.detach().cpu().numpy()
        if not self.is_test:
            # Remember [state, action]; reward/next_state/done added in step().
            self.transition = [state, selected_action]
        return selected_action

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:
        """Apply ``action`` to the environment and store the full transition."""
        next_state, reward, done, _ = self.env.step(action)
        if not self.is_test:
            self.transition += [reward, next_state, done]
            # *list unpacks [state, action, reward, next_state, done]
            self.memory.store(*self.transition)
        return next_state, reward, done

    def update_model(self) -> torch.Tensor:
        """Sample a batch, take one gradient step, and return the loss value."""
        if not self.PER:
            samples = self.memory.sample_batch()
            loss = self._compute_dqn_loss(samples)
        else:
            # PER: sampling needs beta for the importance-sampling weights.
            samples = self.memory.sample_batch(self.beta)
            weights = torch.FloatTensor(
                samples["weights"].reshape(-1, 1)
            ).to(self.device)
            indices = samples["indices"]
            # Importance-sample the element-wise loss before averaging.
            elementwise_loss = self._compute_dqn_loss(samples)
            loss = torch.mean(elementwise_loss * weights)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # PER: new priorities are the per-sample losses plus a small epsilon.
        if self.PER:
            loss_for_prior = elementwise_loss.detach().cpu().numpy()
            new_priorities = loss_for_prior + self.prior_eps
            self.memory.update_priorities(indices, new_priorities)
        # NoisyNet: resample the noise after every update.
        if self.noisy:
            self.dqn.reset_noise()
            self.dqn_target.reset_noise()
        return loss.item()

    def _compute_dqn_loss(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:
        """Return the smooth-L1 DQN loss for a sampled batch.

        Returns a scalar loss, or an element-wise loss tensor when PER is
        enabled (so the caller can apply importance-sampling weights).
        """
        device = self.device  # for shortening the following lines
        state = torch.FloatTensor(samples["obs"]).to(device)
        next_state = torch.FloatTensor(samples["next_obs"]).to(device)
        action = torch.LongTensor(samples["acts"].reshape(-1, 1)).to(device)
        reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device)
        done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
        curr_q_value = self.dqn(state).gather(1, action)
        if not self.double:
            # G_t = r + gamma * max_a Q_target(s_{t+1}, a)  if not terminal
            #     = r                                        otherwise
            next_q_value = self.dqn_target(next_state).max(dim=1, keepdim=True)[0].detach()
        else:
            # Double DQN: online net selects the action, target net evaluates
            # it, which reduces Q over-estimation bias.
            next_q_value = self.dqn_target(next_state).gather(1, self.dqn(next_state).argmax(dim=1, keepdim=True)).detach()
        mask = 1 - done
        target = (reward + self.gamma * next_q_value * mask).to(self.device)
        if not self.PER:
            return F.smooth_l1_loss(curr_q_value, target)
        # PER needs the element-wise loss to update priorities.
        return F.smooth_l1_loss(curr_q_value, target, reduction="none")

    def _target_hard_update(self):
        """Hard update: target <- local."""
        self.dqn_target.load_state_dict(self.dqn.state_dict())

    def train(self, config):
        """Run the training loop for ``config.num_frames`` environment steps.

        Returns:
            (frame_idx, scores, losses, epsilons) collected over training.
        """
        self.is_test = False
        state = self.env.reset()
        update_cnt = 0
        epsilons = []
        losses = []
        scores = []
        score = 0
        frame_idx = 0  # keeps the return value defined even if num_frames == 0
        for frame_idx in tqdm(range(1, config.num_frames + 1)):
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
            # PER: anneal beta toward 1.0 over the course of training.
            if self.PER:
                fraction = min(frame_idx / config.num_frames, 1.0)
                self.beta = self.beta + fraction * (1.0 - self.beta)
            # Episode finished: record the score and reset the environment.
            if done:
                state = self.env.reset()
                scores.append(score)
                score = 0
            # Learn once the replay memory holds a full batch.
            if len(self.memory) >= self.batch_size:
                loss = self.update_model()
                losses.append(loss)
                update_cnt += 1
                # Linearly decay epsilon toward min_epsilon.
                self.epsilon = max(
                    self.min_epsilon, self.epsilon - (
                        self.max_epsilon - self.min_epsilon
                    ) * self.epsilon_decay
                )
                epsilons.append(self.epsilon)
                # Periodically copy the online weights into the target net.
                if update_cnt % self.target_update == 0:
                    self._target_hard_update()
        self.env.close()
        return frame_idx, scores, losses, epsilons

    def test(self) -> None:
        """Play one rendered episode greedily and print the total score."""
        self.is_test = True
        state = self.env.reset()
        done = False
        score = 0
        while not done:
            self.env.render()
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
        print("score: ", score)
        self.env.close()
"common.arguments.get_args",
"torch.mean",
"common.network.NoisyLinearNetwork",
"os.path.dirname",
"torch.FloatTensor",
"common.network.LinearNetwork",
"common.network.NoisyLinearDuelingNetwork",
"common.PER.PrioritizedReplayBuffer",
"numpy.random.random",
"torch.cuda.is_available",
"common.expe... | [((609, 619), 'common.arguments.get_args', 'get_args', ([], {}), '()\n', (617, 619), False, 'from common.arguments import get_args\n'), ((1701, 1766), 'common.experience_replay.ReplayBuffer', 'ReplayBuffer', (['self.obs_dim', 'config.memory_size', 'config.batch_size'], {}), '(self.obs_dim, config.memory_size, config.batch_size)\n', (1713, 1766), False, 'from common.experience_replay import ReplayBuffer\n'), ((1807, 1901), 'common.PER.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['self.obs_dim', 'config.memory_size', 'config.batch_size', 'config.alpha'], {}), '(self.obs_dim, config.memory_size, config.batch_size,\n config.alpha)\n', (1830, 1901), False, 'from common.PER import PrioritizedReplayBuffer\n'), ((5655, 5693), 'torch.mean', 'torch.mean', (['(elementwise_loss * weights)'], {}), '(elementwise_loss * weights)\n', (5665, 5693), False, 'import torch\n'), ((7404, 7442), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['curr_q_value', 'target'], {}), '(curr_q_value, target)\n', (7420, 7442), True, 'import torch.nn.functional as F\n'), ((7558, 7614), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['curr_q_value', 'target'], {'reduction': '"""none"""'}), "(curr_q_value, target, reduction='none')\n", (7574, 7614), True, 'import torch.nn.functional as F\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n'), ((1985, 2010), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2008, 2010), False, 'import torch\n'), ((4086, 4104), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4102, 4104), True, 'import numpy as np\n'), ((6401, 6434), 'torch.FloatTensor', 'torch.FloatTensor', (["samples['obs']"], {}), "(samples['obs'])\n", (6418, 6434), False, 'import torch\n'), ((6467, 6505), 'torch.FloatTensor', 'torch.FloatTensor', (["samples['next_obs']"], {}), "(samples['next_obs'])\n", (6484, 6505), False, 'import torch\n'), 
((2288, 2332), 'common.network.LinearNetwork', 'LinearNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (2301, 2332), False, 'from common.network import LinearNetwork\n'), ((2379, 2423), 'common.network.LinearNetwork', 'LinearNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (2392, 2423), False, 'from common.network import LinearNetwork\n'), ((2539, 2590), 'common.network.LinearDuelingNetwork', 'LinearDuelingNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (2559, 2590), False, 'from common.network import LinearDuelingNetwork\n'), ((2637, 2688), 'common.network.LinearDuelingNetwork', 'LinearDuelingNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (2657, 2688), False, 'from common.network import LinearDuelingNetwork\n'), ((2802, 2851), 'common.network.NoisyLinearNetwork', 'NoisyLinearNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (2820, 2851), False, 'from common.network import NoisyLinearNetwork\n'), ((2898, 2947), 'common.network.NoisyLinearNetwork', 'NoisyLinearNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (2916, 2947), False, 'from common.network import NoisyLinearNetwork\n'), ((3086, 3142), 'common.network.NoisyLinearDuelingNetwork', 'NoisyLinearDuelingNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (3111, 3142), False, 'from common.network import NoisyLinearDuelingNetwork\n'), ((3189, 3245), 'common.network.NoisyLinearDuelingNetwork', 'NoisyLinearDuelingNetwork', (['self.obs_dim', 'self.action_dim'], {}), '(self.obs_dim, self.action_dim)\n', (3214, 3245), False, 'from common.network import NoisyLinearDuelingNetwork\n'), ((4239, 4263), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (4256, 4263), False, 'import torch\n')] |
import eigenpy
eigenpy.switchToNumpyArray()

import numpy as np
import numpy.linalg as la

# Random symmetric matrix with a dominant diagonal, so it is positive
# definite and the LDLT factorization is well conditioned.
dim = 100
A = np.random.rand(dim, dim)
A = 0.5 * (A + A.T) + np.diag(10. + np.random.rand(dim))

# Factor A = P^T * L * diag(D) * L^T * P and extract the three pieces.
ldlt = eigenpy.LDLT(A)
L = ldlt.matrixL()
D = ldlt.vectorD()
P = ldlt.transpositionsP()

# Rebuild A from the factors and verify it matches the input.
reconstruction = np.transpose(P).dot(L.dot(np.diag(D).dot(np.transpose(L).dot(P))))
assert eigenpy.is_approx(reconstruction, A)
| [
"eigenpy.LDLT",
"numpy.transpose",
"eigenpy.switchToNumpyArray",
"numpy.random.rand",
"numpy.diag"
] | [((15, 43), 'eigenpy.switchToNumpyArray', 'eigenpy.switchToNumpyArray', ([], {}), '()\n', (41, 43), False, 'import eigenpy\n'), ((105, 129), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (119, 129), True, 'import numpy as np\n'), ((193, 208), 'eigenpy.LDLT', 'eigenpy.LDLT', (['A'], {}), '(A)\n', (205, 208), False, 'import eigenpy\n'), ((164, 183), 'numpy.random.rand', 'np.random.rand', (['dim'], {}), '(dim)\n', (178, 183), True, 'import numpy as np\n'), ((304, 319), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (316, 319), True, 'import numpy as np\n'), ((330, 340), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (337, 340), True, 'import numpy as np\n'), ((345, 360), 'numpy.transpose', 'np.transpose', (['L'], {}), '(L)\n', (357, 360), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Dot, Add, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
# df = pd.read_csv("./data/processed_rating.csv")
# N = df["user_idx"].max() + 1
# M = df["isbn_idx"].max() + 1
# df = shuffle(df)
# cut_off = int(0.8 * len(df))
# df_train = df.iloc[:cut_off]
# df_test = df.iloc[cut_off:]
# K = 15
# mu = df_train["Book-Rating"].mean()
# epochs = 15
# reg_penalty = 0.0
# u = Input(shape=(1, ))
# b = Input(shape=(1, ))
# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)
# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)
# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)
# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)
# x = Dot(axes=2)([u_embedding, b_embedding])
# x = Add()([x, u_bias, b_bias])
# x = Flatten()(x)
# model = Model(inputs=[u, b], outputs=x)
# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=["mse"])
# r = model.fit(
# x=[df_train["user_idx"].values, df_train["isbn_idx"].values],
# y=df_train["Book-Rating"].values - mu,
# epochs=epochs,
# batch_size=128,
# validation_data=([df_test["user_idx"].values,
# df_test["isbn_idx"].values], df_test["Book-Rating"].values - mu))
# plt.plot(r.history['loss'], label="train loss")
# plt.plot(r.history['val_loss'], label="test loss")
# plt.legend()
# plt.show()
# Ratings table; the code below reads columns user_id, book_id and rating
# (assumed from usage -- verify against the CSV schema).
df = pd.read_csv("./data/archive/ratings.csv")
# N = len(set(df["user_id"].values)) + 1
# M = len(set(df["book_id"].values)) + 1
# df = shuffle(df)
# cut_off = int(0.8 * len(df))
# df_train = df.iloc[:cut_off]
# df_test = df.iloc[cut_off:]
# K = 15
# mu = df_train["rating"].mean()
# epochs = 15
# reg_penalty = 0.0
# u = Input(shape=(1, ))
# b = Input(shape=(1, ))
# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)
# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)
# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)
# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)
# x = Dot(axes=2)([u_embedding, b_embedding])
# x = Add()([x, u_bias, b_bias])
# x = Flatten()(x)
# model = Model(inputs=[u, b], outputs=x)
# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=["mse"])
# r = model.fit(x=[df_train["user_id"].values, df_train["book_id"].values],
# y=df_train["rating"].values - mu,
# epochs=epochs,
# batch_size=128,
# validation_data=([
# df_test["user_id"].values, df_test["book_id"].values
# ], df_test["rating"].values - mu))
# model.save('regression_model.h5')
# plt.plot(r.history['loss'], label="train loss")
# plt.plot(r.history['val_loss'], label="test loss")
# plt.legend()
# plt.show()
def predict(user_id):
    """Print the top-5 recommended book ids (and scores) for ``user_id``.

    Loads the trained regression model from disk, scores every distinct
    book in the module-level ``df`` for this user, and prints the five
    highest-scoring book ids with their predicted ratings.
    """
    model = keras.models.load_model('regression_model.h5')
    # Distinct book ids; note set() ordering is arbitrary, so an index into
    # this array is NOT itself a book id.
    book_data = np.array(list(set(df.book_id)))
    user = np.array([user_id for i in range(len(book_data))])
    predictions = model.predict([user, book_data])
    predictions = np.array([a[0] for a in predictions])
    # argsort yields positions into `predictions`/`book_data`; the original
    # code printed those positions as if they were book ids. Map the top
    # positions back to the actual ids before reporting.
    top_positions = (-predictions).argsort()[:5]
    recommended_book_ids = book_data[top_positions]
    print(recommended_book_ids)
    print(predictions[top_positions])
# Demo run at import time: print the top recommendations for user 1.
predict(1)
| [
"pandas.read_csv",
"tensorflow.keras.models.load_model",
"numpy.array"
] | [((1699, 1740), 'pandas.read_csv', 'pd.read_csv', (['"""./data/archive/ratings.csv"""'], {}), "('./data/archive/ratings.csv')\n", (1710, 1740), True, 'import pandas as pd\n'), ((3126, 3172), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""regression_model.h5"""'], {}), "('regression_model.h5')\n", (3149, 3172), False, 'from tensorflow import keras\n'), ((3352, 3389), 'numpy.array', 'np.array', (['[a[0] for a in predictions]'], {}), '([a[0] for a in predictions])\n', (3360, 3389), True, 'import numpy as np\n')] |
import numpy as np
import random
import torch
import torch.nn as nn
from torch import optim
class Encoder(nn.Module):
    """LSTM encoder.

    Consumes a (seq_len, batch, input_size) sequence and returns the
    per-step outputs together with the final (hidden, cell) state pair.
    """

    def __init__(self, input_size, hidden_size, num_layers=1):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers)

    def forward(self, x):
        # Reshape so the feature dimension is exactly input_size.
        seq_len, batch = x.shape[0], x.shape[1]
        sequence = x.view(seq_len, batch, self.input_size)
        outputs, state = self.lstm(sequence)
        return outputs, state
class Decoder(nn.Module):
    """Single-step LSTM decoder.

    Runs the LSTM for one time step on a (batch, input_size) input and
    projects the hidden output to (batch, output_size).
    """

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=1):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, h):
        # Add a length-1 time axis, advance the LSTM one step, then project.
        step_out, h = self.lstm(x.unsqueeze(0), h)
        prediction = self.linear(step_out.squeeze(0))
        return prediction, h
class EncoderDecoder(nn.Module):
    """Seq2seq forecaster: encode an input window with an LSTM, then decode
    `target_len` future steps one at a time, optionally with teacher forcing.
    """

    def __init__(self, hidden_size, input_size=1, output_size=1):
        super(EncoderDecoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.encoder = Encoder(input_size=input_size, hidden_size=hidden_size)
        self.decoder = Decoder(input_size=input_size, hidden_size=hidden_size, output_size=output_size)

    def train_model(
        self, train, target, epochs, target_len, method='recursive',
        tfr=0.5, lr=0.01, dynamic_tf=False
    ):
        """Train with Adam/MSE and return the per-epoch loss array.

        :param train: input window, shape (seq_len, batch, features)
        :param target: ground-truth future steps, shape (target_len, batch, features)
        :param epochs: number of full passes over `train`
        :param target_len: number of decoding steps per epoch
        :param method: 'recursive', 'teacher_forcing' or 'mixed_teacher_forcing'
        :param tfr: teacher-forcing ratio (probability of feeding ground truth)
        :param lr: Adam learning rate
        :param dynamic_tf: if True, decay `tfr` by 0.02 each epoch until 0
        :return: numpy array of epoch losses (NaN-initialized)
        """
        losses = np.full(epochs, np.nan)
        optimizer = optim.Adam(self.parameters(), lr=lr)
        criterion = nn.MSELoss()
        for e in range(epochs):
            # NOTE(review): allocated on CPU — may mismatch if the model
            # lives on GPU; confirm intended device usage.
            predicted = torch.zeros(target_len, train.shape[1], train.shape[2])
            optimizer.zero_grad()
            _, enc_h = self.encoder(train)
            # Seed decoding with the last observed input step and the
            # encoder's final state.
            dec_in = train[-1, :, :]
            dec_h = enc_h
            if method == 'recursive':
                # Always feed the model's own previous prediction back in.
                for t in range(target_len):
                    dec_out, dec_h = self.decoder(dec_in, dec_h)
                    predicted[t] = dec_out
                    dec_in = dec_out
            if method == 'teacher_forcing':
                # Decide once per epoch: teacher-force the whole horizon or
                # decode it fully recursively.
                # use teacher forcing
                if random.random() < tfr:
                    for t in range(target_len):
                        dec_out, dec_h = self.decoder(dec_in, dec_h)
                        predicted[t] = dec_out
                        dec_in = target[t, :, :]
                # predict recursively
                else:
                    for t in range(target_len):
                        dec_out, dec_h = self.decoder(dec_in, dec_h)
                        predicted[t] = dec_out
                        dec_in = dec_out
            if method == 'mixed_teacher_forcing':
                # predict using mixed teacher forcing: choose per *step*
                # whether to feed ground truth or the model's own output.
                for t in range(target_len):
                    dec_out, dec_h = self.decoder(dec_in, dec_h)
                    predicted[t] = dec_out
                    # predict with teacher forcing
                    if random.random() < tfr:
                        dec_in = target[t, :, :]
                    # predict recursively
                    else:
                        dec_in = dec_out
            loss = criterion(predicted, target)
            loss.backward()
            optimizer.step()
            losses[e] = loss.item()
            if e % 10 == 0:
                print(f'Epoch {e}/{epochs}: {round(loss.item(), 4)}')
            # dynamic teacher forcing: linearly anneal the ratio.
            if dynamic_tf and tfr > 0:
                tfr = tfr - 0.02
        return losses

    def predict(self, x, target_len):
        """Recursively decode `target_len` steps from input window `x`.

        :param x: input window, shape (seq_len, batch, features)
        :return: tensor of predictions, shape (target_len, batch, features)
        """
        y = torch.zeros(target_len, x.shape[1], x.shape[2])
        _, enc_h = self.encoder(x)
        dec_in = x[-1, :, :]
        dec_h = enc_h
        for t in range(target_len):
            dec_out, dec_h = self.decoder(dec_in, dec_h)
            y[t] = dec_out
            dec_in = dec_out
        return y
| [
"numpy.full",
"torch.nn.MSELoss",
"random.random",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.LSTM"
] | [((360, 438), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers)\n', (367, 438), True, 'import torch.nn as nn\n'), ((914, 992), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers)\n', (921, 992), True, 'import torch.nn as nn\n'), ((1021, 1056), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1030, 1056), True, 'import torch.nn as nn\n'), ((1790, 1813), 'numpy.full', 'np.full', (['epochs', 'np.nan'], {}), '(epochs, np.nan)\n', (1797, 1813), True, 'import numpy as np\n'), ((1893, 1905), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1903, 1905), True, 'import torch.nn as nn\n'), ((3930, 3977), 'torch.zeros', 'torch.zeros', (['target_len', 'x.shape[1]', 'x.shape[2]'], {}), '(target_len, x.shape[1], x.shape[2])\n', (3941, 3977), False, 'import torch\n'), ((1963, 2018), 'torch.zeros', 'torch.zeros', (['target_len', 'train.shape[1]', 'train.shape[2]'], {}), '(target_len, train.shape[1], train.shape[2])\n', (1974, 2018), False, 'import torch\n'), ((2490, 2505), 'random.random', 'random.random', ([], {}), '()\n', (2503, 2505), False, 'import random\n'), ((3322, 3337), 'random.random', 'random.random', ([], {}), '()\n', (3335, 3337), False, 'import random\n')] |
import json
import os
import numpy as np
import torch
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID
from zerogercrnn.experiments.ast_level.utils import read_non_terminals
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID, EOF_TOKEN
from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy
class NonTerminalsMetricsWrapper(Metrics):
    """Delegating wrapper around another metrics object.

    Before forwarding each report to the wrapped metrics, the target is
    replaced by its `non_terminals` tensor; every other call is passed
    straight through.
    """

    def __init__(self, base: Metrics):
        super().__init__()
        self.base = base

    def drop_state(self):
        self.base.drop_state()

    def report(self, prediction_target):
        prediction = prediction_target[0]
        target = prediction_target[1]
        # Forward only the non-terminals part of the target.
        self.base.report((prediction, target.non_terminals))

    def get_current_value(self, should_print=False):
        return self.base.get_current_value(should_print)

    def decrease_hits(self, number):
        self.base.decrease_hits(number)
class SingleNonTerminalAccuracyMetrics(Metrics):
    """Metrics that show accuracies per non-terminal. It should not be used for plotting, but to
    print results on console during model evaluation."""

    def __init__(self, non_terminals_file, results_dir=None, group=False, dim=2):
        """
        :param non_terminals_file: file with json of non-terminals
        :param results_dir: where to save json with accuracies per non-terminal
        :param group: if True, accuracies are aggregated by non-terminal base name
            (last two characters stripped) before saving
        :param dim: dimension to run max function on for predicted values
        """
        super().__init__()
        print('SingleNonTerminalAccuracyMetrics created!')
        self.non_terminals = read_non_terminals(non_terminals_file)
        self.non_terminals_number = len(self.non_terminals)
        self.results_dir = results_dir
        self.group = group
        self.dim = dim
        # One independent hit/miss tracker per non-terminal id.
        self.accuracies = [IndexedAccuracyMetrics(label='ERROR') for _ in self.non_terminals]

    def drop_state(self):
        for accuracy in self.accuracies:
            accuracy.drop_state()

    def report(self, data):
        prediction, target = data
        # If dim is None the prediction is already class ids; otherwise
        # take the argmax over the given dimension.
        if self.dim is None:
            predicted = prediction
        else:
            _, predicted = torch.max(prediction, dim=self.dim)
        predicted = predicted.view(-1)
        target = target.non_terminals.view(-1)

        # Route each position to the tracker of its ground-truth class.
        for cur in range(len(self.non_terminals)):
            indices = (target == cur).nonzero().squeeze()
            self.accuracies[cur].report(predicted, target, indices)

    def get_current_value(self, should_print=False):
        result = []
        for cur in range(len(self.non_terminals)):
            cur_accuracy = self.accuracies[cur].get_current_value(should_print=False)
            result.append(cur_accuracy)
        # if should_print:
        #     print('Accuracy on {} is {}'.format(self.non_terminals[cur], cur_accuracy))
        self.save_to_file(result)
        return 0  # this metrics if only for printing

    def save_to_file(self, result):
        # Persist per-non-terminal accuracies as two JSON lines:
        # the non-terminal names followed by their accuracies.
        if self.results_dir is not None:
            if self.group:
                nt, res = self.get_grouped_result()
            else:
                nt, res = self.non_terminals, result
            with open(os.path.join(self.results_dir, 'nt_acc.txt'), mode='w') as f:
                f.write(json.dumps(nt))
                f.write('\n')
                f.write(json.dumps(res))

    def get_grouped_result(self):
        """Calc accuracies ignoring last two bits of information."""
        nt = set()
        hits = {}
        misses = {}
        # Merge hit/miss counts of all variants sharing the same base name.
        for i in range(len(self.non_terminals)):
            base = self.non_terminals[i]
            if self.non_terminals[i] != EOF_TOKEN:
                base = base[:-2]  # remove last two bits
            nt.add(base)
            if base not in hits:
                hits[base] = 0
            if base not in misses:
                misses[base] = 0
            # NOTE(review): assumes IndexedAccuracyMetrics exposes
            # `.metrics.hits` / `.metrics.misses` counters — confirm.
            hits[base] += self.accuracies[i].metrics.hits
            misses[base] += self.accuracies[i].metrics.misses
        nt = sorted(list(nt))
        result = []
        # These two groups are deliberately excluded from the report.
        nt.remove('Program')
        nt.remove('AssignmentPattern')
        for cur in nt:
            if hits[cur] + misses[cur] == 0:
                result.append(0)
            else:
                result.append(float(hits[cur]) / (hits[cur] + misses[cur]))
        return nt, result
class TerminalAccuracyMetrics(Metrics):
    """Terminal-prediction accuracy, split into several conditional views:
    overall, on <empty> targets, on non-<empty> targets, and (within the
    non-<empty> subset) conditioned on ground truth / prediction not being <unk>.
    """

    def __init__(self, dim=2):
        # dim: dimension to take argmax over in the raw prediction tensor.
        super().__init__()
        self.dim = dim
        self.general_accuracy = BaseAccuracyMetrics()
        self.empty_accuracy = IndexedAccuracyMetrics(
            label='Accuracy on terminals that ground truth is <empty>'
        )
        self.non_empty_accuracy = IndexedAccuracyMetrics(
            label='Accuracy on terminals that ground truth is not <empty>'
        )
        self.ground_not_unk_accuracy = IndexedAccuracyMetrics(
            label='Accuracy on terminals that ground truth is not <unk> (and ground truth is not <empty>)'
        )
        self.model_not_unk_accuracy = IndexedAccuracyMetrics(
            label='Accuracy on terminals that model predicted to non <unk> (and ground truth is not <empty>)'
        )

    def drop_state(self):
        self.general_accuracy.drop_state()
        self.empty_accuracy.drop_state()
        self.non_empty_accuracy.drop_state()
        self.ground_not_unk_accuracy.drop_state()
        self.model_not_unk_accuracy.drop_state()

    def report(self, prediction_target):
        prediction, target = prediction_target
        _, predicted = torch.max(prediction, dim=self.dim)
        predicted = predicted.view(-1)
        target = target.view(-1)
        self.general_accuracy.report((predicted, target))

        # The conditional breakdowns are only tracked during evaluation.
        # NOTE(review): `is_train` appears to be inherited from Metrics — confirm.
        if not self.is_train:
            empty_indexes = torch.nonzero(target == 0).squeeze()
            self.empty_accuracy.report(predicted, target, empty_indexes)

            non_empty_indexes = torch.nonzero(target - EMPTY_TOKEN_ID).squeeze()
            self.non_empty_accuracy.report(predicted, target, non_empty_indexes)

            # Restrict both tensors to non-<empty> positions before the
            # <unk>-conditioned breakdowns.
            predicted = torch.index_select(predicted, 0, non_empty_indexes)
            target = torch.index_select(target, 0, non_empty_indexes)

            ground_not_unk_indexes = torch.nonzero(target - UNKNOWN_TOKEN_ID).squeeze()
            self.ground_not_unk_accuracy.report(predicted, target, ground_not_unk_indexes)

            model_not_unk_indexes = torch.nonzero(predicted - UNKNOWN_TOKEN_ID).squeeze()
            self.model_not_unk_accuracy.report(predicted, target, model_not_unk_indexes)

    def get_current_value(self, should_print=False):
        general_accuracy = self.general_accuracy.get_current_value(should_print=should_print)
        # The breakdowns are printed for inspection only; the overall
        # accuracy is the value actually returned.
        if (not self.is_train) and should_print:
            self.empty_accuracy.get_current_value(should_print=True)
            self.non_empty_accuracy.get_current_value(should_print=True)
            self.ground_not_unk_accuracy.get_current_value(should_print=True)
            self.model_not_unk_accuracy.get_current_value(should_print=True)
        return general_accuracy
class NonTerminalTerminalAccuracyMetrics(Metrics):
    """Tracks max-prediction accuracy separately for non-terminals and
    terminals and returns the pair of values."""

    def __init__(self):
        super().__init__()
        self.nt_accuracy = MaxPredictionAccuracyMetrics()
        self.t_accuracy = MaxPredictionAccuracyMetrics()

    def drop_state(self):
        for tracker in (self.nt_accuracy, self.t_accuracy):
            tracker.drop_state()

    def report(self, data):
        nt_prediction, t_prediction, nt_target, t_target = data
        # Route each stream to its dedicated accuracy tracker.
        self.nt_accuracy.report((nt_prediction, nt_target))
        self.t_accuracy.report((t_prediction, t_target))

    def get_current_value(self, should_print=False):
        nt_value = self.nt_accuracy.get_current_value(should_print=False)
        t_value = self.t_accuracy.get_current_value(should_print=False)
        if should_print:
            print('Non terminals accuracy: {}'.format(nt_value))
            print('Terminals accuracy: {}'.format(t_value))
        return nt_value, t_value
class LayeredNodeDepthsAttentionMetrics(Metrics):
    """Metrics that is able to visualize attention coefficient per node depths.

    report() accumulates, for each node depth 0..49, the sum of attention
    vectors of all nodes at that depth; get_current_value() writes the
    per-depth averages to disk.
    """

    def __init__(self):
        super().__init__()
        # Row i: running sum of attention vectors over nodes of depth i.
        self.per_depth_attention_sum = np.zeros((50, 50))
        # Number of nodes reported so far per depth.
        self.per_depth_reports = np.zeros(50)

    def drop_state(self):
        pass

    def report(self, node_depths, attention_coefficients):
        """Accumulate attention per depth.

        :param node_depths: 1-D tensor of node depths.
        :param attention_coefficients: tensor aligned with node_depths on
            dim 0 (assumed shape (N, 50, 1) — TODO confirm with caller).
        """
        for depth in range(50):
            # view(-1) keeps the index 1-D even when exactly one node
            # matches; squeeze() would yield a 0-dim tensor, which
            # torch.index_select rejects.
            index = torch.nonzero(node_depths == depth).view(-1)
            count = index.size()[0]
            if count == 0:
                continue
            selected = torch.index_select(attention_coefficients, dim=0, index=index)
            selected = selected.squeeze(2)
            self.per_depth_attention_sum[depth] += torch.sum(selected, dim=0).cpu().numpy()
            self.per_depth_reports[depth] += count

    def get_current_value(self, should_print=False):
        # Average on a copy: dividing the running sums in place would
        # corrupt the accumulated state if this method were called twice.
        averaged = self.per_depth_attention_sum.copy()
        for depth in range(50):
            if abs(self.per_depth_reports[depth]) > 1e-6:
                averaged[depth] /= self.per_depth_reports[depth]
        os.makedirs('eval/temp/attention', exist_ok=True)
        np.save('eval/temp/attention/per_depth_matrix', averaged)
        return 0  # this metrics is only for saving results to file.
class PerNtAttentionMetrics(Metrics):
    """Placeholder for per-non-terminal attention statistics.

    The aggregation body is still commented out, so report() currently
    computes the non-terminal ids and discards them (a no-op).
    """

    def __init__(self):
        super().__init__()

    def report(self, current_input, attention_coefficients):
        # Recover the non-terminal id of each position from its one-hot
        # (or score) vector; currently unused — see commented-out body.
        nt_ids = torch.argmax(current_input, dim=-1)
        # for i in range(97): # TODO: check
        #     index = torch.nonzero((nt_ids == i))
        #     if index.size()[0] == 0:
        #         continue
        #     selected_attention = torch.index_select(attention_coefficients, dim=0, index=index.squeeze())
        #     selected_attention = selected_attention.squeeze(2)
        #     to_report = torch.sum(selected_attention, dim=0).cpu().numpy()
        #     self.per_depth_attention_sum[i] += to_report
        #     self.per_depth_reports[i] += index.size()[0]

    def drop_state(self):
        pass

    def get_current_value(self, should_print=False):
        pass
class EmptyNonEmptyWrapper(Metrics):
    """Feeds each (prediction, target) pair to two wrapped metrics: one sees
    the full stream, the other only positions whose target is not <empty>."""

    def __init__(self, non_emp_base: Metrics, with_emp_base: Metrics):
        super().__init__()
        self.non_emp_base = non_emp_base
        self.with_emp_base = with_emp_base

    def drop_state(self):
        self.non_emp_base.drop_state()
        self.with_emp_base.drop_state()

    def report(self, prediction_target):
        prediction, target = prediction_target
        prediction = prediction.view(-1)
        target = target.view(-1)

        # Full stream, <empty> targets included.
        self.with_emp_base.report((prediction, target))

        # Restrict to positions whose ground truth is not <empty>.
        keep = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
        self.non_emp_base.report((
            torch.index_select(prediction, 0, keep),
            torch.index_select(target, 0, keep),
        ))

    def get_current_value(self, should_print=False):
        print('Non Empty')
        self.non_emp_base.get_current_value(should_print=should_print)
        print('With Empty')
        self.with_emp_base.get_current_value(should_print=should_print)
class EmptyNonEmptyTerminalTopKAccuracyWrapper(Metrics):
    """Top-5 terminal accuracy reported twice: once over the full stream and
    once restricted to positions whose target is not <empty>."""

    def __init__(self):
        super().__init__()
        self.non_emp_base = TopKAccuracy(k=5)
        self.with_emp_base = TopKAccuracy(k=5)

    def drop_state(self):
        self.non_emp_base.drop_state()
        self.with_emp_base.drop_state()

    def report(self, prediction_target):
        prediction, target = prediction_target
        # Flatten to (positions, vocabulary) / (positions,) respectively.
        prediction = prediction.view(-1, prediction.size()[-1])
        target = target.view(-1)

        # Full stream, <empty> targets included.
        self.with_emp_base.report((prediction, target))

        # Restrict to positions whose ground truth is not <empty>.
        keep = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
        self.non_emp_base.report((
            torch.index_select(prediction, 0, keep),
            torch.index_select(target, 0, keep),
        ))

    def get_current_value(self, should_print=False):
        print('Non Empty')
        self.non_emp_base.get_current_value(should_print=should_print)
        print('With Empty')
        self.with_emp_base.get_current_value(should_print=should_print)
# class AggregatedTerminalTopKMetrics(Metrics):
#
# def __init__(self, k):
# super().__init__()
# self.k = k
# self.common = BaseAccuracyMetrics()
# self.target_non_unk = Top
# self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')
#
# def drop_state(self):
# self.common.drop_state()
# self.target_non_unk.drop_state()
# self.prediction_non_unk.drop_state()
#
# def report(self, prediction_target):
# prediction, target = prediction_target
# prediction = prediction.view(-1)
# target = target.view(-1)
#
# self.common.report((prediction, target))
#
# pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
# target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()
#
# self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
# self.target_non_unk.report(prediction, target, target_non_unk_indices)
#
# def get_current_value(self, should_print=False):
# print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
# print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
# print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
# print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
class AggregatedTerminalMetrics(Metrics):
    """Terminal accuracy aggregated into several conditional probabilities
    with respect to the <unk> token; results are printed, not returned."""

    def __init__(self):
        super().__init__()
        self.common = BaseAccuracyMetrics()
        self.target_non_unk = IndexedAccuracyMetrics('Target not unk')
        self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')

    def drop_state(self):
        self.common.drop_state()
        self.target_non_unk.drop_state()
        self.prediction_non_unk.drop_state()

    def report(self, prediction_target):
        prediction, target = prediction_target
        prediction = prediction.view(-1)
        target = target.view(-1)

        self.common.report((prediction, target))

        # Positions where the prediction / ground truth is not the <unk> token.
        pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
        target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()

        self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
        self.target_non_unk.report(prediction, target, target_non_unk_indices)

    def get_current_value(self, should_print=False):
        # NOTE(review): assumes BaseAccuracyMetrics exposes `hits`/`misses`
        # counters directly, and IndexedAccuracyMetrics a `.metrics` with
        # `hits` — confirm against their definitions.
        print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
        print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
        print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
        print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
| [
"numpy.save",
"torch.sum",
"zerogercrnn.lib.metrics.MaxPredictionAccuracyMetrics",
"torch.argmax",
"zerogercrnn.experiments.ast_level.utils.read_non_terminals",
"numpy.zeros",
"torch.nonzero",
"json.dumps",
"torch.index_select",
"torch.max",
"zerogercrnn.lib.metrics.BaseAccuracyMetrics",
"zero... | [((1701, 1739), 'zerogercrnn.experiments.ast_level.utils.read_non_terminals', 'read_non_terminals', (['non_terminals_file'], {}), '(non_terminals_file)\n', (1719, 1739), False, 'from zerogercrnn.experiments.ast_level.utils import read_non_terminals\n'), ((4580, 4601), 'zerogercrnn.lib.metrics.BaseAccuracyMetrics', 'BaseAccuracyMetrics', ([], {}), '()\n', (4599, 4601), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((4632, 4719), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', ([], {'label': '"""Accuracy on terminals that ground truth is <empty>"""'}), "(label=\n 'Accuracy on terminals that ground truth is <empty>')\n", (4654, 4719), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((4771, 4862), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', ([], {'label': '"""Accuracy on terminals that ground truth is not <empty>"""'}), "(label=\n 'Accuracy on terminals that ground truth is not <empty>')\n", (4793, 4862), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((4919, 5047), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', ([], {'label': '"""Accuracy on terminals that ground truth is not <unk> (and ground truth is not <empty>)"""'}), "(label=\n 'Accuracy on terminals that ground truth is not <unk> (and ground truth is not <empty>)'\n )\n", (4941, 5047), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((5098, 5229), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', ([], {'label': '"""Accuracy on terminals that model predicted to non <unk> (and ground truth is not 
<empty>)"""'}), "(label=\n 'Accuracy on terminals that model predicted to non <unk> (and ground truth is not <empty>)'\n )\n", (5120, 5229), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((5609, 5644), 'torch.max', 'torch.max', (['prediction'], {'dim': 'self.dim'}), '(prediction, dim=self.dim)\n', (5618, 5644), False, 'import torch\n'), ((7273, 7303), 'zerogercrnn.lib.metrics.MaxPredictionAccuracyMetrics', 'MaxPredictionAccuracyMetrics', ([], {}), '()\n', (7301, 7303), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((7330, 7360), 'zerogercrnn.lib.metrics.MaxPredictionAccuracyMetrics', 'MaxPredictionAccuracyMetrics', ([], {}), '()\n', (7358, 7360), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((8283, 8301), 'numpy.zeros', 'np.zeros', (['(50, 50)'], {}), '((50, 50))\n', (8291, 8301), True, 'import numpy as np\n'), ((8335, 8347), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (8343, 8347), True, 'import numpy as np\n'), ((9173, 9250), 'numpy.save', 'np.save', (['"""eval/temp/attention/per_depth_matrix"""', 'self.per_depth_attention_sum'], {}), "('eval/temp/attention/per_depth_matrix', self.per_depth_attention_sum)\n", (9180, 9250), True, 'import numpy as np\n'), ((9490, 9525), 'torch.argmax', 'torch.argmax', (['current_input'], {'dim': '(-1)'}), '(current_input, dim=-1)\n', (9502, 9525), False, 'import torch\n'), ((10805, 10855), 'torch.index_select', 'torch.index_select', (['prediction', '(0)', 'non_emp_indices'], {}), '(prediction, 0, non_emp_indices)\n', (10823, 10855), False, 'import torch\n'), ((10873, 10919), 'torch.index_select', 'torch.index_select', (['target', '(0)', 'non_emp_indices'], {}), '(target, 0, non_emp_indices)\n', (10891, 10919), False, 'import 
torch\n'), ((11365, 11382), 'zerogercrnn.lib.metrics.TopKAccuracy', 'TopKAccuracy', ([], {'k': '(5)'}), '(k=5)\n', (11377, 11382), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((11412, 11429), 'zerogercrnn.lib.metrics.TopKAccuracy', 'TopKAccuracy', ([], {'k': '(5)'}), '(k=5)\n', (11424, 11429), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((11874, 11924), 'torch.index_select', 'torch.index_select', (['prediction', '(0)', 'non_emp_indices'], {}), '(prediction, 0, non_emp_indices)\n', (11892, 11924), False, 'import torch\n'), ((11942, 11988), 'torch.index_select', 'torch.index_select', (['target', '(0)', 'non_emp_indices'], {}), '(target, 0, non_emp_indices)\n', (11960, 11988), False, 'import torch\n'), ((13926, 13947), 'zerogercrnn.lib.metrics.BaseAccuracyMetrics', 'BaseAccuracyMetrics', ([], {}), '()\n', (13945, 13947), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((13978, 14018), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', (['"""Target not unk"""'], {}), "('Target not unk')\n", (14000, 14018), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((14053, 14097), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', (['"""Prediction not unk"""'], {}), "('Prediction not unk')\n", (14075, 14097), False, 'from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((1917, 1954), 'zerogercrnn.lib.metrics.IndexedAccuracyMetrics', 'IndexedAccuracyMetrics', ([], {'label': '"""ERROR"""'}), "(label='ERROR')\n", (1939, 1954), False, 'from 
zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy\n'), ((2254, 2289), 'torch.max', 'torch.max', (['prediction'], {'dim': 'self.dim'}), '(prediction, dim=self.dim)\n', (2263, 2289), False, 'import torch\n'), ((6133, 6184), 'torch.index_select', 'torch.index_select', (['predicted', '(0)', 'non_empty_indexes'], {}), '(predicted, 0, non_empty_indexes)\n', (6151, 6184), False, 'import torch\n'), ((6206, 6254), 'torch.index_select', 'torch.index_select', (['target', '(0)', 'non_empty_indexes'], {}), '(target, 0, non_empty_indexes)\n', (6224, 6254), False, 'import torch\n'), ((8498, 8529), 'torch.nonzero', 'torch.nonzero', (['(node_depths == i)'], {}), '(node_depths == i)\n', (8511, 8529), False, 'import torch\n'), ((3271, 3315), 'os.path.join', 'os.path.join', (['self.results_dir', '"""nt_acc.txt"""'], {}), "(self.results_dir, 'nt_acc.txt')\n", (3283, 3315), False, 'import os\n'), ((3357, 3371), 'json.dumps', 'json.dumps', (['nt'], {}), '(nt)\n', (3367, 3371), False, 'import json\n'), ((3427, 3442), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (3437, 3442), False, 'import json\n'), ((5835, 5861), 'torch.nonzero', 'torch.nonzero', (['(target == 0)'], {}), '(target == 0)\n', (5848, 5861), False, 'import torch\n'), ((5978, 6016), 'torch.nonzero', 'torch.nonzero', (['(target - EMPTY_TOKEN_ID)'], {}), '(target - EMPTY_TOKEN_ID)\n', (5991, 6016), False, 'import torch\n'), ((6293, 6333), 'torch.nonzero', 'torch.nonzero', (['(target - UNKNOWN_TOKEN_ID)'], {}), '(target - UNKNOWN_TOKEN_ID)\n', (6306, 6333), False, 'import torch\n'), ((6472, 6515), 'torch.nonzero', 'torch.nonzero', (['(predicted - UNKNOWN_TOKEN_ID)'], {}), '(predicted - UNKNOWN_TOKEN_ID)\n', (6485, 6515), False, 'import torch\n'), ((8787, 8823), 'torch.sum', 'torch.sum', (['selected_attention'], {'dim': '(0)'}), '(selected_attention, dim=0)\n', (8796, 8823), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.