code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8
"""
This module is designed to hold functions for visualization.
This file is part of project dhnx (). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location:
SPDX-License-Identifier: MIT
"""
import logging
from collections import namedtuple
import folium as fol
import matplotlib.collections as collections
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from folium.features import DivIcon
logger = logging.getLogger()
logger.setLevel(logging.INFO)
cartopy_installed = True
try:
from cartopy import crs as ccrs
from cartopy.io.img_tiles import Stamen
except ImportError:
logging.info("Cartopy is not installed. Background maps will not be drawn.")
cartopy_installed = False
class InteractiveMap():
    r"""
    An interactive map of a network.ThermalNetwork.

    Nodes (consumers, producers, forks) are drawn as colored circle markers
    with their ids; pipes are drawn as orange polylines with direction arrows.
    """
    def __init__(self, thermal_network):
        self.node_data = self.collect_node_data(thermal_network)
        self.edge_data = thermal_network.components.pipes
        # Constant edge weight; used as the line-width baseline in draw().
        self.edge_data['value'] = 1
        self.node_id = self.node_data.index
        self.lat = self.node_data['lat']
        self.lon = self.node_data['lon']
        self.component_type = self.node_data['component_type']
        self._add_colors()

    @staticmethod
    def collect_node_data(thermal_network):
        """Concatenate consumers, producers and forks into one DataFrame.

        Each component frame's index is prefixed with its list name
        (e.g. 'consumers-0') so ids stay unique after concatenation.
        """
        node_data = {
            list_name: thermal_network.components[list_name].copy() for list_name in [
                'consumers',
                'producers',
                'forks'
            ]
        }
        for k, v in node_data.items():
            v.index = [k + '-' + str(id) for id in v.index]
        return pd.concat(node_data.values())

    def _add_colors(self):
        """Add a 'node_color' column mapping component_type to a hex color."""
        color = {'producer': '#ff0000',
                 'consumer': '#00ff00',
                 'split': '#000000'}
        self.node_data = (
            self.node_data
            .assign(node_color=self.node_data['component_type'])
            .replace({'node_color': color}))
        return self.node_data['node_color']

    @staticmethod
    def _get_bearing(p1, p2):
        '''
        Returns compass bearing from p1 to p2

        Parameters
        p1 : namedtuple with lat lon
        p2 : namedtuple with lat lon

        Return
        compass bearing of type float
        '''
        y = p2[0] - p1[0]
        x = p2[1] - p1[1]
        bearing = np.arctan2(x, y) / np.pi * 180
        # adjusting for compass bearing: map (-180, 0) onto (180, 360)
        if bearing < 0:
            return bearing + 360
        return bearing

    def _get_arrows(self, locations, color='black', size=8, n_arrows=3):
        '''
        Get a list of correctly placed and rotated
        arrows/markers to be plotted

        Parameters
        locations : list of lists of lat lons that represent the
                    start and end of the line.
                    eg [[41.1132, -96.1993],[41.3810, -95.8021]]
        color : default is 'black'
        size : default is 8
        n_arrows : number of arrows to create. default is 3

        Return
        list of arrows/markers
        '''
        Point = namedtuple('Point', field_names=['lat', 'lon'])
        # creating point from our Point named tuple
        p1 = Point(locations[0][0], locations[0][1])
        p2 = Point(locations[1][0], locations[1][1])
        # getting the rotation needed for our marker.
        # Subtracting 90 to account for the marker's orientation
        # of due East (_get_bearing returns North-based bearing)
        rotation = self._get_bearing(p1, p2) - 90
        # get an evenly spaced list of lats and lons for our arrows;
        # first and last are discarded so arrows do not overlap the
        # start and end node markers
        arrow_lats = np.linspace(p1.lat, p2.lat, n_arrows + 2)[1:n_arrows + 1]
        arrow_lons = np.linspace(p1.lon, p2.lon, n_arrows + 2)[1:n_arrows + 1]
        arrows = []
        # creating each "arrow" and appending them to our arrows list
        for points in zip(arrow_lats, arrow_lons):
            arrows.append(
                fol.RegularPolygonMarker(
                    location=points,
                    color=color, number_of_sides=3,
                    radius=size, rotation=rotation, fill=True))
        return arrows

    def draw(self):
        """Return a folium.Map with all nodes, node labels and pipes drawn."""
        # create map centered on the mean node position
        m = fol.Map(location=[self.lat.mean(), self.lon.mean()],
                    zoom_start=14)
        for i in range(0, len(self.node_data)):
            # draw nodes
            fol.CircleMarker([self.lat[i], self.lon[i]],
                             color=self.node_data['node_color'][i],
                             fill_color=self.node_data['node_color'][i],
                             radius=20).add_to(m)
            # draw node ids
            fol.Marker(
                [self.lat[i], self.lon[i]],
                icon=DivIcon(
                    icon_size=(-35, 75),
                    icon_anchor=(0, 0),
                    html='<div style="font-size: 16pt">%s</div>'
                         % self.node_data.index[i]
                )
            ).add_to(m)
        # Fix: the average line width is loop-invariant; compute it once
        # instead of recomputing it for every edge.
        lw_avg = self.edge_data['value'].mean()
        for i in range(0, len(self.edge_data)):
            lw = self.edge_data['value'][i] / lw_avg
            # Fix: build the start/end coordinate pair once and reuse it for
            # both the polyline and its direction arrows.
            locations = [[self.lat[self.edge_data['from_node'][i]],
                          self.lon[self.edge_data['from_node'][i]]],
                         [self.lat[self.edge_data['to_node'][i]],
                          self.lon[self.edge_data['to_node'][i]]]]
            fol.PolyLine(locations=locations,
                         color='orange',
                         weight=lw * 3).add_to(m)
            for arrow in self._get_arrows(locations=locations,
                                          color='orange', n_arrows=3):
                arrow.add_to(m)
        return m
class StaticMap():
    r"""
    A static map of a network.ThermalNetwork.
    """
    def __init__(self, thermal_network, figsize=(5, 5), node_size=3,
                 edge_width=3, node_color='r', edge_color='g'):
        # Geometry comes from the networkx representation of the network;
        # the remaining arguments are stored as render settings.
        self.graph = thermal_network.to_nx_graph()
        self.figsize = figsize
        self.node_size = node_size
        self.edge_width = edge_width
        self.node_color = node_color
        self.edge_color = edge_color
        # node id -> np.array([lon, lat])
        self.positions = {node_id: np.array([attrs['lon'], attrs['lat']])
                          for node_id, attrs in self.graph.nodes(data=True)}
        self.extent = self._get_extent()

    def _get_extent(self):
        """Return [lon_min, lon_max, lat_min, lat_max] padded by 10 % of the span."""
        lons = [pos[0] for pos in self.positions.values()]
        lats = [pos[1] for pos in self.positions.values()]
        extent = np.array([np.min(lons), np.max(lons), np.min(lats), np.max(lats)])
        span = [extent[1] - extent[0], extent[3] - extent[2]]
        extent = extent.astype(float)
        # widen by 10 % of the span on every side
        extent += 0.1 * np.array([-span[0], span[0], -span[1], span[1]])
        return extent

    def draw(self, bgcolor='w', no_axis=False, background_map=False,
             use_geom=False, edge_color='b', edge_linewidth=2,
             edge_alpha=1, node_size=40, node_color='r', node_alpha=1,
             edgecolor='r', node_zorder=1):
        """
        This function has been adapted from osmnx plots.plot_graph() function.
        """
        if background_map and not cartopy_installed:
            # degrade gracefully when cartopy is missing
            logging.warning('To draw background map, cartopy must be installed.')
            background_map = False
        if background_map:
            imagery = Stamen(style='toner-lite')
            zoom_level = 15
            fig, ax = plt.subplots(
                figsize=self.figsize,
                subplot_kw={'projection': imagery.crs}
            )
            ax.set_extent(self.extent, crs=ccrs.Geodetic())
            ax.add_image(imagery, zoom_level, alpha=1, interpolation='bilinear')
        else:
            fig, ax = plt.subplots(figsize=self.figsize, facecolor=bgcolor)
        lines = []
        for u, v, data in self.graph.edges(data=True):
            if 'geometry' in data and use_geom:
                # the edge carries its own geometry (a list of line segments)
                xs, ys = data['geometry'].xy
                lines.append(list(zip(xs, ys)))
            else:
                # otherwise the edge is a straight line from node to node
                start = (self.graph.nodes[u]['lon'], self.graph.nodes[u]['lat'])
                end = (self.graph.nodes[v]['lon'], self.graph.nodes[v]['lat'])
                lines.append([start, end])
        # add all edges to the axis as a single linecollection
        lc = collections.LineCollection(lines,
                                        colors=edge_color,
                                        linewidths=edge_linewidth,
                                        alpha=edge_alpha,
                                        zorder=2)
        ax.add_collection(lc)
        node_lons = [float(x) for _, x in self.graph.nodes(data='lon')]
        node_lats = [float(y) for _, y in self.graph.nodes(data='lat')]
        ax.scatter(node_lons,
                   node_lats,
                   s=node_size,
                   c=node_color,
                   alpha=node_alpha,
                   edgecolor=edgecolor,
                   zorder=node_zorder)
        if no_axis:
            ax = plt.gca()
            ax.set_axis_off()
        return fig, ax
| [
"logging.getLogger",
"collections.namedtuple",
"cartopy.io.img_tiles.Stamen",
"matplotlib.pyplot.gca",
"cartopy.crs.Geodetic",
"logging.warning",
"folium.RegularPolygonMarker",
"matplotlib.collections.LineCollection",
"numpy.max",
"numpy.array",
"numpy.linspace",
"folium.CircleMarker",
"nump... | [((522, 541), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (539, 541), False, 'import logging\n'), ((709, 785), 'logging.info', 'logging.info', (['"""Cartopy is not installed. Background maps will not be drawn."""'], {}), "('Cartopy is not installed. Background maps will not be drawn.')\n", (721, 785), False, 'import logging\n'), ((3168, 3215), 'collections.namedtuple', 'namedtuple', (['"""Point"""'], {'field_names': "['lat', 'lon']"}), "('Point', field_names=['lat', 'lon'])\n", (3178, 3215), False, 'from collections import namedtuple\n'), ((9168, 9280), 'matplotlib.collections.LineCollection', 'collections.LineCollection', (['lines'], {'colors': 'edge_color', 'linewidths': 'edge_linewidth', 'alpha': 'edge_alpha', 'zorder': '(2)'}), '(lines, colors=edge_color, linewidths=\n edge_linewidth, alpha=edge_alpha, zorder=2)\n', (9194, 9280), True, 'import matplotlib.collections as collections\n'), ((3811, 3852), 'numpy.linspace', 'np.linspace', (['p1.lat', 'p2.lat', '(n_arrows + 2)'], {}), '(p1.lat, p2.lat, n_arrows + 2)\n', (3822, 3852), True, 'import numpy as np\n'), ((3890, 3931), 'numpy.linspace', 'np.linspace', (['p1.lon', 'p2.lon', '(n_arrows + 2)'], {}), '(p1.lon, p2.lon, n_arrows + 2)\n', (3901, 3931), True, 'import numpy as np\n'), ((6739, 6775), 'numpy.array', 'np.array', (["[data['lon'], data['lat']]"], {}), "([data['lon'], data['lat']])\n", (6747, 6775), True, 'import numpy as np\n'), ((7242, 7294), 'numpy.array', 'np.array', (['[-delta[0], delta[0], -delta[1], delta[1]]'], {}), '([-delta[0], delta[0], -delta[1], delta[1]])\n', (7250, 7294), True, 'import numpy as np\n'), ((7908, 7934), 'cartopy.io.img_tiles.Stamen', 'Stamen', ([], {'style': '"""toner-lite"""'}), "(style='toner-lite')\n", (7914, 7934), False, 'from cartopy.io.img_tiles import Stamen\n'), ((7985, 8059), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'self.figsize', 'subplot_kw': "{'projection': imagery.crs}"}), "(figsize=self.figsize, 
subplot_kw={'projection': imagery.crs})\n", (7997, 8059), True, 'import matplotlib.pyplot as plt\n'), ((8284, 8337), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'self.figsize', 'facecolor': 'bgcolor'}), '(figsize=self.figsize, facecolor=bgcolor)\n', (8296, 8337), True, 'import matplotlib.pyplot as plt\n'), ((9883, 9892), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9890, 9892), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2460), 'numpy.arctan2', 'np.arctan2', (['x', 'y'], {}), '(x, y)\n', (2454, 2460), True, 'import numpy as np\n'), ((4134, 4254), 'folium.RegularPolygonMarker', 'fol.RegularPolygonMarker', ([], {'location': 'points', 'color': 'color', 'number_of_sides': '(3)', 'radius': 'size', 'rotation': 'rotation', 'fill': '(True)'}), '(location=points, color=color, number_of_sides=3,\n radius=size, rotation=rotation, fill=True)\n', (4158, 4254), True, 'import folium as fol\n'), ((7064, 7075), 'numpy.min', 'np.min', (['lon'], {}), '(lon)\n', (7070, 7075), True, 'import numpy as np\n'), ((7077, 7088), 'numpy.max', 'np.max', (['lon'], {}), '(lon)\n', (7083, 7088), True, 'import numpy as np\n'), ((7090, 7101), 'numpy.min', 'np.min', (['lat'], {}), '(lat)\n', (7096, 7101), True, 'import numpy as np\n'), ((7103, 7114), 'numpy.max', 'np.max', (['lat'], {}), '(lat)\n', (7109, 7114), True, 'import numpy as np\n'), ((7749, 7818), 'logging.warning', 'logging.warning', (['"""To draw background map, cartopy must be installed."""'], {}), "('To draw background map, cartopy must be installed.')\n", (7764, 7818), False, 'import logging\n'), ((4564, 4707), 'folium.CircleMarker', 'fol.CircleMarker', (['[self.lat[i], self.lon[i]]'], {'color': "self.node_data['node_color'][i]", 'fill_color': "self.node_data['node_color'][i]", 'radius': '(20)'}), "([self.lat[i], self.lon[i]], color=self.node_data[\n 'node_color'][i], fill_color=self.node_data['node_color'][i], radius=20)\n", (4580, 4707), True, 'import folium as fol\n'), ((5418, 5656), 
'folium.PolyLine', 'fol.PolyLine', ([], {'locations': "[[self.lat[self.edge_data['from_node'][i]], self.lon[self.edge_data[\n 'from_node'][i]]], [self.lat[self.edge_data['to_node'][i]], self.lon[\n self.edge_data['to_node'][i]]]]", 'color': '"""orange"""', 'weight': '(lw * 3)'}), "(locations=[[self.lat[self.edge_data['from_node'][i]], self.lon\n [self.edge_data['from_node'][i]]], [self.lat[self.edge_data['to_node'][\n i]], self.lon[self.edge_data['to_node'][i]]]], color='orange', weight=\n lw * 3)\n", (5430, 5656), True, 'import folium as fol\n'), ((8149, 8164), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (8162, 8164), True, 'from cartopy import crs as ccrs\n'), ((4975, 5100), 'folium.features.DivIcon', 'DivIcon', ([], {'icon_size': '(-35, 75)', 'icon_anchor': '(0, 0)', 'html': '(\'<div style="font-size: 16pt">%s</div>\' % self.node_data.index[i])'}), '(icon_size=(-35, 75), icon_anchor=(0, 0), html=\n \'<div style="font-size: 16pt">%s</div>\' % self.node_data.index[i])\n', (4982, 5100), False, 'from folium.features import DivIcon\n')] |
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
# Public API of this module: the frozen/unfrozen `markov` model instance.
__all__ = ['markov']
# Reusable docstring fragment describing the model parameters; it is
# substituted into class docstrings via doccer.docformat.
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
# Frozen models already carry their parameters, so their docstrings get an
# empty parameter section plus a pointer to the class definition.
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
# Substitution mapping for the unfrozen model docstring.
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
# Substitution mapping for the frozen model docstring.
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
# noinspection PyPep8Naming
class markov_gen(object):
    """Markov model.

    The `startprob` keyword specifies the start probabilities for the model.
    The `transmat` keyword specifies the transition probabilities the model
    follows.

    Methods
    -------
    score(x, startprob, transmat)
        Log probability of the given data `x`.
    sample(x, startprob, transmat, size=1)
        Draw random samples from a Markov model.
    fit(x)
        Fits a Markov model from data via MLE or MAP.

    Parameters
    ----------
    %(_doc_default_callparams)s

    Alternatively, the object may be called (as a function) to fix the
    parameters, returning a "frozen" Markov model:

    rv = markov(startprob=None, transmat=None)
        - Frozen object with the same methods but holding the given
          start probabilities and transitions fixed.

    Examples
    --------
    >>> from mlpy.stats.models import markov
    >>> startprob = np.array([0.1, 0.4, 0.5])
    >>> transmat = np.array([[0.3, 0.2, 0.5], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4]])
    >>> m = markov(startprob, transmat)
    >>> m.sample(size=2)
    [[2 2]]

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) <NAME> and <NAME>
        | License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
    """
    def __init__(self):
        super(markov_gen, self).__init__()
        self.__doc__ = doccer.docformat(self.__doc__, docdict_params)

    def __call__(self, startprob, transmat):
        # Bug fix: the frozen model was constructed but never returned, so
        # calling markov(...) always yielded None instead of the frozen
        # object promised by the class docstring.
        return markov_frozen(startprob, transmat)

    def score(self, x, startprob, transmat):
        """Log probability for a given data `x`.

        Attributes
        ----------
        x : ndarray
            Data to evaluate.
        %(_doc_default_callparams)s

        Returns
        -------
        log_prob : float
            The log probability of the data.
        """
        # eps keeps log() finite for zero-probability entries
        log_transmat = np.log(transmat + np.finfo(float).eps)
        log_startprob = np.log(startprob + np.finfo(float).eps)
        log_prior = log_startprob[x[:, 0]]
        n = x.shape[0]
        nstates = log_startprob.shape[0]
        logp = np.zeros(n)
        for i in range(n):
            # njk: transition-count matrix of sequence i
            njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
            logp[i] = np.sum(njk * log_transmat)
        return logp + log_prior

    def sample(self, startprob, transmat, size=1):
        """Sample from a Markov model.

        Attributes
        ----------
        size: int
            Defining number of sampled variates. Defaults to `1`.

        Returns
        -------
        vals: ndarray
            The sampled sequences of size (nseq, seqlen).
        """
        if np.isscalar(size):
            size = (1, size)
        vals = np.zeros(size, dtype=np.int32)
        nseq, seqlen = size
        for i in range(nseq):
            vals[i][0] = nonuniform.rvs(startprob)
            for t in range(1, seqlen):
                # next state depends only on the previous state
                vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
        return vals

    def fit(self, x):
        """Fit a Markov model from data via MLE or MAP.

        Attributes
        ----------
        x : ndarray[int]
            Observed data

        Returns
        -------
        %(_doc_default_callparams)s
        """
        # TODO: allow to pass pseudo_counts as parameter?
        nstates = nunique(x.ravel())
        pi_pseudo_counts = np.ones(nstates)
        transmat_pseudo_counts = np.ones((nstates, nstates))
        n = x.shape[0]
        # NOTE(review): np.bincount has no minlength here — if the highest
        # state never occurs at t=0 the vector may be shorter than nstates;
        # confirm against callers.
        startprob = normalize(np.bincount(x[:, 0])) + pi_pseudo_counts - 1
        counts = np.zeros((nstates, nstates))
        for i in range(n):
            counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
        transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
        return startprob, transmat
# Module-level singleton, mirroring scipy.stats' frozen/unfrozen pattern:
# call markov(startprob, transmat) to freeze, or use its methods directly.
markov = markov_gen()
# noinspection PyPep8Naming
class markov_frozen(object):

    def __init__(self, startprob, transmat):
        """Create a "frozen" Markov model.

        Parameters
        ----------
        startprob : array_like
            Start probabilities
        transmat : array_like
            Transition matrix
        """
        # Delegate all computation to an unfrozen model, holding the
        # parameters fixed on this instance.
        self.startprob = startprob
        self.transmat = transmat
        self._model = markov_gen()

    def score(self, x):
        """Log probability of `x` under the frozen parameters."""
        return self._model.score(x, self.startprob, self.transmat)

    def sample(self, size=1):
        """Draw `size` random samples from the frozen model."""
        return self._model.sample(self.startprob, self.transmat, size)
| [
"scipy.misc.doccer.docformat",
"six.moves.range",
"numpy.ones",
"numpy.isscalar",
"numpy.sum",
"numpy.zeros",
"numpy.vstack",
"numpy.finfo",
"numpy.bincount"
] | [((2310, 2356), 'scipy.misc.doccer.docformat', 'doccer.docformat', (['self.__doc__', 'docdict_params'], {}), '(self.__doc__, docdict_params)\n', (2326, 2356), False, 'from scipy.misc import doccer\n'), ((3032, 3043), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3040, 3043), True, 'import numpy as np\n'), ((3061, 3069), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (3066, 3069), False, 'from six.moves import range\n'), ((3609, 3626), 'numpy.isscalar', 'np.isscalar', (['size'], {}), '(size)\n', (3620, 3626), True, 'import numpy as np\n'), ((3673, 3703), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (3681, 3703), True, 'import numpy as np\n'), ((3750, 3761), 'six.moves.range', 'range', (['nseq'], {}), '(nseq)\n', (3755, 3761), False, 'from six.moves import range\n'), ((4316, 4332), 'numpy.ones', 'np.ones', (['nstates'], {}), '(nstates)\n', (4323, 4332), True, 'import numpy as np\n'), ((4366, 4393), 'numpy.ones', 'np.ones', (['(nstates, nstates)'], {}), '((nstates, nstates))\n', (4373, 4393), True, 'import numpy as np\n'), ((4511, 4539), 'numpy.zeros', 'np.zeros', (['(nstates, nstates)'], {}), '((nstates, nstates))\n', (4519, 4539), True, 'import numpy as np\n'), ((4557, 4565), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (4562, 4565), False, 'from six.moves import range\n'), ((3199, 3225), 'numpy.sum', 'np.sum', (['(njk * log_transmat)'], {}), '(njk * log_transmat)\n', (3205, 3225), True, 'import numpy as np\n'), ((3835, 3851), 'six.moves.range', 'range', (['(1)', 'seqlen'], {}), '(1, seqlen)\n', (3840, 3851), False, 'from six.moves import range\n'), ((2823, 2838), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2831, 2838), True, 'import numpy as np\n'), ((2887, 2902), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2895, 2902), True, 'import numpy as np\n'), ((3095, 3128), 'numpy.vstack', 'np.vstack', (['[x[i, 0:-1], x[i, 1:]]'], {}), '([x[i, 0:-1], x[i, 1:]])\n', (3104, 3128), True, 
'import numpy as np\n'), ((4449, 4469), 'numpy.bincount', 'np.bincount', (['x[:, 0]'], {}), '(x[:, 0])\n', (4460, 4469), True, 'import numpy as np\n'), ((4595, 4628), 'numpy.vstack', 'np.vstack', (['[x[i, 0:-1], x[i, 1:]]'], {}), '([x[i, 0:-1], x[i, 1:]])\n', (4604, 4628), True, 'import numpy as np\n')] |
import numpy as np
from keras.models import Model
from keras.models import load_model, model_from_json
from os.path import join
import config.settings as cnst
import plots.plots as plots
from predict.predict import predict_byte, predict_byte_by_section
from predict.predict_args import DefaultPredictArguments, Predict as pObj
from .ati_args import SectionActivationDistribution
import pandas as pd
from analyzers.collect_exe_files import get_partition_data, store_partition_data
import gc
import logging
import pefile
def find_qualified_sections(sd, trend, common_trend, support, fold_index):
    """ Function for training Tier-1 model with whole byte sequence data
    Args:
        sd: object to hold activation distribution of PE sections
        trend: plain activation trend found by core ATI process
        common_trend: not used here
        support: not used here
        fold_index: current fold index of cross validation
    Returns:
        q_sections_by_q_criteria: a dict with q_criterion found for each percentile supplied and
        their respective list of sections qualified.
    """
    btrend = trend.loc["BENIGN_ACTIVATION_MAGNITUDE"]
    mtrend = trend.loc["MALWARE_ACTIVATION_MAGNITUDE"]
    # Average by the respective benign and malware population sizes
    btrend = btrend / sd.b1_b_truth_count
    mtrend = mtrend / sd.b1_m_truth_count
    # Avoid division by zero in the influence ratios below
    btrend[btrend == 0] = 1
    mtrend[mtrend == 0] = 1
    malfluence = mtrend / btrend
    benfluence = btrend / mtrend
    mal_cut = np.percentile(malfluence, q=cnst.PERCENTILES)
    ben_cut = np.percentile(benfluence, q=cnst.PERCENTILES)
    q_sections_by_q_criteria = {}
    for i, _ in enumerate(cnst.PERCENTILES):
        # Uncomment [:50] for unqualified sections. Set percentile to 48
        qualified = np.concatenate([trend.columns[malfluence > mal_cut[i]],
                                    trend.columns[benfluence > ben_cut[i]]])
        q_sections_by_q_criteria[mal_cut[i]] = np.unique(qualified)  # [:50]
        if i == 0:  # Do once for lowest percentile
            list_qsec = np.concatenate([trend.columns[malfluence > mal_cut[i]],
                                        trend.columns[benfluence > ben_cut[i]]])  # [:50]
            list_avg_act_mag_signed = np.concatenate([malfluence[malfluence > mal_cut[i]] * -1,
                                                     benfluence[benfluence > ben_cut[i]]])  # [:50]
            available_sec = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'available_sections.csv', header=None)
            available_sec = list(available_sec.iloc[0])
            sec_emb = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'section_embeddings.csv')
            list_qsec_id = []
            list_qsec_emb = []
            for q in list_qsec:
                try:
                    list_qsec_emb.append(sec_emb[q][0])
                    list_qsec_id.append(available_sec.index(q) + 1)
                except Exception as e:
                    # LEAK/PADDING pseudo-sections are expected to be absent
                    if not (cnst.LEAK in str(e) or cnst.PADDING in str(e)):
                        logging.debug("The section [" + str(q) + "] is not present in available_sections.csv/section_embeddings.csv")
            influence = np.concatenate([malfluence[malfluence > mal_cut[i]],
                                        benfluence[benfluence > ben_cut[i]]])
            qdf = pd.DataFrame([list_qsec, list_qsec_id, list_qsec_emb, list_avg_act_mag_signed, influence],
                               columns=list_qsec, index=['a', 'b', 'c', 'd', 'e'])
            # Order columns by influence (row 'e'), strongest first
            qdf = qdf.transpose().sort_values(by='e', ascending=False).transpose()
            qdf.to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'qsections_meta_' + str(fold_index) + '.csv', header=None, index=False)
    logging.info("Qsections found - " + str(len(q_sections_by_q_criteria.keys())))
    logging.info(q_sections_by_q_criteria.keys())
    return q_sections_by_q_criteria
def parse_pe_pkl(file_index, file_id, fjson, unprocessed):
    """ Function to parse pickle file to find the boundaries of PE sections in a sample's pickle representation
    Args:
        file_index: PE sample index
        file_id: PE name
        fjson: pickle data representation of PE sample (currently unused)
        unprocessed: keeps track of count of samples not processed properly
    Returns:
        section_bounds: PE section boundaries as (name, start, end) tuples
        unprocessed: keeps track of count of samples not processed properly
        file_byte_size: size of full sample (None if the file could not be read)
    """
    section_bounds = []
    file_byte_size = None
    max_section_end_offset = 0
    try:
        with open(cnst.RAW_SAMPLE_DIR + file_id, 'rb') as f:
            # Fix: len(f.read()) directly; the original len(list(f.read()))
            # materialized a throwaway list of every byte.
            file_byte_size = len(f.read())
        pe = pefile.PE(cnst.RAW_SAMPLE_DIR + file_id)
        for pkl_section in pe.sections:
            section_bounds.append(
                (pkl_section.Name.strip(b'\x00').decode("utf-8").strip(),
                 pkl_section.PointerToRawData,
                 pkl_section.PointerToRawData + pkl_section.SizeOfRawData))
            if (pkl_section.PointerToRawData + pkl_section.SizeOfRawData) > max_section_end_offset:
                max_section_end_offset = (pkl_section.PointerToRawData + pkl_section.SizeOfRawData)
        # Placeholder section "padding" - for activations in padding region
        # if max_section_end_offset < fjson["size_byte"]:
        #     section_bounds.append((cnst.TAIL, max_section_end_offset + 1, fjson["size_byte"]))
        # section_bounds.append((cnst.PADDING, fjson["size_byte"] + 1, cnst.MAX_FILE_SIZE_LIMIT))
    except Exception as e:
        # Bug fix: logging.Exception does not exist and raised AttributeError
        # inside the handler, masking the real parse failure. logging.exception
        # logs the message plus the active traceback.
        logging.exception("parse failed . . . [FILE INDEX - " + str(file_index) + "] [" + str(file_id) + "] ")
        unprocessed += 1
    return section_bounds, unprocessed, file_byte_size
def map_act_to_sec(ftype, fmap, sbounds, sd):
    """
    Function to map each hidden layer activation found to corresponding PE section
    Params:
        ftype: Benign or Malware
        fmap: Hidden layer activation map
        sbounds: list of (name, start, end) PE section boundaries
    Return:
        sd: Object to hold computed activation distribution of PE sections

    Description of other variables/objects used:
        section_support: how many samples in a given category have a section <Influence by presence>
        activation_histogram: total count of activations that fell in a section <Influence by activation count>
        activation_magnitude: total sum of activation magnitudes in a section <Influence by activation strength>
    """
    # Activation indices sorted strongest-first (slice is a no-op kept for parity)
    idx = np.argsort(fmap)[::-1][:len(fmap)]
    if sbounds is not None:
        # Pass 1: section presence counters per category
        for k in range(0, len(sbounds)):
            section = sbounds[k][0]
            sd.a_section_support[section] = (
                sd.a_section_support[section] + 1) if section in sd.a_section_support.keys() else 1
            if ftype == cnst.BENIGN:
                sd.b_section_support[section] = (
                    sd.b_section_support[section] + 1) if section in sd.b_section_support.keys() else 1
                if section not in sd.m_section_support.keys():
                    sd.m_section_support[section] = 0
            else:
                if section not in sd.b_section_support.keys():
                    sd.b_section_support[section] = 0
                sd.m_section_support[section] = (
                    sd.m_section_support[section] + 1) if section in sd.m_section_support.keys() else 1
        # Pass 2: trace every activation back to the section containing its offset
        for win in range(0, len(fmap)):
            section = None
            offset = idx[win] * cnst.CONV_WINDOW_SIZE
            act = fmap[idx[win]]
            # Pooling-layer based trend: only max activations are traced back
            if act == 0:
                continue
            for bound in sbounds:
                if bound[1] <= offset <= bound[2]:
                    section = bound[0]
                    break
            if section is None:
                # No section matched this offset — account it as leakage
                # !!! VERIFY ALL OFFSET IS MATCHED AND CHECK FOR LEAKAGE !!!
                sd.a_activation_magnitude[cnst.LEAK] += act
                sd.a_activation_histogram[cnst.LEAK] += 1
                if ftype == cnst.BENIGN:
                    sd.b_activation_magnitude[cnst.LEAK] += act
                    sd.b_activation_histogram[cnst.LEAK] += 1
                else:
                    sd.m_activation_magnitude[cnst.LEAK] += act
                    sd.m_activation_histogram[cnst.LEAK] += 1
                continue
            if section in sd.a_activation_magnitude.keys():
                # Known section: accumulate magnitude and count
                sd.a_activation_magnitude[section] += act
                sd.a_activation_histogram[section] += 1
                if ftype == cnst.BENIGN:
                    if sd.b_activation_magnitude[section] is None:
                        sd.b_activation_magnitude[section] = act
                        sd.b_activation_histogram[section] = 1
                    else:
                        sd.b_activation_magnitude[section] += act
                        sd.b_activation_histogram[section] += 1
                else:
                    if sd.m_activation_magnitude[section] is None:
                        sd.m_activation_magnitude[section] = act
                        sd.m_activation_histogram[section] = 1
                    else:
                        sd.m_activation_magnitude[section] += act
                        sd.m_activation_histogram[section] += 1
            else:
                # First time this section is seen: initialize both categories,
                # with None marking the category that has no activations yet
                sd.a_activation_magnitude[section] = act
                sd.a_activation_histogram[section] = 1
                if ftype == cnst.BENIGN:
                    sd.b_activation_magnitude[section] = act
                    sd.b_activation_histogram[section] = 1
                    sd.m_activation_magnitude[section] = None
                    sd.m_activation_histogram[section] = None
                else:
                    sd.b_activation_magnitude[section] = None
                    sd.b_activation_histogram[section] = None
                    sd.m_activation_magnitude[section] = act
                    sd.m_activation_histogram[section] = 1
    return sd
def get_feature_maps(smodel, partition, files):
    """
    Function to obtain hidden layer activation (feature) maps using given stunted model
    Params:
        smodel: stunted model to use
        partition: partition for current set of B1 samples under process
        files: IDs of the samples to be processed from the partition
    Returns:
        raw_feature_maps: hidden layer activation (feature) maps
    """
    pargs = DefaultPredictArguments()
    pargs.verbose = cnst.ATI_PREDICT_VERBOSE
    total = len(files)
    # ceil(total / batch_size) prediction steps
    full_batches, remainder = divmod(total, pargs.batch_size)
    pargs.pred_steps = full_batches if remainder == 0 else full_batches + 1
    return predict_byte(smodel, files, pargs)
def process_files(stunted_model, args, sd):
    """
    Function to process the B1 samples to obtain hidden layer activation maps and trace back their PE sections
    Params:
        stunted_model: Tier-1 model that is stunted up to required hidden layer where activation maps are collected.
        args: contains various config data
    Returns:
        sd: Object to hold computed activation distribution of PE sections
    """
    unprocessed = 0
    samplewise_feature_maps = []
    files = args.t2_x_train
    files_type = args.t2_y_train
    logging.info("FMAP MODULE Total B1 [{0}]\tGroundTruth [{1}:{2}]".format(
        len(args.t2_y_train),
        len(np.where(args.t2_y_train == cnst.BENIGN)[0]),
        len(np.where(args.t2_y_train == cnst.MALWARE)[0])))
    # Ground truth drives the benign/malware trend, not model predictions
    raw_feature_maps = get_feature_maps(stunted_model, args.whole_b1_train_partition, files)
    # Free the large partition dict as soon as the maps are extracted
    del args.whole_b1_train_partition
    gc.collect()
    for i in range(0, len(files)):
        section_bounds, unprocessed, fsize = parse_pe_pkl(
            i, files[i], args.section_b1_train_partition[files[i]], unprocessed)
        if cnst.USE_POOLING_LAYER:
            try:
                # Trace back only positions holding the column-wise maximum,
                # truncated to the sample's convoluted length
                clip = np.min([cnst.MAX_FILE_CONVOLUTED_SIZE, int(fsize / cnst.CONV_STRIDE_SIZE) + 2])
                pooled_max_1D_map = np.sum(
                    raw_feature_maps[i] == np.amax(raw_feature_maps[i], axis=0), axis=1)[:clip]
                sd = map_act_to_sec(files_type[i], pooled_max_1D_map, section_bounds, sd)
            except Exception as e:
                logging.exception("$$$$$$$$ " + str(np.shape(raw_feature_maps[i])))
        else:
            feature_map = raw_feature_maps[i].sum(axis=1).ravel()
            samplewise_feature_maps.append(feature_map)
            sd = map_act_to_sec(files_type[i], feature_map, section_bounds, sd)
    del args.section_b1_train_partition
    gc.collect()
    return sd
# print(section_stat)
# print("Unprocessed file count: ", unprocessed)
# Find activation distribution
# raw_arr = np.array(np.squeeze(temp_feature_map_list))
# print(len(raw_arr), raw_arr.max())
# raw_arr = raw_arr[raw_arr > 0.3]
# print(len(raw_arr))
# plt.hist(raw_arr, 10)#range(0, len(raw_arr)))
# plt.show()
'''for key in act.keys():
# key = "."+key if "." not in key else key
if key is not None and key != '' and key != '.padding':
with open("BENIGN" if "benign" in section_stat_file else "MALWARE" + "_activation_" + key[1:] + ".csv", mode='a+') as f:
f.write(str(act[key]))
'''
'''
#overall_stat.append(section_stat)
for x in pcs_keys:
overall_stat_str += str(section_stat[x]) + ","
overall_stat_str = overall_stat_str[:-1] + "\n"
print("\n[Unprocessed Files : ", unprocessed, "] Overall Stats: ", overall_stat_str)
processed_file_count = len(fn_list) - unprocessed
normalized_stats_str = str(section_stat["header"]/processed_file_count) + "," \
+ str(section_stat["text"]/processed_file_count) + "," \
+ str(section_stat["data"]/processed_file_count) + "," \
+ str(section_stat["rsrc"]/processed_file_count) + "," \
+ str(section_stat["pdata"]/processed_file_count) + "," \
+ str(section_stat["rdata"]/processed_file_count) + "\n"
#+ str(section_stat["padding"]/processed_file_count) \
print("Normalized Stats: ", normalized_stats_str)
#plt.show()
with open(section_stat_file, 'w+') as f:
f.write(overall_stat_str)
f.write("\n")
f.write(normalized_stats_str)
'''
def change_model(model, new_input_shape=(None, cnst.SAMPLE_SIZE)):
    """ Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
    Args:
        model: An object with required parameters/hyper-parameters for loading, configuring and compiling
        new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
    Returns:
        new_model: new model with reduced input shape and weights updated
    """
    # Patch the input shape on the first layer, then rebuild the architecture
    # from its JSON description so the new shape takes effect.
    model._layers[0].batch_input_shape = new_input_shape
    new_model = model_from_json(model.to_json())
    for layer in new_model.layers:
        try:
            layer.set_weights(model.get_layer(name=layer.name).get_weights())
            # logging.info("Loaded and weights set for layer {}".format(layer.name))
        except Exception:
            # Layers whose weight shapes no longer match cannot take the old
            # weights; log the failure and continue with the remaining layers.
            logging.exception("Could not transfer weights for layer %s", layer.name)
    return new_model
def get_stunted_model(args, tier):
    """ Function to stunt the given model up to the required hidden layer
        based on the supplied hidden layer number
    """
    model_name = args.t1_model_name if tier == 1 else args.t2_model_name
    full_model = load_model(join(args.save_path, model_name))
    full_model = change_model(full_model, new_input_shape=(None, cnst.SAMPLE_SIZE))
    # Redefine the model to emit the output of the chosen hidden layer
    # (ReLU activation after convolution - before max pooling).
    truncated_outputs = [full_model.layers[idx].output for idx in [args.layer_num]]
    # truncated_outputs = full_model.get_layer('multiply_1').output
    truncated_model = Model(inputs=full_model.inputs, outputs=truncated_outputs)
    logging.debug("Model stunted upto " + str(truncated_outputs[0]) + " Layer number passed to stunt:" + str(args.layer_num))
    return truncated_model
def save_activation_trend(sd):
    """
    Function to save the various activation trends identified in CSV format files.
    Params:
        sd: Object containing computed activation distribution of PE sections
    Returns:
        fmaps_trend: used to identify the qualified sections in subsequent steps
        fmaps_common_trend, fmaps_section_support: Not in use currently
    """
    fmaps_trend = pd.DataFrame()
    fmaps_common_trend = pd.DataFrame()
    fmaps_section_support = pd.DataFrame()

    row_labels = ["ALL_ACTIVATION_MAGNITUDE", "BENIGN_ACTIVATION_MAGNITUDE",
                  "MALWARE_ACTIVATION_MAGNITUDE", "HISTOGRAM_ALL", "HISTOGRAM_BENIGN",
                  "HISTOGRAM_MALWARE"]
    fmaps_trend["ACTIVATION / HISTOGRAM"] = row_labels
    fmaps_common_trend["COMMON"] = row_labels
    fmaps_section_support["SUPPORT"] = ["PRESENCE_IN_ALL", "PRESENCE_IN_BENIGN", "PRESENCE_IN_MALWARE",
                                        "SUPPORT_IN_ALL", "SUPPORT_IN_BENIGN", "SUPPORT_IN_MALWARE"]

    def _as_int(value):
        # Magnitude/histogram entries may be None for sections never seen; keep None as-is.
        return int(value) if value is not None else value

    for key in sd.a_activation_histogram.keys():
        # One column per section: magnitudes (all/benign/malware) then histograms.
        row = [_as_int(sd.a_activation_magnitude[key]),
               _as_int(sd.b_activation_magnitude[key]),
               _as_int(sd.m_activation_magnitude[key]),
               _as_int(sd.a_activation_histogram[key]),
               _as_int(sd.b_activation_histogram[key]),
               _as_int(sd.m_activation_histogram[key])]
        fmaps_trend[key] = row
        # A section is "common" only when activated by both benign and malware samples.
        if sd.b_activation_histogram[key] is not None and sd.m_activation_histogram[key] is not None:
            fmaps_common_trend[key] = row

    if sd.b1_count > 0 and sd.b1_b_truth_count > 0 and sd.b1_m_truth_count > 0:
        for key in sd.a_section_support.keys():
            fmaps_section_support[key] = [sd.a_section_support[key], sd.b_section_support[key],
                                          sd.m_section_support[key],
                                          "{:0.1f}%".format(sd.a_section_support[key] / sd.b1_count * 100),
                                          "{:0.1f}%".format(sd.b_section_support[key] / sd.b1_b_truth_count * 100),
                                          "{:0.1f}%".format(sd.m_section_support[key] / sd.b1_m_truth_count * 100)]

    fmaps_trend.fillna(-1, inplace=True)
    fmaps_trend.set_index('ACTIVATION / HISTOGRAM', inplace=True)
    fmaps_common_trend.set_index('COMMON', inplace=True)
    fmaps_section_support.set_index('SUPPORT', inplace=True)

    # Store activation trend identified
    fmaps_trend.to_csv(cnst.COMBINED_FEATURE_MAP_STATS_FILE, index=True)
    fmaps_common_trend.to_csv(cnst.COMMON_COMBINED_FEATURE_MAP_STATS_FILE, index=True)
    fmaps_section_support.to_csv(cnst.SECTION_SUPPORT, index=True)

    # Drop padding and leak information after saving - not useful for further processing
    try:
        fmaps_trend.drop([cnst.PADDING], axis=1, inplace=True)
        fmaps_common_trend.drop([cnst.PADDING], axis=1, inplace=True)
        fmaps_section_support.drop([cnst.PADDING], axis=1, inplace=True)
        fmaps_trend.drop([cnst.LEAK], axis=1, inplace=True)
        fmaps_common_trend.drop([cnst.LEAK], axis=1, inplace=True)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        logging.info("Proceeding after trying to clean fmap data.")
    return fmaps_trend, fmaps_common_trend, fmaps_section_support
def start_ati_process(args, fold_index, partition_count, sd):
    """
    Function to perform the ATI process over all partitions of B1 training set
    Params:
        args: contains various config data
        fold_index: current fold index of cross validation
        partition_count: count of train B1 partitions
    Returns:
        sd: Object containing computed activation distribution of PE sections
    """
    args.layer_num = cnst.LAYER_NUM_TO_STUNT
    stunted_model = get_stunted_model(args, tier=1)
    for part in range(partition_count):
        logging.info("ATI for partition: %s", part)
        csv_path = cnst.DATA_SOURCE_PATH + cnst.ESC + "b1_train_" + str(fold_index) + "_p" + str(part) + ".csv"
        partition_df = pd.read_csv(csv_path, header=None)
        args.t2_x_train = partition_df.iloc[:, 0]
        args.t2_y_train = partition_df.iloc[:, 1]
        args.whole_b1_train_partition = get_partition_data("b1_train", fold_index, part, "t1")
        args.section_b1_train_partition = get_partition_data("b1_train", fold_index, part, "t2")
        sd = process_files(stunted_model, args, sd)
    del stunted_model
    gc.collect()
    return sd
def get_top_act_blocks(top_acts_idx, sbounds, q_sections, whole_bytes):
    """
    Function to map the top activation back to Qualified section's byte blocks and collating them to form block dataset
    Params:
        top_acts_idx: act as offsets of top activations in the hidden layer activation (feature) map
                      (sorted in place below)
        sbounds: Pe section boundaries as (name, lower, upper) entries, or None
        q_sections: qualified sections
        whole_bytes: Entire byte content of a PE sample
    Returns:
        top_blocks: single sequence of all top blocks found
    """
    top_blocks = []
    top_acts_idx.sort()
    if sbounds is not None:
        # Scale activation indices by the convolution stride to get byte offsets.
        for idx, offset in enumerate(top_acts_idx * cnst.CONV_STRIDE_SIZE):
            for sname, low, upp in sbounds:
                if low <= offset <= upp:
                    if sname in q_sections:
                        try:
                            top_blocks.extend(whole_bytes[offset:offset+cnst.CONV_STRIDE_SIZE])
                            break
                        except Exception:
                            logging.exception("[MODULE: get_section_id_vector()] Error occurred while mapping section id: %s %s %s %s %s %s",
                                              idx, low, offset, upp, sname, sname in q_sections)
                    # else:
                    #     print(sname, sname in q_sections, sname in available_sections)
    else:
        logging.info("Sections bounds not available. Returning a vector of Zeroes for section id vector.")
    return top_blocks
def collect_b1_block_dataset(args, fold_index, partition_count, mode, qcnt='X'):
    """
    Function to generate the top ativation blocks based dataset from B1 sample set
    Params:
        args: an object containing various config data
        fold_index: current fold index of cross validation
        partition_count: count of B1 train partitions
        mode: phase of data collection - Train / Val / Test
        qcnt: index of the current q_criterion. 'X' for Testing phase
    Returns:
        None (collected data is persisted directly to disk storage)
    """
    args.layer_num = cnst.LAYER_NUM_TO_COLLECT_NN_DATASET
    stunted_model = get_stunted_model(args, tier=cnst.TIER_TO_COLLECT_BLOCK_DATA)
    for pcount in range(0, partition_count):
        logging.info("Collecting Block data for partition: %s", pcount)
        b1datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "b1_"+mode+"_" + str(fold_index) + "_p" + str(pcount) + ".csv", header=None)
        files, files_type = b1datadf.iloc[:, 0], b1datadf.iloc[:, 1]
        args.whole_b1_partition = get_partition_data("b1_"+mode, fold_index, pcount, "t1")
        args.section_b1_partition = get_partition_data("b1_"+mode, fold_index, pcount, "t2")
        unprocessed = 0
        logging.info("Block Module Total B1 [{0}]\tGroundTruth [{1}:{2}]".format(len(files_type), len(np.where(files_type == cnst.BENIGN)[0]), len(np.where(files_type == cnst.MALWARE)[0])))
        nn_predict_args = DefaultPredictArguments()
        nn_predict_args.verbose = cnst.ATI_PREDICT_VERBOSE
        raw_feature_maps = predict_byte_by_section(stunted_model, args.section_b1_partition, files, args.q_sections, None, nn_predict_args)
        logging.info("Raw feature maps found.")
        for i in range(0, len(files)):
            section_bounds, unprocessed, fsize = parse_pe_pkl(i, files[i][:-4], args.section_b1_partition[files[i][:-4]], unprocessed)
            if cnst.USE_POOLING_LAYER:
                # BUGFIX: top_blocks was assigned only inside the try, so a failure
                # left the previous iteration's blocks (or an unbound name on the
                # first sample) to be written below. Reset it per sample.
                top_blocks = []
                try:
                    cur_fmap = raw_feature_maps[i]
                    # Offsets of the strongest activation per feature-map column.
                    top_acts_idx = np.argmax(cur_fmap, axis=0)
                    top_blocks = get_top_act_blocks(top_acts_idx, section_bounds, args.q_sections, args.whole_b1_partition[files[i][:-4]]["whole_bytes"])
                    if sum(top_blocks) == 0:
                        logging.debug("No useful top block data added for sample " + files[i])
                except Exception:
                    logging.exception("$$$$ Error occurred in Top Activation Block Module. $$$$")
                # Replace the sample's raw bytes with the collated top blocks.
                args.whole_b1_partition[files[i][:-4]]["whole_bytes"] = top_blocks
        store_partition_data("block_b1_"+mode, fold_index, pcount, "t1", args.whole_b1_partition)
        del args.section_b1_partition
        del args.whole_b1_partition
        gc.collect()
    del stunted_model
    gc.collect()
def init(args, fold_index, partition_count, b1_all_file_cnt, b1b_all_truth_cnt, b1m_all_truth_cnt):
    """ Activation Trend Identification (ATI) Module
    Args:
        args: various data required for ATI
        fold_index: current fold of cross-validation
        partition_count: number of partitions created for b1 training set
        b1_all_file_cnt: count of samples in b1 set
        b1b_all_truth_cnt: count of benign samples in b1 training set
        b1m_all_truth_cnt: count of malware samples in b1 training set
    Returns:
        None (Resultant data are stored in CSV for further use)
    """
    dist = SectionActivationDistribution()
    dist.b1_count = b1_all_file_cnt
    dist.b1_b_truth_count = b1b_all_truth_cnt
    dist.b1_m_truth_count = b1m_all_truth_cnt
    dist = start_ati_process(args, fold_index, partition_count, dist)
    trend, common_trend, support = save_activation_trend(dist)
    # Select sections for Tier-2 based on the identified activation trend.
    q_sections_by_q_criteria = find_qualified_sections(dist, trend, common_trend, support, fold_index)
    # Persist the qualified sections, one row per Q-criterion.
    rows = []
    for criterion in q_sections_by_q_criteria:
        rows.append(np.concatenate([[str(criterion)], q_sections_by_q_criteria[criterion]]))
    out_csv = (cnst.PROJECT_BASE_PATH + cnst.ESC + "out" + cnst.ESC + "result" + cnst.ESC
               + "qsections_by_qcriteria_" + str(fold_index) + ".csv")
    pd.DataFrame(rows).to_csv(out_csv, index=False, header=None)
    return  # select, drop
if __name__ == '__main__':
    # Ad-hoc entry point: regenerate the saved feature-map statistics plot.
    # (The commented-out calls below were used for manual PE-header inspection.)
    # start_visualization_process(args)
    plots.save_stats_as_plot()
    # pe = pefile.PE("D:\\08_Dataset\\benign\\git-gui.exe")
    # parse_pe(0, "D:\\08_Dataset\\benign\\git-gui.exe", 204800, 0)
    # for section in pe.sections:
    # print(section)
    # print(pe.OPTIONAL_HEADER, "\n", pe.NT_HEADERS, "\n", pe.FILE_HEADER, "\n", pe.RICH_HEADER, "\n", pe.DOS_HEADER,
    # \"\n", pe.__IMAGE_DOS_HEADER_format__, "\n", pe.header, "\n", "LENGTH", len(pe.header))
'''def display_edit_distance():
sections = []
top_sections = []
malware_edit_distance = []
print("\n SECTION [EDIT DISTANCE SCORE]")
df = pd.read_csv(combined_stat_file)
df.set_index("type", inplace=True)
for i in range(0, len(keys)):
a = df.loc['FN'].values[i]
b = df.loc['BENIGN'].values[i]
c = df.loc['MALWARE'].values[i]
dist1 = norm(a-b) // 1
dist2 = norm(a-c) // 1
print(keys[i], dist1, dist2, "[MALWARE]" if dist2 < dist1 else "[BENIGN]", dist1 - dist2)
if dist2 < dist1:
malware_edit_distance.append(dist1 - dist2)
sections.append(keys[i])
idx = np.argsort(malware_edit_distance)[::-1]
for t in idx:
print("%10s" % sections[t], "%20s" % str(malware_edit_distance[t]))
top_sections.append(sections[t])
return top_sections[:3]
def ks(cutoff):
from scipy import stats
keys = ['header', 'text', 'data', 'rsrc', 'pdata', 'rdata']
for key in keys:
b = pd.read_csv('D:\\03_GitWorks\\echelon\\out\\result_multi\\benign.csv' + ".activation_" + key + ".csv", header=None)
m = pd.read_csv('D:\\03_GitWorks\\echelon\\out\\result_multi\\malware.csv' + ".activation_" + key + ".csv", header=None)
b = np.squeeze((b.get_values()))
m = np.squeeze((m.get_values()))
b = (b - b.min()) / (b.max() - b.min())
m = (m - m.min()) / (m.max() - m.min())
print(key, b.max(), len(b), len(b[b > cutoff]))
print(key, m.max(), len(m), len(m[m > cutoff]))
print("Section: ", key[:4], "\t\t", stats.ks_2samp(np.array(b), np.array(m)))
plt.hist(b[b > cutoff], 100)
plt.hist(m[m > cutoff], 100)
plt.legend(['benign', 'malware'])
plt.show()
# break
'''
| [
"predict.predict.predict_byte",
"logging.debug",
"pandas.read_csv",
"logging.exception",
"numpy.argsort",
"predict.predict.predict_byte_by_section",
"logging.info",
"pefile.PE",
"numpy.where",
"keras.models.Model",
"numpy.concatenate",
"analyzers.collect_exe_files.get_partition_data",
"panda... | [((1589, 1634), 'numpy.percentile', 'np.percentile', (['malfluence'], {'q': 'cnst.PERCENTILES'}), '(malfluence, q=cnst.PERCENTILES)\n', (1602, 1634), True, 'import numpy as np\n'), ((1671, 1716), 'numpy.percentile', 'np.percentile', (['benfluence'], {'q': 'cnst.PERCENTILES'}), '(benfluence, q=cnst.PERCENTILES)\n', (1684, 1716), True, 'import numpy as np\n'), ((12123, 12148), 'predict.predict_args.DefaultPredictArguments', 'DefaultPredictArguments', ([], {}), '()\n', (12146, 12148), False, 'from predict.predict_args import DefaultPredictArguments, Predict as pObj\n'), ((12390, 12431), 'predict.predict.predict_byte', 'predict_byte', (['smodel', 'files', 'predict_args'], {}), '(smodel, files, predict_args)\n', (12402, 12431), False, 'from predict.predict import predict_byte, predict_byte_by_section\n'), ((13533, 13545), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13543, 13545), False, 'import gc\n'), ((14548, 14560), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14558, 14560), False, 'import gc\n'), ((18082, 18142), 'keras.models.Model', 'Model', ([], {'inputs': 'complete_model.inputs', 'outputs': 'stunted_outputs'}), '(inputs=complete_model.inputs, outputs=stunted_outputs)\n', (18087, 18142), False, 'from keras.models import Model\n'), ((18719, 18733), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18731, 18733), True, 'import pandas as pd\n'), ((18759, 18773), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18771, 18773), True, 'import pandas as pd\n'), ((18802, 18816), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18814, 18816), True, 'import pandas as pd\n'), ((24561, 24573), 'gc.collect', 'gc.collect', ([], {}), '()\n', (24571, 24573), False, 'import gc\n'), ((28942, 28954), 'gc.collect', 'gc.collect', ([], {}), '()\n', (28952, 28954), False, 'import gc\n'), ((30612, 30638), 'plots.plots.save_stats_as_plot', 'plots.save_stats_as_plot', ([], {}), '()\n', (30636, 30638), True, 'import plots.plots as plots\n'), ((5047, 5087), 
'pefile.PE', 'pefile.PE', (['(cnst.RAW_SAMPLE_DIR + file_id)'], {}), '(cnst.RAW_SAMPLE_DIR + file_id)\n', (5056, 5087), False, 'import pefile\n'), ((17581, 17658), 'os.path.join', 'join', (['args.save_path', '(args.t1_model_name if tier == 1 else args.t2_model_name)'], {}), '(args.save_path, args.t1_model_name if tier == 1 else args.t2_model_name)\n', (17585, 17658), False, 'from os.path import join\n'), ((24016, 24061), 'logging.info', 'logging.info', (['"""ATI for partition: %s"""', 'pcount'], {}), "('ATI for partition: %s', pcount)\n", (24028, 24061), False, 'import logging\n'), ((24326, 24382), 'analyzers.collect_exe_files.get_partition_data', 'get_partition_data', (['"""b1_train"""', 'fold_index', 'pcount', '"""t1"""'], {}), "('b1_train', fold_index, pcount, 't1')\n", (24344, 24382), False, 'from analyzers.collect_exe_files import get_partition_data, store_partition_data\n'), ((24425, 24481), 'analyzers.collect_exe_files.get_partition_data', 'get_partition_data', (['"""b1_train"""', 'fold_index', 'pcount', '"""t2"""'], {}), "('b1_train', fold_index, pcount, 't2')\n", (24443, 24481), False, 'from analyzers.collect_exe_files import get_partition_data, store_partition_data\n'), ((26000, 26108), 'logging.info', 'logging.info', (['"""Sections bounds not available. Returning a vector of Zeroes for section id vector."""'], {}), "(\n 'Sections bounds not available. 
Returning a vector of Zeroes for section id vector.'\n )\n", (26012, 26108), False, 'import logging\n'), ((26892, 26955), 'logging.info', 'logging.info', (['"""Collecting Block data for partition: %s"""', 'pcount'], {}), "('Collecting Block data for partition: %s', pcount)\n", (26904, 26955), False, 'import logging\n'), ((27202, 27260), 'analyzers.collect_exe_files.get_partition_data', 'get_partition_data', (["('b1_' + mode)", 'fold_index', 'pcount', '"""t1"""'], {}), "('b1_' + mode, fold_index, pcount, 't1')\n", (27220, 27260), False, 'from analyzers.collect_exe_files import get_partition_data, store_partition_data\n'), ((27295, 27353), 'analyzers.collect_exe_files.get_partition_data', 'get_partition_data', (["('b1_' + mode)", 'fold_index', 'pcount', '"""t2"""'], {}), "('b1_' + mode, fold_index, pcount, 't2')\n", (27313, 27353), False, 'from analyzers.collect_exe_files import get_partition_data, store_partition_data\n'), ((27592, 27617), 'predict.predict_args.DefaultPredictArguments', 'DefaultPredictArguments', ([], {}), '()\n', (27615, 27617), False, 'from predict.predict_args import DefaultPredictArguments, Predict as pObj\n'), ((27704, 27820), 'predict.predict.predict_byte_by_section', 'predict_byte_by_section', (['stunted_model', 'args.section_b1_partition', 'files', 'args.q_sections', 'None', 'nn_predict_args'], {}), '(stunted_model, args.section_b1_partition, files,\n args.q_sections, None, nn_predict_args)\n', (27727, 27820), False, 'from predict.predict import predict_byte, predict_byte_by_section\n'), ((27825, 27864), 'logging.info', 'logging.info', (['"""Raw feature maps found."""'], {}), "('Raw feature maps found.')\n", (27837, 27864), False, 'import logging\n'), ((28731, 28827), 'analyzers.collect_exe_files.store_partition_data', 'store_partition_data', (["('block_b1_' + mode)", 'fold_index', 'pcount', '"""t1"""', 'args.whole_b1_partition'], {}), "('block_b1_' + mode, fold_index, pcount, 't1', args.\n whole_b1_partition)\n", (28751, 28827), False, 
'from analyzers.collect_exe_files import get_partition_data, store_partition_data\n'), ((28903, 28915), 'gc.collect', 'gc.collect', ([], {}), '()\n', (28913, 28915), False, 'import gc\n'), ((1949, 2094), 'numpy.concatenate', 'np.concatenate', (['[trend.columns[malfluence > mal_q_criteria_by_percentiles[i]], trend.\n columns[benfluence > ben_q_criteria_by_percentiles[i]]]'], {}), '([trend.columns[malfluence > mal_q_criteria_by_percentiles[i]\n ], trend.columns[benfluence > ben_q_criteria_by_percentiles[i]]])\n', (1963, 2094), True, 'import numpy as np\n'), ((2176, 2321), 'numpy.concatenate', 'np.concatenate', (['[trend.columns[malfluence > mal_q_criteria_by_percentiles[i]], trend.\n columns[benfluence > ben_q_criteria_by_percentiles[i]]]'], {}), '([trend.columns[malfluence > mal_q_criteria_by_percentiles[i]\n ], trend.columns[benfluence > ben_q_criteria_by_percentiles[i]]])\n', (2190, 2321), True, 'import numpy as np\n'), ((2364, 2507), 'numpy.concatenate', 'np.concatenate', (['[malfluence[malfluence > mal_q_criteria_by_percentiles[i]] * -1, benfluence\n [benfluence > ben_q_criteria_by_percentiles[i]]]'], {}), '([malfluence[malfluence > mal_q_criteria_by_percentiles[i]] *\n -1, benfluence[benfluence > ben_q_criteria_by_percentiles[i]]])\n', (2378, 2507), True, 'import numpy as np\n'), ((2542, 2652), 'pandas.read_csv', 'pd.read_csv', (["(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC +\n 'available_sections.csv')"], {'header': 'None'}), "(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC +\n 'available_sections.csv', header=None)\n", (2553, 2652), True, 'import pandas as pd\n'), ((2727, 2824), 'pandas.read_csv', 'pd.read_csv', (["(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC +\n 'section_embeddings.csv')"], {}), "(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC +\n 'section_embeddings.csv')\n", (2738, 2824), True, 'import pandas as pd\n'), ((3330, 3468), 'numpy.concatenate', 'np.concatenate', (['[malfluence[malfluence > 
mal_q_criteria_by_percentiles[i]], benfluence[\n benfluence > ben_q_criteria_by_percentiles[i]]]'], {}), '([malfluence[malfluence > mal_q_criteria_by_percentiles[i]],\n benfluence[benfluence > ben_q_criteria_by_percentiles[i]]])\n', (3344, 3468), True, 'import numpy as np\n'), ((3483, 3633), 'pandas.DataFrame', 'pd.DataFrame', (['[list_qsec, list_qsec_id, list_qsec_emb, list_avg_act_mag_signed, influence]'], {'columns': 'list_qsec', 'index': "['a', 'b', 'c', 'd', 'e']"}), "([list_qsec, list_qsec_id, list_qsec_emb,\n list_avg_act_mag_signed, influence], columns=list_qsec, index=['a', 'b',\n 'c', 'd', 'e'])\n", (3495, 3633), True, 'import pandas as pd\n'), ((7222, 7238), 'numpy.argsort', 'np.argsort', (['fmap'], {}), '(fmap)\n', (7232, 7238), True, 'import numpy as np\n'), ((23294, 23353), 'logging.info', 'logging.info', (['"""Proceeding after trying to clean fmap data."""'], {}), "('Proceeding after trying to clean fmap data.')\n", (23306, 23353), False, 'import logging\n'), ((30328, 30347), 'pandas.DataFrame', 'pd.DataFrame', (['qdata'], {}), '(qdata)\n', (30340, 30347), True, 'import pandas as pd\n'), ((13113, 13153), 'numpy.where', 'np.where', (['(args.t2_y_train == cnst.BENIGN)'], {}), '(args.t2_y_train == cnst.BENIGN)\n', (13121, 13153), True, 'import numpy as np\n'), ((13163, 13204), 'numpy.where', 'np.where', (['(args.t2_y_train == cnst.MALWARE)'], {}), '(args.t2_y_train == cnst.MALWARE)\n', (13171, 13204), True, 'import numpy as np\n'), ((28185, 28212), 'numpy.argmax', 'np.argmax', (['cur_fmap'], {'axis': '(0)'}), '(cur_fmap, axis=0)\n', (28194, 28212), True, 'import numpy as np\n'), ((27478, 27513), 'numpy.where', 'np.where', (['(files_type == cnst.BENIGN)'], {}), '(files_type == cnst.BENIGN)\n', (27486, 27513), True, 'import numpy as np\n'), ((27523, 27559), 'numpy.where', 'np.where', (['(files_type == cnst.MALWARE)'], {}), '(files_type == cnst.MALWARE)\n', (27531, 27559), True, 'import numpy as np\n'), ((28436, 28506), 'logging.debug', 'logging.debug', 
(["('No useful top block data added for sample ' + files[i])"], {}), "('No useful top block data added for sample ' + files[i])\n", (28449, 28506), False, 'import logging\n'), ((28566, 28643), 'logging.exception', 'logging.exception', (['"""$$$$ Error occurred in Top Activation Block Module. $$$$"""'], {}), "('$$$$ Error occurred in Top Activation Block Module. $$$$')\n", (28583, 28643), False, 'import logging\n'), ((13827, 13863), 'numpy.amax', 'np.amax', (['raw_feature_maps[i]'], {'axis': '(0)'}), '(raw_feature_maps[i], axis=0)\n', (13834, 13863), True, 'import numpy as np\n'), ((14128, 14157), 'numpy.shape', 'np.shape', (['raw_feature_maps[i]'], {}), '(raw_feature_maps[i])\n', (14136, 14157), True, 'import numpy as np\n'), ((25662, 25841), 'logging.exception', 'logging.exception', (['"""[MODULE: get_section_id_vector()] Error occurred while mapping section id: %s %s %s %s %s %s"""', 'idx', 'low', 'offset', 'upp', 'sname', '(sname in q_sections)'], {}), "(\n '[MODULE: get_section_id_vector()] Error occurred while mapping section id: %s %s %s %s %s %s'\n , idx, low, offset, upp, sname, sname in q_sections)\n", (25679, 25841), False, 'import logging\n')] |
import numpy as np
from scipy.sparse import lil_matrix
import scipy.sparse.linalg as sp
import scipy.sparse as sparse
import math
import csv
import matplotlib.pyplot as plt
def linear_powerflow_model(Y00, Y01, Y10, Y11_inv, I_coeff, V1, slack_no):
    """Build linearized voltage, voltage-magnitude and branch-current models.

    Output relations:
        Vmag = coeff_Vmag_P * Pnode + coeff_Vmag_Q * Qnode + coeff_Vmag_k
        I    = coeff_I_P * Pnode + coeff_I_Q * Qnode + coeff_I_const  (complex)
    """
    # --- complex-voltage linearization around the operating point V1 ---
    inv_conj_v = 1 / np.conj(V1[slack_no:])
    coeff_V = Y11_inv * inv_conj_v
    coeff_V_P = coeff_V
    coeff_V_Q = -1j * coeff_V
    coeff_Vm = -np.dot(Y11_inv, np.dot(Y10, V1[:slack_no]))

    # --- voltage-magnitude linearization ---
    coeff_Vmag_k = abs(coeff_Vm)
    A = np.multiply(coeff_V.transpose(), 1 / coeff_Vm).transpose()
    coeff_Vmag_P = np.multiply(A.real.transpose(), coeff_Vmag_k).transpose()
    coeff_Vmag_Q = np.multiply((-1j * A).real.transpose(), coeff_Vmag_k).transpose()

    # --- branch-current linearization (skipped when no current map given) ---
    if len(I_coeff):
        I_sub = I_coeff[:, slack_no:]
        coeff_I_P = np.dot(I_sub, coeff_V_P)
        coeff_I_Q = np.dot(I_sub, coeff_V_Q)
        coeff_I_const = np.dot(I_sub, coeff_Vm) + np.dot(I_coeff[:, :slack_no], V1[:slack_no])
    else:
        coeff_I_P, coeff_I_Q, coeff_I_const = [], [], []
    return coeff_V_P, coeff_V_Q, coeff_Vm, coeff_Vmag_P, coeff_Vmag_Q, coeff_Vmag_k, coeff_I_P, coeff_I_Q, coeff_I_const
def validate_linear_model(coeff_Vp, coeff_Vq, coeff_Vm, PQ_node, slack_number):
    """Cross-check the linear voltage model at the given bus injections.

    Returns two estimates of the bus voltages: one from separate P/Q
    sensitivities and one from the complex-conjugate formulation.
    NOTE(review): the factor 1000 looks like the S-base (kW -> W) conversion
    — confirm against the callers.
    """
    loads = PQ_node[slack_number:]
    p_part = np.array([np.real(s) * 1000 for s in loads])
    q_part = np.array([np.imag(s) * 1000 for s in loads])
    # coeff_Vp*Pnode + coeff_Vq*Qnode + coeff_Vm
    V_cal = coeff_Vm + np.dot(coeff_Vp, p_part) + np.dot(coeff_Vq, q_part)
    v_cal_1 = coeff_Vm + np.dot(coeff_Vp, np.conj(loads * 1000))
    return [V_cal, v_cal_1]
def check_VI_correct(V1,PQ_node,slack_number,coeff_V,coeff_Vm,coeff_Vmag_P,coeff_Vmag_Q,coeff_Vmag_k,Y10,Y11,coeff_I_P, coeff_I_Q, coeff_I_const,I_coeff):
    """Sanity-check the linearized models against the true power-flow solution.

    Prints the summed percentage/absolute errors and writes one CSV per
    quantity (bus voltage, voltage magnitude, bus current, branch current)
    into the working directory.
    NOTE(review): coeff_I_P, coeff_I_Q and coeff_I_const are accepted but
    never used here — confirm whether they can be dropped at the call sites.
    """
    def _write_row(path, row):
        # BUGFIX: the original called f.close() inside the `with` block, which
        # is redundant — the context manager already closes the file.
        with open(path, 'w') as f:
            csv.writer(f).writerow(row)

    # --- complex bus voltage: linear model vs. exact ---
    V1_linear = list(np.dot(coeff_V, np.conj(PQ_node[slack_number:] * 1000)) + coeff_Vm)
    Vdiff = [abs(v_true - v_lin) / abs(v_true) * 100 for v_true, v_lin in zip(V1[slack_number:], V1_linear)]
    print(sum(Vdiff))
    _write_row('voltage_diff.csv', Vdiff)

    # --- voltage magnitude: linear model vs. exact ---
    V1_mag_linear = list(np.dot(coeff_Vmag_P, (PQ_node[slack_number:] * 1000).real)
                         + np.dot(coeff_Vmag_Q, (PQ_node[slack_number:] * 1000).imag) + coeff_Vmag_k)
    Vmag_diff = [abs(abs(v_true) - v_lin) / abs(v_true) * 100 for v_true, v_lin in zip(V1[slack_number:], V1_mag_linear)]
    print(sum(Vmag_diff))
    _write_row('voltageMag_diff.csv', Vmag_diff)

    # --- bus injection currents: S = V * I'  =>  I = (S / V)' ---
    Ibus = [(s * 1000 / v).conjugate() for s, v in zip(list(PQ_node)[slack_number:], V1[slack_number:])]
    Ibus_cal = list(np.dot(Y10, V1[0:slack_number]) + np.dot(Y11, V1[slack_number:]))
    Idiff = [abs(a - b) for a, b in zip(Ibus, Ibus_cal)]
    print(sum(Idiff))
    _write_row('currentBus_diff.csv', Idiff)

    # --- branch currents from the linearized voltages ---
    Ibranch = np.dot(I_coeff, V1)
    Ibranch_cal = np.dot(I_coeff[:, slack_number:], V1_linear) + np.dot(I_coeff[:, 0:slack_number], V1[:slack_number])
    Ibranch_diff = [abs(a - b) for a, b in zip(Ibranch, Ibranch_cal)]
    print(sum(Ibranch_diff))
    _write_row('current_diff.csv', Ibranch_diff)
def costFun(x,dual_upper,dual_lower,v1_pu,Ppv_max,coeff_p,coeff_q,NPV,control_bus_index,Vupper,Vlower,dual_current,ThermalLimit,I1_mag):
    """Evaluate the PV objective plus the Lagrangian penalty terms.

    cost = coeff_p*(Pmax-P)^2 + coeff_q*Q^2
           + dual_upper*(v1-Vupper)+ + dual_lower*(Vlower-v1)+
           + dual_current*(|I|^2-Ilim^2)+

    Returns [f1, f]: the bare curtailment/reactive cost and the full
    penalized objective.
    """
    # Quadratic cost of PV curtailment (Pmax - P) and reactive injection Q.
    f1 = 0
    for k in range(NPV):
        p_curtail = Ppv_max[k] - x[k]
        f1 = f1 + coeff_p * p_curtail * p_curtail + coeff_q * x[k + NPV] * x[k + NPV]
    # Voltage-limit penalties evaluated only at the monitored buses.
    v_eval = [v1_pu[k] for k in control_bus_index]
    over = np.array([max(v - Vupper, 0) for v in v_eval])
    under = np.array([max(Vlower - v, 0) for v in v_eval])
    f2 = f1 + np.dot(dual_upper, over) + np.dot(dual_lower, under)
    # Branch-current penalty on the squared-magnitude excess over the limit.
    excess = np.array([max(val, 0) for val in (mag * mag - lim * lim for mag, lim in zip(I1_mag, ThermalLimit))])
    f3 = np.dot(dual_current, excess)
    return [f1, f2 + f3]
def PV_costFun_gradient(x, coeff_p, coeff_q, Pmax):
    """Gradient of the PV cost with respect to the decision vector x = [P, Q].

    The first half of x holds active-power setpoints, the second half the
    reactive-power setpoints; the factor 1000 rescales to the power base
    used by the cost function.
    """
    half = int(len(x) / 2)
    grad = np.zeros(len(x))
    for k in range(half):
        # d/dP of coeff_p*(Pmax - P)^2 (scaled)
        grad[k] = -2 * coeff_p * (Pmax[k] * 1000 - x[k] * 1000)
        # d/dQ of coeff_q*Q^2 (scaled)
        grad[half + k] = 2 * coeff_q * x[half + k] * 1000
    return grad
def voltage_constraint_gradient(AllNodeNames,node_withPV, dual_upper, dual_lower, coeff_Vmag_p, coeff_Vmag_q):
    """Gradient of the voltage-limit Lagrangian terms w.r.t. x = [P, Q].

    Params:
        AllNodeNames: all bus names; the first three (slack) entries are excluded
        node_withPV: buses hosting PV — only these receive non-zero entries
        dual_upper/dual_lower: dual variables of the upper/lower voltage limits
        coeff_Vmag_p/coeff_Vmag_q: voltage-magnitude sensitivity matrices
    Returns:
        [grad_upper, grad_lower]: column matrices of length 2*len(non-slack nodes)
    """
    # BUGFIX: the original aliased AllNodeNames and deleted its first three
    # entries in place, silently mutating the caller's list; slice a copy.
    node_noslackbus = AllNodeNames[3:]
    size = len(node_noslackbus)
    # NOTE(review): these matrices are integer-typed, so the float products
    # assigned below are truncated — confirm whether float dtype was intended.
    grad_upper = np.matrix([0] * size * 2).transpose()
    grad_lower = np.matrix([0] * size * 2).transpose()
    for count, node in enumerate(node_noslackbus):
        if node in node_withPV:
            grad_upper[count] = dual_upper.transpose() * coeff_Vmag_p[:, count]
            grad_upper[count + size] = dual_upper.transpose() * coeff_Vmag_q[:, count]
            grad_lower[count] = -dual_lower.transpose() * coeff_Vmag_p[:, count]
            grad_lower[count + size] = -dual_lower.transpose() * coeff_Vmag_q[:, count]
    return [grad_upper, grad_lower]
def current_constraint_gradient(AllNodeNames, node_withPV, dual_upper, coeff_Imag_p, coeff_Imag_q):
    """Gradient of the line-current-limit Lagrangian term w.r.t. [P; Q].

    Args:
        AllNodeNames: all node names of the zone; the first three entries
            (slack-bus phases) are excluded.
        node_withPV: names of nodes hosting PV.
        dual_upper: dual-variable column vector (np.matrix).
        coeff_Imag_p, coeff_Imag_q: dImag/dP and dImag/dQ sensitivities,
            indexed by PV counter in their columns.

    Returns:
        A (2*N)x1 float np.matrix (N = number of non-slack nodes).
    """
    # Copy instead of aliasing: `node_noslackbus[0:3] = []` on the bare
    # assignment emptied the first three entries of the caller's list.
    node_noslackbus = list(AllNodeNames)[3:]
    n_nodes = len(node_noslackbus)
    # Float entries: an int-valued np.matrix would truncate the gradient.
    grad_upper = np.matrix([0.0] * n_nodes * 2).transpose()
    count = 0
    for node in node_noslackbus:
        if node in node_withPV:
            grad_upper[count] = dual_upper.transpose() * coeff_Imag_p[:, count]
            grad_upper[count + n_nodes] = dual_upper.transpose() * coeff_Imag_q[:, count]
            # Counter advances only for PV nodes (PV-counter column indexing).
            count = count + 1
    return grad_upper
# =========================================Yiyun's Notes===========================================#
# PV_costFun_gradient, voltage_constraint_gradient, current_constraint_gradient, and project_PV
# are used to update the PV decision variables in eq. (10).
# =================================================================================================#
def voltage_constraint(V1_mag):
    """Voltage-band residuals for the dual update: [Vmag - 1.05, 0.95 - Vmag].

    Positive entries indicate a violated limit (p.u. band [0.95, 1.05]).
    Returns a flat float ndarray of length 2*len(V1_mag).
    """
    v = np.ravel(np.asarray(V1_mag, dtype=float))
    # The original did `g = V1_mag - 1.05; g.append(0.95 - V1_mag)`, which
    # raises AttributeError for ndarrays (no .append) and TypeError for
    # plain lists (no `-` operator); stack the two residuals instead.
    return np.concatenate((v - 1.05, 0.95 - v))
def current_constraint(I1_mag, Imax):
    """Line-current limit residual (single-direction flow assumed).

    Returns a one-element list holding I1_mag - Imax; positive values mean
    the thermal limit is violated. Used when updating the dual variables
    in eq. (11).
    """
    return [I1_mag - Imax]
def project_dualvariable(mu):
    """Project dual variables onto the nonnegative orthant, in place.

    Canonical-form inequality constraints require mu >= 0, so any negative
    entry is clamped to zero. Mutates and returns `mu`.
    """
    for idx in range(len(mu)):
        if mu[idx] < 0:
            mu[idx] = 0
    return mu
def project_PV(x, Pmax, Sinv):
    """Project PV setpoints x = [P; Q] onto the feasible operating region.

    Real power is clamped to [0, Pmax[i]]; reactive power to the inverter's
    remaining apparent-power headroom [-Qmax, Qmax] with
    Qmax = sqrt(Sinv[i]^2 - P[i]^2). Mutates x in place.

    Returns [x, Pavailable, Qavailable], the projected vector plus the total
    available real and reactive capacity.
    """
    num = len(Sinv)
    Pavailable = 0
    Qavailable = 0
    for idx in range(num):
        # Clamp the real-power setpoint.
        if x[idx] > Pmax[idx]:
            x[idx] = Pmax[idx]
        elif x[idx] < 0:
            x[idx] = 0
        # Reactive headroom left by the clamped real power.
        if Sinv[idx] > x[idx]:
            Qmax = math.sqrt(Sinv[idx] * Sinv[idx] - x[idx] * x[idx])
        else:
            Qmax = 0
        # Clamp the reactive-power setpoint symmetrically.
        if x[idx + num] > Qmax:
            x[idx + num] = Qmax
        elif x[idx + num] < -Qmax:
            x[idx + num] = -Qmax
        Pavailable += Pmax[idx]
        Qavailable += Qmax
    return [x, Pavailable, Qavailable]
def dual_update(mu, coeff_mu, constraint):
    """One projected subgradient ascent step on the dual variables (eq. 11).

    Steps mu along the constraint residual with gain coeff_mu, then projects
    the result back onto the nonnegative orthant.
    """
    stepped = mu + coeff_mu * constraint
    return project_dualvariable(stepped)
def matrix_cal_for_subPower(V0, Y00, Y01, Y11, V1_noload):
    """Precompute the substation-power sensitivity matrix K and offset g.

    V0 holds the three slack-bus phase voltages; Y00/Y01/Y11 are admittance
    partitions and V1_noload the no-load voltage column. Returns [K, g] with
    K = diag(V0) * conj(Y01) * inv(conj(Y11)) and
    g = diag(V0) * conj(Y00) * conj(V0^T) + diag(V0) * conj(Y01) * conj(V1_noload).
    """
    diag_V0 = np.matrix(np.zeros((3, 3), dtype=complex))
    for phase in range(3):
        diag_V0[phase, phase] = V0[phase]
    K = diag_V0 * Y01.conj() * np.linalg.inv(Y11.conj())
    g = diag_V0 * Y00.conj() * np.matrix(V0).transpose().conj() + diag_V0 * Y01.conj() * V1_noload.conj()
    return [K, g]
def subPower_PQ(V1, PQ_node, K, g):
    """Linear estimate of the substation/feeder-head power injection.

    V1 are the non-slack node voltages, PQ_node the complex nodal injections
    (kW/kVar), and K/g come from matrix_cal_for_subPower. Returns [P0, Q0, M]
    with P0/Q0 in kW/kVar and M the voltage-normalized sensitivity matrix.
    """
    n = len(V1)
    diag_V1 = np.matrix(np.zeros((n, n), dtype=complex))
    for idx in range(n):
        diag_V1[idx, idx] = V1[idx]
    M = K * np.linalg.inv(diag_V1)
    MR = M.real
    MI = M.imag
    # *1000 converts the kW/kVar injections to W/Var for the linear model...
    P0 = g.real + (MR.dot(PQ_node.real) * 1000 - MI.dot(PQ_node.imag) * 1000)
    Q0 = g.imag + (MR.dot(PQ_node.imag) * 1000 + MI.dot(PQ_node.real) * 1000)
    # ...and /1000 converts the head power back to kW/kVar.
    P0 = P0 / 1000
    Q0 = Q0 / 1000
    return [P0, Q0, M]
def sub_costFun_gradient(x, sub_ref, coeff_sub, sub_measure, M, node_withPV):
    """Gradient of the substation power-tracking cost w.r.t. the PV vector x.

    Args:
        x: decision vector [P; Q]; only its length is used.
        sub_ref: per-phase substation power reference (kW).
        coeff_sub: scalar weight of the tracking term.
        sub_measure: per-phase measured substation power (kW).
        M: 3 x n complex sensitivity matrix from subPower_PQ.
        node_withPV: PV node indices (int-convertible) selecting columns of M.

    Returns:
        A len(x) x 1 float np.matrix.
    """
    # Float entries: the original int-valued np.matrix([0]*len(x)) silently
    # truncated every fractional sensitivity value on assignment.
    grad_a = np.matrix([0.0] * len(x)).transpose()
    grad_b = np.matrix([0.0] * len(x)).transpose()
    grad_c = np.matrix([0.0] * len(x)).transpose()
    MR = M.real
    MI = M.imag
    npv = len(node_withPV)
    for count, node in enumerate(node_withPV):
        # First half of the gradient: real-power sensitivities (per phase a/b/c).
        grad_a[count] = -MR[0, int(node)]
        grad_b[count] = -MR[1, int(node)]
        grad_c[count] = -MR[2, int(node)]
        # Second half: reactive-power sensitivities.
        grad_a[count + npv] = MI[0, int(node)]
        grad_b[count + npv] = MI[1, int(node)]
        grad_c[count + npv] = MI[2, int(node)]
    # *1000 puts the kW mismatch in W for the linear model; /1000 restores kW.
    res = coeff_sub * ((sub_measure[0] - sub_ref[0]) * 1000 * grad_a
                       + (sub_measure[1] - sub_ref[1]) * 1000 * grad_b
                       + (sub_measure[2] - sub_ref[2]) * 1000 * grad_c)
    res = res / 1000
    return res
def projection(x, xmax, xmin):
    """Clamp each entry of x into the box [xmin[i], xmax[i]], in place.

    `x.item(i)` is used for the comparisons so scalar values are extracted
    even when x is a numpy matrix/array. Mutates and returns x.
    """
    for idx in range(len(x)):
        upper = xmax[idx]
        lower = xmin[idx]
        if x.item(idx) > upper:
            x[idx] = upper
        if x.item(idx) < lower:
            x[idx] = lower
    return x
class DERMS:
    """Zone controller for distributed energy resources (PV systems).

    Holds the PV metadata and index maps of one control zone, reads
    measurements from an OpenDSS-style simulator (`monitor`), and performs
    one projected primal-dual gradient step per call (`control`) to update
    the PV real/reactive setpoints against voltage- and current-limit
    constraints of a linearized power-flow model.
    """
    def __init__(self, pvData,controlbus,controlelem,controlelem_limit,sub_node_names,sub_elem_names):
        """Cache PV metadata and precompute index maps into the zone model.

        Args:
            pvData: dict with keys "pvName", "pvLocation", "pvSize",
                "inverterSize" describing every PV in the zone.
            controlbus: names of all controlled nodes (phase-qualified).
            controlelem: names of the monitored/controlled branches.
            controlelem_limit: thermal limits for those branches.
            sub_node_names: names of all nodes in the zone (a superset of
                controlbus).
            sub_elem_names: names of all branches in the zone.
        """
        self.PV_name = pvData["pvName"]
        self.PV_location = pvData["pvLocation"]
        self.PV_size = pvData["pvSize"]
        self.inverter_size = pvData["inverterSize"]
        self.control_bus = controlbus
        # Node names are matched case-insensitively via upper-casing.
        sub_node_names = [ii.upper() for ii in sub_node_names]
        self.controlbus_index = [sub_node_names.index(ii.upper()) for ii in controlbus]  # control bus index in the sub system (number)
        PVbus_index = []
        for bus in self.PV_location:
            temp = bus.split('.')
            if len(temp) == 1:
                # A bare bus name (no phase suffix) is treated as three-phase:
                # expand to phases .1/.2/.3 before the index lookup.
                temp = temp + ['1', '2', '3']
            for ii in range(len(temp) - 1):
                PVbus_index.append(sub_node_names.index((temp[0] + '.' + temp[ii + 1]).upper()))
        self.PVbus_index = PVbus_index
        self.control_elem = controlelem
        self.controlelem_limit = controlelem_limit
        self.controlelem_index = [sub_elem_names.index(ii) for ii in controlelem]  # control branches index in the sub system (number)
    def monitor(self, dss, dssObjects, PVSystem_1phase):
        """Read PV outputs, node voltages and branch currents from the simulator.

        Returns:
            [PV_location, PVpowers, Vmes, Imes] where PVpowers is an
            (nPV, 2) array of phase-averaged [P, Q], Vmes the per-unit
            voltage magnitude at each control bus, and Imes the current
            magnitudes of each control branch.
        """
        PVpowers = []
        for pv in PVSystem_1phase["Name"].tolist():
            nPhases = dssObjects["Generators"][pv].GetValue("phases")
            power = dssObjects["Generators"][pv].GetValue("Powers")
            # Even entries of "Powers" are taken as P and odd entries as Q,
            # averaged over the phases — presumably the OpenDSS interleaved
            # layout; confirm against the simulator API.
            PVpowers.append([sum(power[::2])/nPhases, sum(power[1::2])/nPhases])
        PVpowers = np.asarray(PVpowers)
        Vmes = []
        for bus in self.control_bus:
            busName = bus.split('.')[0].lower()
            Vmag = dssObjects["Buses"][busName].GetValue("puVmagAngle")[::2]
            allbusnode = dss.Bus.Nodes()
            phase = bus.split('.')[1]
            # Pick the voltage entry of the phase named in "bus.<phase>".
            index = allbusnode.index(int(phase))
            Vnode = Vmag[index]
            Vmes.append(Vnode)
        Imes = []
        for elem in self.control_elem:
            # e.g. "Line.xyz" -> collection name "Lines".
            className = elem.split('.')[0] + "s"
            I = dssObjects[className][elem].GetValue("CurrentsMagAng")[::2][:3] #TODO: Why is there a hardcoded [:3] ?
            Imes.append(I)
        return [self.PV_location,PVpowers,Vmes,Imes]
    def control(self, linear_PF_coeff, Options,stepsize,mu0,Vlimit,PVpower,Imes,Vmes,PV_Pmax_forecast):
        """One projected primal-dual gradient step for the zone's PV setpoints.

        Args:
            linear_PF_coeff: linearized power-flow coefficients; indices
                3/4 are used as dVmag/dP and dVmag/dQ, 6/7 as dI/dP and dI/dQ.
            Options: dict with the cost coefficients "coeff_p" and "coeff_q".
            stepsize: [stepsize_xp, stepsize_xq, stepsize_mu].
            mu0: dual variables from the previous step
                [mu_Vmag_upper0, mu_Vmag_lower0, mu_I0].
            Vlimit: [Vupper, Vlower] allowed voltage band (p.u.).
            PVpower: measured PV [P, Q] per PV (generator sign convention:
                negated below to obtain production setpoints).
            Imes: measured complex branch currents.
            Vmes: measured control-bus voltage magnitudes (p.u.).
            PV_Pmax_forecast: forecast available PV real power (kW).

        Returns:
            [x1, mu1]: the updated setpoint vector [P; Q] (kW/kVar) and the
            updated dual variables. Each call is one step of eq. (10)/(11).
        """
        coeff_p = Options["coeff_p"]
        coeff_q = Options["coeff_q"]
        PVname = self.PV_name
        NPV = len(PVname)
        x0 = np.zeros(2 * NPV)
        for ii in range(NPV):
            x0[ii] = -PVpower[ii][0] # in kW
            x0[ii + NPV] = -PVpower[ii][1] # in kVar
        #coeff_V_P = linear_PF_coeff[0]
        #coeff_V_Q = linear_PF_coeff[1]
        #coeff_Vm = linear_PF_coeff[2]
        coeff_Vmag_P = linear_PF_coeff[3]
        coeff_Vmag_Q = linear_PF_coeff[4]
        #coeff_Vmag_k = linear_PF_coeff[5]
        coeff_I_P = linear_PF_coeff[6]
        coeff_I_Q = linear_PF_coeff[7]
        #coeff_I_const = linear_PF_coeff[8]
        stepsize_xp = stepsize[0]
        stepsize_xq = stepsize[1]
        stepsize_mu = stepsize[2]
        Vupper = Vlimit[0]
        Vlower = Vlimit[1]
        controlbus_index = self.controlbus_index
        PVbus_index = self.PVbus_index
        controlelem_index = self.controlelem_index
        PV_inverter_size = self.inverter_size
        Imes_limit = self.controlelem_limit
        mu_Vmag_upper0 = mu0[0]
        mu_Vmag_lower0 = mu0[1]
        mu_I0 = mu0[2]
        #print([max(mu_Vmag_upper0),max(mu_Vmag_lower0)])
        # compute gradient: cost term plus dual-weighted constraint terms,
        # restricted to the (control bus x PV bus) sub-blocks via np.ix_.
        PVcost_fun_gradient = PV_costFun_gradient(x0, coeff_p, coeff_q, PV_Pmax_forecast)
        Vmag_upper_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0),
                                              np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index], [ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0)),axis=0)
        Vmag_lower_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0),
                                              np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0)),axis=0)
        Vmag_gradient = Vmag_upper_gradient - Vmag_lower_gradient
        if len(mu_I0)>0 :
            # Current-limit term, split into real and imaginary parts.
            temp_real = mu_I0 * np.array(Imes.real)
            temp_imag = mu_I0 * np.array(Imes.imag)
            I_gradient_real = np.concatenate((np.dot(
                coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),
                temp_real), np.dot(
                coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),
                temp_real)), axis=0)
            I_gradient_imag = np.concatenate((np.dot(
                coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),
                temp_imag), np.dot(
                coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),
                temp_imag)), axis=0)
            I_gradient = 2 * I_gradient_real + 2 * I_gradient_imag
        else:
            I_gradient = 0
        gradient = PVcost_fun_gradient + Vmag_gradient + I_gradient / 1000
        # compute x1, mu1: gradient step with separate P/Q stepsizes, then
        # projection onto the feasible PV operating region.
        x1 = np.concatenate([x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - stepsize_xq * gradient[NPV:]])
        #print('solved: '+str(sum(x1[0:NPV]))+','+str(sum(x1[NPV:]))) # in kW/kVar
        [x1, Pmax_allPV, Qmax_allPV] = project_PV(x1, PV_Pmax_forecast, PV_inverter_size)
        #print('Available P = '+str(Pmax_allPV)+' , Available Q = '+str(Qmax_allPV))
        #print('projected: ' + str(sum(x1[0:NPV])) + ',' + str(sum(x1[NPV:]))) # in kW/kVar
        x1 = np.array([round(ii, 5) for ii in x1])
        # Dual ascent on the voltage-band constraints, projected to mu >= 0.
        mu_Vmag_lower1 = mu_Vmag_lower0 + stepsize_mu * (Vlower - np.array(Vmes))
        mu_Vmag_upper1 = mu_Vmag_upper0 + stepsize_mu * (np.array(Vmes) - Vupper)
        mu_Vmag_lower1 = project_dualvariable(mu_Vmag_lower1)
        mu_Vmag_upper1 = project_dualvariable(mu_Vmag_upper1)
        if mu_I0:
            # Squared-magnitude current violation; the /300 looks like an
            # extra damping factor — TODO confirm its origin.
            mu_I1 = mu_I0 + stepsize_mu / 300 * np.array(list(map(lambda x: x[0] * x[0] - x[1] * x[1], zip(Imes, Imes_limit))))
            mu_I1 = project_dualvariable(mu_I1)
        else:
            mu_I1 = mu_I0
        mu1 = [mu_Vmag_upper1,mu_Vmag_lower1,mu_I1]
        # Each call of DERMS.control is one update step of the PV real and
        # reactive power outputs (eqs. (10)-(11) of the draft paper).
        return [x1,mu1]
| [
"numpy.conj",
"csv.writer",
"numpy.asarray",
"math.sqrt",
"numpy.ix_",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.zeros",
"numpy.real",
"numpy.concatenate",
"numpy.matrix",
"numpy.imag"
] | [((286, 308), 'numpy.conj', 'np.conj', (['V1[slack_no:]'], {}), '(V1[slack_no:])\n', (293, 308), True, 'import numpy as np\n'), ((3454, 3485), 'numpy.dot', 'np.dot', (['Y10', 'V1[0:slack_number]'], {}), '(Y10, V1[0:slack_number])\n', (3460, 3485), True, 'import numpy as np\n'), ((3502, 3532), 'numpy.dot', 'np.dot', (['Y11', 'V1[slack_number:]'], {}), '(Y11, V1[slack_number:])\n', (3508, 3532), True, 'import numpy as np\n'), ((3856, 3875), 'numpy.dot', 'np.dot', (['I_coeff', 'V1'], {}), '(I_coeff, V1)\n', (3862, 3875), True, 'import numpy as np\n'), ((885, 925), 'numpy.dot', 'np.dot', (['I_coeff[:, slack_no:]', 'coeff_V_P'], {}), '(I_coeff[:, slack_no:], coeff_V_P)\n', (891, 925), True, 'import numpy as np\n'), ((944, 984), 'numpy.dot', 'np.dot', (['I_coeff[:, slack_no:]', 'coeff_V_Q'], {}), '(I_coeff[:, slack_no:], coeff_V_Q)\n', (950, 984), True, 'import numpy as np\n'), ((2798, 2811), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2808, 2811), False, 'import csv\n'), ((3243, 3256), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3253, 3256), False, 'import csv\n'), ((3761, 3774), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3771, 3774), False, 'import csv\n'), ((3893, 3937), 'numpy.dot', 'np.dot', (['I_coeff[:, slack_number:]', 'V1_linear'], {}), '(I_coeff[:, slack_number:], V1_linear)\n', (3899, 3937), True, 'import numpy as np\n'), ((3936, 3989), 'numpy.dot', 'np.dot', (['I_coeff[:, 0:slack_number]', 'V1[:slack_number]'], {}), '(I_coeff[:, 0:slack_number], V1[:slack_number])\n', (3942, 3989), True, 'import numpy as np\n'), ((4161, 4174), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4171, 4174), False, 'import csv\n'), ((10777, 10799), 'numpy.linalg.inv', 'np.linalg.inv', (['diag_V1'], {}), '(diag_V1)\n', (10790, 10799), True, 'import numpy as np\n'), ((14589, 14609), 'numpy.asarray', 'np.asarray', (['PVpowers'], {}), '(PVpowers)\n', (14599, 14609), True, 'import numpy as np\n'), ((16160, 16177), 'numpy.zeros', 'np.zeros', (['(2 * NPV)'], 
{}), '(2 * NPV)\n', (16168, 16177), True, 'import numpy as np\n'), ((19132, 19235), 'numpy.concatenate', 'np.concatenate', (['[x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - stepsize_xq * gradient\n [NPV:]]'], {}), '([x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - \n stepsize_xq * gradient[NPV:]])\n', (19146, 19235), True, 'import numpy as np\n'), ((458, 484), 'numpy.dot', 'np.dot', (['Y10', 'V1[:slack_no]'], {}), '(Y10, V1[:slack_no])\n', (464, 484), True, 'import numpy as np\n'), ((1007, 1046), 'numpy.dot', 'np.dot', (['I_coeff[:, slack_no:]', 'coeff_Vm'], {}), '(I_coeff[:, slack_no:], coeff_Vm)\n', (1013, 1046), True, 'import numpy as np\n'), ((1047, 1091), 'numpy.dot', 'np.dot', (['I_coeff[:, :slack_no]', 'V1[:slack_no]'], {}), '(I_coeff[:, :slack_no], V1[:slack_no])\n', (1053, 1091), True, 'import numpy as np\n'), ((1992, 2030), 'numpy.conj', 'np.conj', (['(PQ_node[slack_number:] * 1000)'], {}), '(PQ_node[slack_number:] * 1000)\n', (1999, 2030), True, 'import numpy as np\n'), ((2536, 2574), 'numpy.conj', 'np.conj', (['(PQ_node[slack_number:] * 1000)'], {}), '(PQ_node[slack_number:] * 1000)\n', (2543, 2574), True, 'import numpy as np\n'), ((2881, 2939), 'numpy.dot', 'np.dot', (['coeff_Vmag_P', '(PQ_node[slack_number:] * 1000).real'], {}), '(coeff_Vmag_P, (PQ_node[slack_number:] * 1000).real)\n', (2887, 2939), True, 'import numpy as np\n'), ((2939, 2997), 'numpy.dot', 'np.dot', (['coeff_Vmag_Q', '(PQ_node[slack_number:] * 1000).imag'], {}), '(coeff_Vmag_Q, (PQ_node[slack_number:] * 1000).imag)\n', (2945, 2997), True, 'import numpy as np\n'), ((9365, 9411), 'math.sqrt', 'math.sqrt', (['(Sinv[ii] * Sinv[ii] - x[ii] * x[ii])'], {}), '(Sinv[ii] * Sinv[ii] - x[ii] * x[ii])\n', (9374, 9411), False, 'import math\n'), ((18125, 18144), 'numpy.array', 'np.array', (['Imes.real'], {}), '(Imes.real)\n', (18133, 18144), True, 'import numpy as np\n'), ((18177, 18196), 'numpy.array', 'np.array', (['Imes.imag'], {}), '(Imes.imag)\n', (18185, 18196), True, 'import numpy 
as np\n'), ((19700, 19714), 'numpy.array', 'np.array', (['Vmes'], {}), '(Vmes)\n', (19708, 19714), True, 'import numpy as np\n'), ((19773, 19787), 'numpy.array', 'np.array', (['Vmes'], {}), '(Vmes)\n', (19781, 19787), True, 'import numpy as np\n'), ((1898, 1909), 'numpy.imag', 'np.imag', (['ii'], {}), '(ii)\n', (1905, 1909), True, 'import numpy as np\n'), ((1817, 1828), 'numpy.real', 'np.real', (['ii'], {}), '(ii)\n', (1824, 1828), True, 'import numpy as np\n'), ((10511, 10524), 'numpy.matrix', 'np.matrix', (['V0'], {}), '(V0)\n', (10520, 10524), True, 'import numpy as np\n'), ((17393, 17460), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlbus_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlbus_index], [ii for ii in PVbus_index])\n', (17399, 17460), True, 'import numpy as np\n'), ((17557, 17624), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlbus_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlbus_index], [ii for ii in PVbus_index])\n', (17563, 17624), True, 'import numpy as np\n'), ((17730, 17797), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlbus_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlbus_index], [ii for ii in PVbus_index])\n', (17736, 17797), True, 'import numpy as np\n'), ((17894, 17961), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlbus_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlbus_index], [ii for ii in PVbus_index])\n', (17900, 17961), True, 'import numpy as np\n'), ((18278, 18346), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlelem_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlelem_index], [ii for ii in PVbus_index])\n', (18284, 18346), True, 'import numpy as np\n'), ((18428, 18496), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlelem_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlelem_index], [ii for ii in PVbus_index])\n', (18434, 18496), True, 'import numpy as np\n'), ((18633, 18701), 'numpy.ix_', 'np.ix_', (['[ii 
for ii in controlelem_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlelem_index], [ii for ii in PVbus_index])\n', (18639, 18701), True, 'import numpy as np\n'), ((18783, 18851), 'numpy.ix_', 'np.ix_', (['[ii for ii in controlelem_index]', '[ii for ii in PVbus_index]'], {}), '([ii for ii in controlelem_index], [ii for ii in PVbus_index])\n', (18789, 18851), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyiree.tf.support import tf_test_utils
import tensorflow.compat.v2 as tf
class Conv2dModule(tf.Module):
  """tf.Module exposing tf.nn.conv2d under fixed input signatures.

  Each method name encodes its shapes and padding:
  conv2d_<NHWC image dims>x<HWIO kernel dims>_<valid|same>.
  All convolutions use stride [1, 1, 1, 1].
  """

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([1, 1, 1, 1], tf.float32),
  ])
  def conv2d_1451x1111_valid(self, img, kernel):
    """1x4x5x1 image, 1x1x1x1 kernel, VALID padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([2, 4, 5, 1], tf.float32),
      tf.TensorSpec([1, 1, 1, 1], tf.float32),
  ])
  def conv2d_2451x1111_valid(self, img, kernel):
    """Batch of 2 images, 1x1x1x1 kernel, VALID padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([2, 3, 1, 1], tf.float32),
  ])
  def conv2d_1451x2311_valid(self, img, kernel):
    """Asymmetric 2x3 spatial kernel, VALID padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([2, 3, 1, 1], tf.float32),
  ])
  def conv2d_1451x2311_same(self, img, kernel):
    """Asymmetric 2x3 spatial kernel, SAME padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([2, 4, 5, 1], tf.float32),
      tf.TensorSpec([2, 3, 1, 1], tf.float32),
  ])
  def conv2d_2451x2311_same(self, img, kernel):
    """Batch of 2, asymmetric kernel, SAME padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([3, 2, 2, 1], tf.float32),
  ])
  def conv2d_1452x3221_same(self, img, kernel):
    """2 input features reduced to 1 output feature, SAME padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([1, 1, 1, 2], tf.float32),
  ])
  def conv2d_1451x1112_same(self, img, kernel):
    """1 input feature inflated to 2 output features, SAME padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([1, 1, 2, 2], tf.float32),
  ])
  def conv2d_1452x1122_same(self, img, kernel):
    """2 input features mixed into 2 output features, SAME padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([2, 2, 2, 3], tf.float32),
  ])
  def conv2d_1452x2223_same(self, img, kernel):
    """2 input / 3 output features with spatial kernel, SAME padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([2, 2, 2, 3], tf.float32),
  ])
  def conv2d_1452x2223_valid(self, img, kernel):
    """2 input / 3 output features with spatial kernel, VALID padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")

  @tf.function(input_signature=[
      tf.TensorSpec([2, 4, 5, 2], tf.float32),
      tf.TensorSpec([2, 2, 2, 3], tf.float32),
  ])
  def conv2d_2452x2223_valid(self, img, kernel):
    """Batched multi-feature convolution, VALID padding."""
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf_test_utils.compile_module(Conv2dModule)
class ConvTest(tf_test_utils.SavedModelTestCase):
  """Compares compiled conv2d results against the TF reference."""

  def test_id_batch_size_1(self):
    img = np.arange(20, dtype=np.float32).reshape(1, 4, 5, 1)
    kernel = np.ones((1, 1, 1, 1), dtype=np.float32)
    result = self.get_module().conv2d_1451x1111_valid(img, kernel)
    result.print().assert_all_close()

  def test_id_batch_size_2(self):
    img = np.arange(40, dtype=np.float32).reshape(2, 4, 5, 1)
    kernel = np.ones((1, 1, 1, 1), dtype=np.float32)
    result = self.get_module().conv2d_2451x1111_valid(img, kernel)
    result.print().assert_all_close()

  def test_asym_kernel(self):
    img = np.arange(20, dtype=np.float32).reshape(1, 4, 5, 1)
    kernel = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
    result = self.get_module().conv2d_1451x2311_valid(img, kernel)
    result.print().assert_all_close()

  def test_padding(self):
    img = np.arange(20, dtype=np.float32).reshape(1, 4, 5, 1)
    kernel = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
    result = self.get_module().conv2d_1451x2311_same(img, kernel)
    result.print().assert_all_close()

  def test_batched_padding(self):
    img = np.arange(40, dtype=np.float32).reshape(2, 4, 5, 1)
    kernel = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
    result = self.get_module().conv2d_2451x2311_same(img, kernel)
    result.print().assert_all_close()

  def test_feature_reduce(self):
    img = np.arange(40, dtype=np.float32).reshape(1, 4, 5, 2)
    kernel = np.ones((3, 2, 2, 1), dtype=np.float32)
    result = self.get_module().conv2d_1452x3221_same(img, kernel)
    result.print().assert_all_close()

  def test_feature_inflate(self):
    img = np.arange(20, dtype=np.float32).reshape(1, 4, 5, 1)
    kernel = np.arange(2, dtype=np.float32).reshape(1, 1, 1, 2)
    result = self.get_module().conv2d_1451x1112_same(img, kernel)
    result.print().assert_all_close()

  def test_feature_mix(self):
    img = np.arange(40, dtype=np.float32).reshape(1, 4, 5, 2)
    kernel = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)
    result = self.get_module().conv2d_1452x1122_same(img, kernel)
    result.print().assert_all_close()

  def test_feature_padded(self):
    img = np.arange(40, dtype=np.float32).reshape(1, 4, 5, 2)
    kernel = np.arange(24, dtype=np.float32).reshape(2, 2, 2, 3)
    result = self.get_module().conv2d_1452x2223_same(img, kernel)
    result.print().assert_all_close()

  def test_feature_unpadded(self):
    img = np.arange(40, dtype=np.float32).reshape(1, 4, 5, 2)
    kernel = np.arange(24, dtype=np.float32).reshape(2, 2, 2, 3)
    result = self.get_module().conv2d_1452x2223_valid(img, kernel)
    result.print().assert_all_close()

  def test_batched_feature_unpadded(self):
    img = np.arange(80, dtype=np.float32).reshape(2, 4, 5, 2)
    kernel = np.arange(24, dtype=np.float32).reshape(2, 2, 2, 3)
    result = self.get_module().conv2d_2452x2223_valid(img, kernel)
    result.print().assert_all_close()
if __name__ == "__main__":
  # TF1-based installs of tf.compat.v2 need the explicit v2 switch;
  # native TF2 builds do not expose it.
  enable_v2 = getattr(tf, "enable_v2_behavior", None)
  if enable_v2 is not None:
    enable_v2()
  tf.test.main()
| [
"numpy.ones",
"tensorflow.compat.v2.TensorSpec",
"numpy.array",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.nn.conv2d",
"pyiree.tf.support.tf_test_utils.compile_module",
"numpy.arange",
"tensorflow.compat.v2.enable_v2_behavior"
] | [((3544, 3586), 'pyiree.tf.support.tf_test_utils.compile_module', 'tf_test_utils.compile_module', (['Conv2dModule'], {}), '(Conv2dModule)\n', (3572, 3586), False, 'from pyiree.tf.support import tf_test_utils\n'), ((6449, 6463), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (6461, 6463), True, 'import tensorflow.compat.v2 as tf\n'), ((919, 982), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""VALID"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'VALID', name='result')\n", (931, 982), True, 'import tensorflow.compat.v2 as tf\n'), ((1176, 1239), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""VALID"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'VALID', name='result')\n", (1188, 1239), True, 'import tensorflow.compat.v2 as tf\n'), ((1433, 1496), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""VALID"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'VALID', name='result')\n", (1445, 1496), True, 'import tensorflow.compat.v2 as tf\n'), ((1689, 1751), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""SAME"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'SAME', name='result')\n", (1701, 1751), True, 'import tensorflow.compat.v2 as tf\n'), ((1944, 2006), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""SAME"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'SAME', name='result')\n", (1956, 2006), True, 'import tensorflow.compat.v2 as tf\n'), ((2199, 2261), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""SAME"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'SAME', name='result')\n", (2211, 2261), True, 'import tensorflow.compat.v2 as tf\n'), ((2454, 2516), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 
1, 1, 1]', '"""SAME"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'SAME', name='result')\n", (2466, 2516), True, 'import tensorflow.compat.v2 as tf\n'), ((2709, 2771), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""SAME"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'SAME', name='result')\n", (2721, 2771), True, 'import tensorflow.compat.v2 as tf\n'), ((2964, 3026), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""SAME"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'SAME', name='result')\n", (2976, 3026), True, 'import tensorflow.compat.v2 as tf\n'), ((3220, 3283), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""VALID"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'VALID', name='result')\n", (3232, 3283), True, 'import tensorflow.compat.v2 as tf\n'), ((3477, 3540), 'tensorflow.compat.v2.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel', '[1, 1, 1, 1]', '"""VALID"""'], {'name': '"""result"""'}), "(img, kernel, [1, 1, 1, 1], 'VALID', name='result')\n", (3489, 3540), True, 'import tensorflow.compat.v2 as tf\n'), ((3742, 3781), 'numpy.ones', 'np.ones', (['[1, 1, 1, 1]'], {'dtype': 'np.float32'}), '([1, 1, 1, 1], dtype=np.float32)\n', (3749, 3781), True, 'import numpy as np\n'), ((3975, 4014), 'numpy.ones', 'np.ones', (['[1, 1, 1, 1]'], {'dtype': 'np.float32'}), '([1, 1, 1, 1], dtype=np.float32)\n', (3982, 4014), True, 'import numpy as np\n'), ((4988, 5027), 'numpy.ones', 'np.ones', (['[3, 2, 2, 1]'], {'dtype': 'np.float32'}), '([3, 2, 2, 1], dtype=np.float32)\n', (4995, 5027), True, 'import numpy as np\n'), ((6423, 6446), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (6444, 6446), True, 'import tensorflow.compat.v2 as tf\n'), ((766, 805), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 1]', 'tf.float32'], {}), '([1, 4, 5, 1], 
tf.float32)\n', (779, 805), True, 'import tensorflow.compat.v2 as tf\n'), ((813, 852), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 1, 1, 1]', 'tf.float32'], {}), '([1, 1, 1, 1], tf.float32)\n', (826, 852), True, 'import tensorflow.compat.v2 as tf\n'), ((1023, 1062), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 4, 5, 1]', 'tf.float32'], {}), '([2, 4, 5, 1], tf.float32)\n', (1036, 1062), True, 'import tensorflow.compat.v2 as tf\n'), ((1070, 1109), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 1, 1, 1]', 'tf.float32'], {}), '([1, 1, 1, 1], tf.float32)\n', (1083, 1109), True, 'import tensorflow.compat.v2 as tf\n'), ((1280, 1319), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 1]', 'tf.float32'], {}), '([1, 4, 5, 1], tf.float32)\n', (1293, 1319), True, 'import tensorflow.compat.v2 as tf\n'), ((1327, 1366), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 3, 1, 1]', 'tf.float32'], {}), '([2, 3, 1, 1], tf.float32)\n', (1340, 1366), True, 'import tensorflow.compat.v2 as tf\n'), ((1537, 1576), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 1]', 'tf.float32'], {}), '([1, 4, 5, 1], tf.float32)\n', (1550, 1576), True, 'import tensorflow.compat.v2 as tf\n'), ((1584, 1623), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 3, 1, 1]', 'tf.float32'], {}), '([2, 3, 1, 1], tf.float32)\n', (1597, 1623), True, 'import tensorflow.compat.v2 as tf\n'), ((1792, 1831), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 4, 5, 1]', 'tf.float32'], {}), '([2, 4, 5, 1], tf.float32)\n', (1805, 1831), True, 'import tensorflow.compat.v2 as tf\n'), ((1839, 1878), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 3, 1, 1]', 'tf.float32'], {}), '([2, 3, 1, 1], tf.float32)\n', (1852, 1878), True, 'import tensorflow.compat.v2 as tf\n'), ((2047, 2086), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 2]', 'tf.float32'], {}), '([1, 4, 5, 2], tf.float32)\n', (2060, 2086), True, 
'import tensorflow.compat.v2 as tf\n'), ((2094, 2133), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[3, 2, 2, 1]', 'tf.float32'], {}), '([3, 2, 2, 1], tf.float32)\n', (2107, 2133), True, 'import tensorflow.compat.v2 as tf\n'), ((2302, 2341), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 1]', 'tf.float32'], {}), '([1, 4, 5, 1], tf.float32)\n', (2315, 2341), True, 'import tensorflow.compat.v2 as tf\n'), ((2349, 2388), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 1, 1, 2]', 'tf.float32'], {}), '([1, 1, 1, 2], tf.float32)\n', (2362, 2388), True, 'import tensorflow.compat.v2 as tf\n'), ((2557, 2596), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 2]', 'tf.float32'], {}), '([1, 4, 5, 2], tf.float32)\n', (2570, 2596), True, 'import tensorflow.compat.v2 as tf\n'), ((2604, 2643), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 1, 2, 2]', 'tf.float32'], {}), '([1, 1, 2, 2], tf.float32)\n', (2617, 2643), True, 'import tensorflow.compat.v2 as tf\n'), ((2812, 2851), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 2]', 'tf.float32'], {}), '([1, 4, 5, 2], tf.float32)\n', (2825, 2851), True, 'import tensorflow.compat.v2 as tf\n'), ((2859, 2898), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 2, 2, 3]', 'tf.float32'], {}), '([2, 2, 2, 3], tf.float32)\n', (2872, 2898), True, 'import tensorflow.compat.v2 as tf\n'), ((3067, 3106), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 4, 5, 2]', 'tf.float32'], {}), '([1, 4, 5, 2], tf.float32)\n', (3080, 3106), True, 'import tensorflow.compat.v2 as tf\n'), ((3114, 3153), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 2, 2, 3]', 'tf.float32'], {}), '([2, 2, 2, 3], tf.float32)\n', (3127, 3153), True, 'import tensorflow.compat.v2 as tf\n'), ((3324, 3363), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 4, 5, 2]', 'tf.float32'], {}), '([2, 4, 5, 2], tf.float32)\n', (3337, 3363), True, 'import tensorflow.compat.v2 
as tf\n'), ((3371, 3410), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[2, 2, 2, 3]', 'tf.float32'], {}), '([2, 2, 2, 3], tf.float32)\n', (3384, 3410), True, 'import tensorflow.compat.v2 as tf\n'), ((3680, 3711), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.float32'}), '(20, dtype=np.float32)\n', (3689, 3711), True, 'import numpy as np\n'), ((3913, 3944), 'numpy.arange', 'np.arange', (['(40)'], {'dtype': 'np.float32'}), '(40, dtype=np.float32)\n', (3922, 3944), True, 'import numpy as np\n'), ((4142, 4173), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.float32'}), '(20, dtype=np.float32)\n', (4151, 4173), True, 'import numpy as np\n'), ((4204, 4255), 'numpy.array', 'np.array', (['[[1, 4, 2], [-2, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 4, 2], [-2, 0, 1]], dtype=np.float32)\n', (4212, 4255), True, 'import numpy as np\n'), ((4399, 4430), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.float32'}), '(20, dtype=np.float32)\n', (4408, 4430), True, 'import numpy as np\n'), ((4461, 4512), 'numpy.array', 'np.array', (['[[1, 4, 2], [-2, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 4, 2], [-2, 0, 1]], dtype=np.float32)\n', (4469, 4512), True, 'import numpy as np\n'), ((4663, 4694), 'numpy.arange', 'np.arange', (['(40)'], {'dtype': 'np.float32'}), '(40, dtype=np.float32)\n', (4672, 4694), True, 'import numpy as np\n'), ((4725, 4776), 'numpy.array', 'np.array', (['[[1, 4, 2], [-2, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 4, 2], [-2, 0, 1]], dtype=np.float32)\n', (4733, 4776), True, 'import numpy as np\n'), ((4926, 4957), 'numpy.arange', 'np.arange', (['(40)'], {'dtype': 'np.float32'}), '(40, dtype=np.float32)\n', (4935, 4957), True, 'import numpy as np\n'), ((5158, 5189), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.float32'}), '(20, dtype=np.float32)\n', (5167, 5189), True, 'import numpy as np\n'), ((5220, 5250), 'numpy.arange', 'np.arange', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (5229, 5250), True, 'import 
numpy as np\n'), ((5399, 5430), 'numpy.arange', 'np.arange', (['(40)'], {'dtype': 'np.float32'}), '(40, dtype=np.float32)\n', (5408, 5430), True, 'import numpy as np\n'), ((5461, 5491), 'numpy.arange', 'np.arange', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (5470, 5491), True, 'import numpy as np\n'), ((5643, 5674), 'numpy.arange', 'np.arange', (['(40)'], {'dtype': 'np.float32'}), '(40, dtype=np.float32)\n', (5652, 5674), True, 'import numpy as np\n'), ((5705, 5736), 'numpy.arange', 'np.arange', (['(24)'], {'dtype': 'np.float32'}), '(24, dtype=np.float32)\n', (5714, 5736), True, 'import numpy as np\n'), ((5890, 5921), 'numpy.arange', 'np.arange', (['(40)'], {'dtype': 'np.float32'}), '(40, dtype=np.float32)\n', (5899, 5921), True, 'import numpy as np\n'), ((5952, 5983), 'numpy.arange', 'np.arange', (['(24)'], {'dtype': 'np.float32'}), '(24, dtype=np.float32)\n', (5961, 5983), True, 'import numpy as np\n'), ((6146, 6177), 'numpy.arange', 'np.arange', (['(80)'], {'dtype': 'np.float32'}), '(80, dtype=np.float32)\n', (6155, 6177), True, 'import numpy as np\n'), ((6208, 6239), 'numpy.arange', 'np.arange', (['(24)'], {'dtype': 'np.float32'}), '(24, dtype=np.float32)\n', (6217, 6239), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import os
window_title = "The Input Image"
input_image = "input.jpg"
output_image = os.path.basename(__file__)[:-len(".py")] + ".jpg"
HORIZONTAL = 0
VERTICAL = 1
def read_image(file_name = input_image):
    """Load an image from disk with OpenCV.

    Returns the decoded BGR image array (None when the file cannot be
    read, per cv2.imread's contract).
    """
    return cv2.imread(file_name)
def display_image(img,window_title = window_title):
    """Show ``img`` in a resizable OpenCV window and block until a key is pressed."""
    # WINDOW_NORMAL makes the window user-resizable (useful for large images).
    cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
    cv2.imshow(window_title,img)
    # waitKey(0) blocks indefinitely until any key press, then all windows close.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return
def grayscale(img):
    """Convert an image in OpenCV's BGR channel order to single-channel grayscale."""
    # cv2 returns images as BGR, hence COLOR_BGR2GRAY rather than COLOR_RGB2GRAY.
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def save_to_disk(img,filename=output_image):
    """Write ``img`` to ``filename``; the format is inferred from the extension by OpenCV."""
    cv2.imwrite(filename,img)
def get_dimensions_hw(img):
    """Return the (height, width) pair of an image array, ignoring any channel axis."""
    height, width = img.shape[:2]
    return height, width
def get_middle_pixels_hw(img, new_height, new_width):
    """Return the centered ``new_height`` x ``new_width`` crop of ``img``.

    Raises ValueError when the requested crop exceeds the image in either
    dimension.

    BUG FIX: the previous implementation sliced ``middle - round(n/2)`` to
    ``middle + round(n/2)``, which returns 2*round(n/2) pixels -- the wrong
    size (n-1 or n+1) whenever n is odd. The slice is now computed as
    [start, start + n) so the result always has exactly the requested size;
    for even sizes the selected region is identical to before.
    """
    input_img_h, input_img_w = img.shape[:2]
    if new_height > input_img_h:
        raise ValueError("Requested new height (" + str(new_height) + ") is greater than image height (" + str(input_img_h) + ").")
    if new_width > input_img_w:
        raise ValueError("Requested new width (" + str(new_width) + ") is greater than image width (" + str(input_img_w) + ").")
    # Centre of the source image; round() kept for backward compatibility
    # with the positions of the original even-sized crops.
    middle_h = round(input_img_h/2)
    middle_w = round(input_img_w/2)
    start_h = middle_h - new_height // 2
    start_w = middle_w - new_width // 2
    # Clamp so odd requests near a border can never produce a short slice.
    start_h = min(max(start_h, 0), input_img_h - new_height)
    start_w = min(max(start_w, 0), input_img_w - new_width)
    return img[start_h:start_h + new_height, start_w:start_w + new_width]
def set_periodic_pixel(img, frequency, direction, new_pixel):
    """Return a copy of ``img`` with every ``frequency``-th column (HORIZONTAL)
    or every ``frequency``-th row (VERTICAL) replaced by ``new_pixel``.

    The input array is left untouched; an unrecognised ``direction`` simply
    returns an unmodified copy.
    """
    out = np.array(img, copy=True)
    if direction == HORIZONTAL:
        # Columns 0, frequency, 2*frequency, ... across every row.
        out[:, ::frequency] = new_pixel
    elif direction == VERTICAL:
        # Rows 0, frequency, 2*frequency, ... across every column.
        out[::frequency, :] = new_pixel
    return out
if __name__ == "__main__":
img = read_image()
revised = set_periodic_pixel(img,10,HORIZONTAL,0)
revised = set_periodic_pixel(revised, 20, VERTICAL, 0)
save_to_disk(revised)
display_image(revised)
#Note: Owing to the large input image used for this example, the program will not show all
#lines unless you zoom in on the saved file (unless your monitor happens to have enough
#resolution...)
| [
"cv2.imwrite",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"os.path.basename",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.namedWindow",
"cv2.imread"
] | [((244, 265), 'cv2.imread', 'cv2.imread', (['file_name'], {}), '(file_name)\n', (254, 265), False, 'import cv2\n'), ((338, 386), 'cv2.namedWindow', 'cv2.namedWindow', (['window_title', 'cv2.WINDOW_NORMAL'], {}), '(window_title, cv2.WINDOW_NORMAL)\n', (353, 386), False, 'import cv2\n'), ((391, 420), 'cv2.imshow', 'cv2.imshow', (['window_title', 'img'], {}), '(window_title, img)\n', (401, 420), False, 'import cv2\n'), ((424, 438), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (435, 438), False, 'import cv2\n'), ((443, 466), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (464, 466), False, 'import cv2\n'), ((515, 552), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (527, 552), False, 'import cv2\n'), ((682, 708), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'img'], {}), '(filename, img)\n', (693, 708), False, 'import cv2\n'), ((1605, 1629), 'numpy.array', 'np.array', (['img'], {'copy': '(True)'}), '(img, copy=True)\n', (1613, 1629), True, 'import numpy as np\n'), ((114, 140), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (130, 140), False, 'import os\n')] |
# coding: utf-8
import numpy as np
# Truth-table inputs for the four XOR cases: (0,0), (0,1), (1,0), (1,1).
x1 = np.asarray([0, 0, 1, 1])
x2 = np.asarray([0, 1, 0, 1])
# Design matrix: a leading row of ones acts as the bias input for each case.
X = np.row_stack((np.ones(shape=(1, 4)), x1, x2))
print("X:\n%s" % X)
# XOR targets for the four input columns.
y = np.asarray([0, 1, 1, 0])
# Hand-picked weights; the first entry of each row multiplies the bias row.
W1 = np.asarray([[-1, 2, -2],
                 [-1, -2, 2]])
W2 = np.asarray([-1, 2, 2])
def sigmoid(input):
    """Steep logistic activation 1 / (1 + e^(-10*input)), applied elementwise."""
    # The factor of 10 sharpens the transition so outputs saturate near 0/1.
    return 1.0 / (1.0 + np.exp(-10.0 * input))
np.set_printoptions(precision=6, suppress=True)
z1 = np.matmul(W1, X)
print("W1*X = z1:\n%s" % z1)
a1 = np.row_stack((np.ones(shape=(1, 4)), sigmoid(z1)))
print("sigmoid(z1) = a1:\n%s" % a1)
z2 = np.matmul(W2, a1)
print("W2*a1 = z2:\n%s" % z2)
a2 = sigmoid(z2)
print("------------------------")
print("prediction: %s" % a2)
print("target: %s" % y)
print("------------------------")
# output:
# X:
# [[1. 1. 1. 1.]
# [0. 0. 1. 1.]
# [0. 1. 0. 1.]]
# W1*X = z1:
# [[-1. -3. 1. -1.]
# [-1. 1. -3. -1.]]
# sigmoid(z1) = a1:
# [[1. 1. 1. 1. ]
# [0.000045 0. 0.999955 0.000045]
# [0.000045 0.999955 0. 0.000045]]
# W2*a1 = z2:
# [-0.999818 0.999909 0.999909 -0.999818]
# ------------------------
# prediction: [0.000045 0.999955 0.999955 0.000045]
# target: [0 1 1 0]
# ------------------------
| [
"numpy.ones",
"numpy.power",
"numpy.asarray",
"numpy.matmul",
"numpy.set_printoptions"
] | [((42, 66), 'numpy.asarray', 'np.asarray', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (52, 66), True, 'import numpy as np\n'), ((72, 96), 'numpy.asarray', 'np.asarray', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (82, 96), True, 'import numpy as np\n'), ((171, 195), 'numpy.asarray', 'np.asarray', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (181, 195), True, 'import numpy as np\n'), ((201, 239), 'numpy.asarray', 'np.asarray', (['[[-1, 2, -2], [-1, -2, 2]]'], {}), '([[-1, 2, -2], [-1, -2, 2]])\n', (211, 239), True, 'import numpy as np\n'), ((262, 284), 'numpy.asarray', 'np.asarray', (['[-1, 2, 2]'], {}), '([-1, 2, 2])\n', (272, 284), True, 'import numpy as np\n'), ((360, 407), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(6)', 'suppress': '(True)'}), '(precision=6, suppress=True)\n', (379, 407), True, 'import numpy as np\n'), ((413, 429), 'numpy.matmul', 'np.matmul', (['W1', 'X'], {}), '(W1, X)\n', (422, 429), True, 'import numpy as np\n'), ((556, 573), 'numpy.matmul', 'np.matmul', (['W2', 'a1'], {}), '(W2, a1)\n', (565, 573), True, 'import numpy as np\n'), ((115, 136), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 4)'}), '(shape=(1, 4))\n', (122, 136), True, 'import numpy as np\n'), ((478, 499), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 4)'}), '(shape=(1, 4))\n', (485, 499), True, 'import numpy as np\n'), ((327, 354), 'numpy.power', 'np.power', (['np.e', '(-10 * input)'], {}), '(np.e, -10 * input)\n', (335, 354), True, 'import numpy as np\n')] |
import os
import meshio
import numpy as np
import pandas as pd
import ezdxf
catalogue_columns = ["t0", "m0", "mw", "x", "y", "z", "area", "dt"]
def read_binary(file: str, format: str, endian: str = "little"):
    """
    Read a flat array of numbers from an RSQSim binary output file.

    :param file: path of the binary file to decode
    :param format: either "d" (8-byte float) or "i" (4-byte integer)
    :param endian: byte order of the file; usually "little" unless the run
        was produced on a non-standard system
    :return: 1-D numpy array with the decoded numbers
    """
    # Validate parameters before touching the file system.
    assert endian in ("little", "big"), "Must specify either 'big' or 'little' endian"
    assert format in ("d", "i")
    assert os.path.exists(file)
    byte_order = "<" if endian == "little" else ">"
    dtype = byte_order + ("f8" if format == "d" else "i4")
    return np.fromfile(file, dtype).flatten()
def read_csv_and_array(prefix: str, read_index: bool = True):
    """
    Load a catalogue CSV plus its four companion numpy arrays.

    Files are located as ``{prefix}_{suffix}`` for the suffixes
    catalogue.csv, events.npy, patches.npy, slip.npy and slip_time.npy.

    :param prefix: common path prefix of the five files (trailing "_" optional)
    :param read_index: treat the first CSV column as the index when True
    :return: [catalogue DataFrame, events, patches, slip, slip_time arrays]
    :raises FileNotFoundError: if any of the five expected files is absent
    """
    assert prefix, "Empty prefix string supplied"
    if not prefix.endswith("_"):
        prefix = prefix + "_"
    suffixes = ("catalogue.csv", "events.npy", "patches.npy", "slip.npy", "slip_time.npy")
    paths = [prefix + s for s in suffixes]
    missing = [s for p, s in zip(paths, suffixes) if not os.path.exists(p)]
    if missing:
        raise FileNotFoundError("{} file missing!".format(missing[0]))
    index_col = 0 if read_index else None
    catalogue = pd.read_csv(paths[0], index_col=index_col)
    arrays = [np.load(p) for p in paths[1:]]
    return [catalogue] + arrays
def read_earthquakes(earthquake_file: str, get_patch: bool = False, eq_start_index: int = None,
                     eq_end_index: int = None, endian: str = "little"):
    """
    Reads earthquakes, inferring list file names from prefix of earthquake file.
    Based on R scripts by <NAME>.

    :param earthquake_file: usually has a ".out" suffix, named eqs.{prefix}.out
    :param get_patch: currently unused
    :param eq_start_index: optional first event index; must be < eq_end_index
    :param eq_end_index: optional last event index
    :param endian: byte order of the companion binary files
    :return: None (companion tauDot/sigmaDot paths are derived but not yet read)
    """
    assert endian in ("little", "big"), "Must specify either 'big' or 'little' endian"
    assert os.path.exists(earthquake_file)
    if not any([a is None for a in (eq_start_index, eq_end_index)]):
        if eq_start_index >= eq_end_index:
            raise ValueError("eq_start index should be smaller than eq_end_index!")
    # Get full path to file and its containing directory
    abs_file_path = os.path.abspath(earthquake_file)
    file_dir = os.path.dirname(abs_file_path)
    file_base_name = os.path.basename(abs_file_path)

    # Get file prefix from basename
    split_by_dots = file_base_name.split(".")

    # Check that filename fits expected format: eqs.{prefix}.out
    if not all([split_by_dots[0] == "eqs", split_by_dots[-1] == "out"]):
        print("Warning: non-standard file name.")
        print("Expecting earthquake file name to have the format: eqs.{prefix}.out")
        print("using 'catalogue' as prefix...")
        prefix = "catalogue"
    else:
        # Join prefix back together if necessary, warning if empty
        prefix_list = split_by_dots[1:-1]
        if len(prefix_list) == 1:
            prefix = prefix_list[0]
            if prefix.strip() == "":
                print("Warning: empty prefix string")
        else:
            # BUG FIX: ".".join(*prefix_list) unpacked the list into separate
            # arguments and raised TypeError for any multi-part prefix.
            prefix = ".".join(prefix_list)

    # Companion binary files live alongside the earthquake file, so build
    # their paths from the directory rather than appending to the file path.
    tau_file = os.path.join(file_dir, "tauDot.{}.out".format(prefix))
    sigmat_file = os.path.join(file_dir, "sigmaDot.{}.out".format(prefix))
def read_earthquake_catalogue(catalogue_file: str):
    """
    Parse an RSQSim earthquake catalogue text file into a DataFrame.

    Everything up to and including the "%%% end input files" marker line is
    skipped; the remaining rows are read as numbers and the first eight
    columns are labelled with the module-level ``catalogue_columns``.
    """
    assert os.path.exists(catalogue_file)
    with open(catalogue_file, "r") as fid:
        lines = fid.readlines()
    first_data_line = lines.index("%%% end input files\n") + 1
    values = np.loadtxt(lines[first_data_line:])
    return pd.DataFrame(values[:, :8], columns=catalogue_columns)
# def read_fault(fault_file_name: str, check_if_grid: bool = True, )
def read_ts_coords(filename):
    """
    This script reads in the tsurf (*.ts) files for the SCEC Community Fault Model (cfm)
    as a numpy array.
    The script is based on the matlab script ReadAndSaveCfm.m by <NAME> available
    from http://structure.rc.fas.harvard.edu/cfm/download/meade/ReadAndSaveCfm.m
    Copyright <NAME>, July 2014

    :param filename: path to a GOCAD tsurf (*.ts) file
    :return: (vrtx, trgl, tri) -- vrtx is an (nVrtx, 4) array of
        [id, x, y, z] rows, trgl an (nTrgl, 3) int array of vertex ids per
        triangle, and tri an (nTrgl, 9) array with the three (x, y, z)
        vertex triples of each triangle flattened into one row.
    """
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    # Vertex lines start with VRTX (or PVRTX when properties are attached);
    # triangle lines start with TRGL.
    idxVrtx = [idx for idx, l in enumerate(lines)
               if 'VRTX' in l or 'PVRTX' in l]
    idxTrgl = [idx for idx, l in enumerate(lines) if 'TRGL' in l]
    nVrtx = len(idxVrtx)
    nTrgl = len(idxTrgl)
    vrtx = np.zeros((nVrtx, 4))
    trgl = np.zeros((nTrgl, 3), dtype='int')
    tri = np.zeros((nTrgl, 9))
    # Each vertex line has the shape "VRTX id x y z ..." -> keep id + coords.
    for k, iVrtx in enumerate(idxVrtx):
        line = lines[iVrtx]
        tmp = line.split()
        vrtx[k] = [int(tmp[1]), float(tmp[2]), float(tmp[3]), float(tmp[4])]
    # Each triangle line is "TRGL i1 i2 i3"; additionally expand the three
    # referenced vertices into the flat (x, y, z)*3 layout of ``tri``.
    # NOTE(review): the TRGL parse uses split(' ') rather than split(), so it
    # assumes single-space separators -- confirm against the input files.
    for k, iTrgl in enumerate(idxTrgl):
        line = lines[iTrgl]
        tmp = line.split(' ')
        trgl[k] = [int(tmp[1]), int(tmp[2]), int(tmp[3])]
        for l in range(3):
            i1 = l * 3
            i2 = 3 * (l + 1)
            # Look up the vertex whose id matches this corner of the triangle.
            vertex_i = vrtx[vrtx[:, 0] == trgl[k, l]][0]
            tri[k, i1:i2] = vertex_i[1:]
    return vrtx, trgl, tri
def read_dxf(dxf_file: str):
    """
    Reads mesh and boundary from dxf file exported from move. Returns boundary (as array) and triangles

    :param dxf_file: path to a DXF file containing 3DFACE triangle entities
        and exactly one POLYLINE boundary
    :return: (triangle_array, boundary_array) -- triangles as an (n, 9)
        array of flattened vertex coordinates; boundary as an array of
        polyline points (None if no POLYLINE entity was encountered)
    """
    assert os.path.exists(dxf_file)
    dxf = ezdxf.readfile(dxf_file)
    msp = dxf.modelspace()
    dxftypes = [e.dxftype() for e in msp]
    assert all([a in dxftypes for a in ("3DFACE", "POLYLINE")]), "{}: Expected triangles and boundary".format(dxf_file)
    if dxftypes.count("POLYLINE") > 1:
        raise ValueError("{}: Too many boundaries lines...".format(dxf_file))
    triangle_ls = []
    boundary_array = None
    for entity in msp:
        if entity.dxftype() == "3DFACE":
            triangle = np.array([vertex.xyz for vertex in entity])
            # np.unique drops the duplicated corner that DXF 3DFACE entities
            # carry for triangles, leaving 3 vertices -> 9 coordinates.
            # NOTE(review): np.unique also sorts the vertices, so winding
            # order is not preserved -- confirm callers ignore orientation.
            unique_triangle = np.unique(triangle, axis=0).reshape((9,))
            triangle_ls.append(unique_triangle)
        elif entity.dxftype() == "POLYLINE":
            boundary_ls = []
            for point in entity.points():
                boundary_ls.append(point.xyz)
            boundary_array = np.array(boundary_ls)
    triangle_array = np.array(triangle_ls)
    return triangle_array, boundary_array
def read_stl(stl_file: str):
    """
    Load an STL surface mesh and flatten each triangle to one row.

    :param stl_file: path to an STL file readable by meshio
    :return: (n_triangles, 9) array -- the three (x, y, z) vertex triples
        of every triangle concatenated per row
    """
    assert os.path.exists(stl_file)
    mesh = meshio.read(stl_file)
    assert "triangle" in mesh.cells_dict
    points = mesh.points
    rows = []
    for corner_ids in mesh.cells_dict["triangle"]:
        rows.append(np.hstack([points[corner] for corner in corner_ids]))
    return np.array(rows)
| [
"os.path.exists",
"numpy.fromfile",
"numpy.unique",
"pandas.DataFrame",
"pandas.read_csv",
"numpy.hstack",
"ezdxf.readfile",
"numpy.array",
"numpy.zeros",
"meshio.read",
"os.path.basename",
"os.path.abspath",
"numpy.loadtxt",
"numpy.load"
] | [((730, 750), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (744, 750), False, 'import os\n'), ((2171, 2202), 'os.path.exists', 'os.path.exists', (['earthquake_file'], {}), '(earthquake_file)\n', (2185, 2202), False, 'import os\n'), ((2470, 2502), 'os.path.abspath', 'os.path.abspath', (['earthquake_file'], {}), '(earthquake_file)\n', (2485, 2502), False, 'import os\n'), ((2524, 2555), 'os.path.basename', 'os.path.basename', (['abs_file_path'], {}), '(abs_file_path)\n', (2540, 2555), False, 'import os\n'), ((3550, 3580), 'os.path.exists', 'os.path.exists', (['catalogue_file'], {}), '(catalogue_file)\n', (3564, 3580), False, 'import os\n'), ((3730, 3758), 'numpy.loadtxt', 'np.loadtxt', (['data[start_eqs:]'], {}), '(data[start_eqs:])\n', (3740, 3758), True, 'import numpy as np\n'), ((3786, 3844), 'pandas.DataFrame', 'pd.DataFrame', (['data_array[:, :8]'], {'columns': 'catalogue_columns'}), '(data_array[:, :8], columns=catalogue_columns)\n', (3798, 3844), True, 'import pandas as pd\n'), ((4596, 4616), 'numpy.zeros', 'np.zeros', (['(nVrtx, 4)'], {}), '((nVrtx, 4))\n', (4604, 4616), True, 'import numpy as np\n'), ((4628, 4661), 'numpy.zeros', 'np.zeros', (['(nTrgl, 3)'], {'dtype': '"""int"""'}), "((nTrgl, 3), dtype='int')\n", (4636, 4661), True, 'import numpy as np\n'), ((4672, 4692), 'numpy.zeros', 'np.zeros', (['(nTrgl, 9)'], {}), '((nTrgl, 9))\n', (4680, 4692), True, 'import numpy as np\n'), ((5388, 5412), 'os.path.exists', 'os.path.exists', (['dxf_file'], {}), '(dxf_file)\n', (5402, 5412), False, 'import os\n'), ((5423, 5447), 'ezdxf.readfile', 'ezdxf.readfile', (['dxf_file'], {}), '(dxf_file)\n', (5437, 5447), False, 'import ezdxf\n'), ((6290, 6311), 'numpy.array', 'np.array', (['triangle_ls'], {}), '(triangle_ls)\n', (6298, 6311), True, 'import numpy as np\n'), ((6397, 6421), 'os.path.exists', 'os.path.exists', (['stl_file'], {}), '(stl_file)\n', (6411, 6421), False, 'import os\n'), ((6434, 6455), 'meshio.read', 'meshio.read', (['stl_file'], 
{}), '(stl_file)\n', (6445, 6455), False, 'import meshio\n'), ((1435, 1473), 'pandas.read_csv', 'pd.read_csv', (['file_list[0]'], {'index_col': '(0)'}), '(file_list[0], index_col=0)\n', (1446, 1473), True, 'import pandas as pd\n'), ((1497, 1522), 'pandas.read_csv', 'pd.read_csv', (['file_list[0]'], {}), '(file_list[0])\n', (1508, 1522), True, 'import pandas as pd\n'), ((1539, 1552), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (1546, 1552), True, 'import numpy as np\n'), ((1310, 1330), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1324, 1330), False, 'import os\n'), ((5890, 5933), 'numpy.array', 'np.array', (['[vertex.xyz for vertex in entity]'], {}), '([vertex.xyz for vertex in entity])\n', (5898, 5933), True, 'import numpy as np\n'), ((6646, 6695), 'numpy.hstack', 'np.hstack', (['[point_dict[vertex] for vertex in tri]'], {}), '([point_dict[vertex] for vertex in tri])\n', (6655, 6695), True, 'import numpy as np\n'), ((791, 828), 'numpy.fromfile', 'np.fromfile', (['file', "(endian_sign + 'f8')"], {}), "(file, endian_sign + 'f8')\n", (802, 828), True, 'import numpy as np\n'), ((867, 904), 'numpy.fromfile', 'np.fromfile', (['file', "(endian_sign + 'i4')"], {}), "(file, endian_sign + 'i4')\n", (878, 904), True, 'import numpy as np\n'), ((6246, 6267), 'numpy.array', 'np.array', (['boundary_ls'], {}), '(boundary_ls)\n', (6254, 6267), True, 'import numpy as np\n'), ((5964, 5991), 'numpy.unique', 'np.unique', (['triangle'], {'axis': '(0)'}), '(triangle, axis=0)\n', (5973, 5991), True, 'import numpy as np\n')] |
"""
Multiple stacked lstm implemeation on the lip movement data.
<NAME>
<EMAIL>
Fall 2016
"""
from __future__ import print_function
import numpy as np
# Fix the RNG seed so weight initialisation is reproducible across runs.
np.random.seed(1337)
#random seed fixing for reproducibility
#data load & preprocessing
X_train = np.load('../data/videopart43.npy').astype('float32')
Y_train = np.load('../data/audiopart43.npy').astype('float32')
#normalizing data
# Video frames are 8-bit (0-255); audio samples are 16-bit signed (+/-32767).
X_train = X_train/255
Y_train = Y_train/32767
# 826 examples x 13 timesteps x 1 channel x 53x53 pixels; the audio target
# is flattened to 13 timesteps * 4702 samples per example.
X_train = X_train.reshape((826,13,1,53,53)).astype('float32')
Y_train = Y_train.reshape((826,13,1,53,53)).astype('float32') if False else Y_train.reshape((826,13*4702)).astype('float32')
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout,TimeDistributed,LSTM,Bidirectional
from keras.layers import Convolution2D,Flatten,MaxPooling2D
import time
print("Building Model.....")
model_time = time.time()
# Stateful recurrent model: the batch size is fixed at 14 (826 = 59 * 14).
model = Sequential()
model.add(TimeDistributed(Convolution2D(64, 3, 3,border_mode='valid'),batch_input_shape=(14,13,1,53,53),input_shape=(13,1,53,53)))
model.add(Activation('tanh'))
model.add(Dropout(0.25))
model.add(TimeDistributed(Convolution2D(32, 2, 2, border_mode='valid')))
model.add(Activation('tanh'))
# Flatten each timestep's feature maps before the recurrent layers.
model.add(TimeDistributed(Flatten()))
model.add(Bidirectional(LSTM(256,return_sequences=True,stateful=True)))
model.add(Dropout(0.20))
model.add(Bidirectional(LSTM(128,return_sequences=True,stateful=True)))
model.add(Dropout(0.20))
model.add((LSTM(64,stateful=True)))
model.add(Dropout(0.20))
model.add((Dense(512)))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
# Output layer: one flattened audio clip (13 timesteps * 4702 samples).
model.add((Dense(13*4702)))
model.add(Activation('tanh'))
model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
#checkpoint import
from keras.callbacks import ModelCheckpoint
from os.path import isfile, join
#weight file name
weight_file = '../weights/time-dis-cnn_weight.h5'
#loading previous weight file for resuming training
if isfile(weight_file):
    model.load_weights(weight_file)
#weight-checkmark
# Save weights only when training accuracy improves.
checkpoint = ModelCheckpoint(weight_file, monitor='acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
print("model compile time: "+str(time.time()-model_time)+'s')
# fit the model
model.fit(X_train,Y_train, nb_epoch=1, batch_size=14,callbacks=callbacks_list)
pred = model.predict(X_train,batch_size=14,verbose=1)
# Rescale predictions back to the 16-bit audio range and stack timesteps.
pred = pred*32767
pred = pred.reshape(826*13,4702)
print('pred shape',pred.shape)
print('pred dtype',pred.dtype)
np.save('../predictions/pred-time-cnn.npy',pred)
| [
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.callbacks.ModelCheckpoint",
"keras.models.Sequential",
"os.path.isfile",
"keras.layers.LSTM",
"keras.layers.Dropout",
"numpy.random.seed",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.load",
"time.time",
"numpy.save"
] | [((153, 173), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (167, 173), True, 'import numpy as np\n'), ((794, 805), 'time.time', 'time.time', ([], {}), '()\n', (803, 805), False, 'import time\n'), ((815, 827), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (825, 827), False, 'from keras.models import Sequential\n'), ((1845, 1864), 'os.path.isfile', 'isfile', (['weight_file'], {}), '(weight_file)\n', (1851, 1864), False, 'from os.path import isfile, join\n'), ((1931, 2022), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['weight_file'], {'monitor': '"""acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(weight_file, monitor='acc', verbose=1, save_best_only=True,\n mode='max')\n", (1946, 2022), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2378, 2427), 'numpy.save', 'np.save', (['"""../predictions/pred-time-cnn.npy"""', 'pred'], {}), "('../predictions/pred-time-cnn.npy', pred)\n", (2385, 2427), True, 'import numpy as np\n'), ((970, 988), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (980, 988), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1000, 1013), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1007, 1013), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1099, 1117), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (1109, 1117), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1242, 1254), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1249, 1254), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1339, 1351), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1346, 1351), False, 'from keras.layers import Dense, Activation, Dropout, 
TimeDistributed, LSTM, Bidirectional\n'), ((1365, 1388), 'keras.layers.LSTM', 'LSTM', (['(64)'], {'stateful': '(True)'}), '(64, stateful=True)\n', (1369, 1388), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1400, 1412), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1407, 1412), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1427, 1437), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (1432, 1437), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1450, 1468), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (1460, 1468), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1480, 1492), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1487, 1492), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1506, 1522), 'keras.layers.Dense', 'Dense', (['(13 * 4702)'], {}), '(13 * 4702)\n', (1511, 1522), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1533, 1551), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (1543, 1551), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((253, 287), 'numpy.load', 'np.load', (['"""../data/videopart43.npy"""'], {}), "('../data/videopart43.npy')\n", (260, 287), True, 'import numpy as np\n'), ((316, 350), 'numpy.load', 'np.load', (['"""../data/audiopart43.npy"""'], {}), "('../data/audiopart43.npy')\n", (323, 350), True, 'import numpy as np\n'), ((855, 899), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""valid"""'}), "(64, 3, 3, border_mode='valid')\n", (868, 899), False, 'from keras.layers import Convolution2D, Flatten, 
MaxPooling2D\n'), ((1042, 1086), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(2)', '(2)'], {'border_mode': '"""valid"""'}), "(32, 2, 2, border_mode='valid')\n", (1055, 1086), False, 'from keras.layers import Convolution2D, Flatten, MaxPooling2D\n'), ((1147, 1156), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1154, 1156), False, 'from keras.layers import Convolution2D, Flatten, MaxPooling2D\n'), ((1184, 1231), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)', 'stateful': '(True)'}), '(256, return_sequences=True, stateful=True)\n', (1188, 1231), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((1281, 1328), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)', 'stateful': '(True)'}), '(128, return_sequences=True, stateful=True)\n', (1285, 1328), False, 'from keras.layers import Dense, Activation, Dropout, TimeDistributed, LSTM, Bidirectional\n'), ((2084, 2095), 'time.time', 'time.time', ([], {}), '()\n', (2093, 2095), False, 'import time\n')] |
# coding:utf-8
import os
import cv2
import numpy as np
import fire
from glob import glob
from PIL import Image
from torchvision import transforms
CROP_SIZE = (320, 440)
def generate_five_crop(image_path):
    """Cut five CROP_SIZE patches out of one image with torchvision's
    FiveCrop and save them next to it as <stem>_1.jpg ... <stem>_5.jpg."""
    full_path = os.path.abspath(image_path)
    folder = os.path.dirname(full_path)
    stem = os.path.basename(full_path)
    stem = stem[:stem.rfind('.')]
    crops = transforms.FiveCrop(CROP_SIZE)(Image.open(full_path))
    for index, crop in enumerate(crops, start=1):
        destination = os.path.join(folder, stem) + '_' + str(index) + '.jpg'
        crop.save(destination)
def generate_five_crop_dir(data_dir):
    """Run generate_five_crop over every *.jpg found under
    <data_dir>/train/c0 ... <data_dir>/train/c9."""
    root = os.path.abspath(data_dir)
    for class_id in range(10):
        pattern = os.path.join(root, "train", "c%d" % class_id, "*.jpg")
        for jpg_path in glob(pattern):
            print('handling "{}" file'.format(jpg_path))
            generate_five_crop(jpg_path)
def calc_image_gradient_inside(image, X_weight=0.5, Y_weight=0.5):
    """Weighted combination of the absolute Sobel gradients along x and y.

    :param image: input image (converted to a numpy array first)
    :param X_weight: contribution of the horizontal gradient
    :param Y_weight: contribution of the vertical gradient
    :return: uint8 combined-gradient image
    """
    data = np.array(image)
    # 16-bit signed intermediates keep negative gradient values from clipping.
    grad_x = cv2.Sobel(data, cv2.CV_16S, 1, 0)
    grad_y = cv2.Sobel(data, cv2.CV_16S, 0, 1)
    abs_x = cv2.convertScaleAbs(grad_x)  # convert back to uint8
    abs_y = cv2.convertScaleAbs(grad_y)
    return cv2.addWeighted(abs_x, X_weight, abs_y, Y_weight, 0)
def calc_image_gradient(image_path, show=True):
    """Load an image, compute the combined absolute Sobel gradient and
    optionally preview the intermediate results until 'q' is pressed.

    :param image_path: path of the image file to load
    :param show: when True, open blocking OpenCV preview windows
    :return: uint8 gradient image (equal x/y weighting)
    """
    img = Image.open(image_path)
    img = np.array(img)
    #img = cv2.imread(image_path, 0)
    # 16-bit signed output keeps negative gradients until the abs() step.
    x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
    absX = cv2.convertScaleAbs(x) # convert back to uint8
    absY = cv2.convertScaleAbs(y)
    dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    if show is True:
        # Preview loop: keep refreshing the windows until 'q' is pressed.
        while(1):
            cv2.imshow("absX", absX)
            cv2.imshow("absY", absY)
            cv2.imshow("Result", dst)
            k = cv2.waitKey(1) & 0XFF
            if k==ord('q'):
                break;
        cv2.destroyAllWindows()
    return dst
if __name__ == '__main__':
#generate_five_crop_dir('../data')
fire.Fire() | [
"PIL.Image.open",
"cv2.convertScaleAbs",
"fire.Fire",
"torchvision.transforms.FiveCrop",
"os.path.join",
"cv2.imshow",
"os.path.dirname",
"numpy.array",
"cv2.addWeighted",
"cv2.destroyAllWindows",
"os.path.basename",
"os.path.abspath",
"cv2.waitKey",
"cv2.Sobel"
] | [((224, 251), 'os.path.abspath', 'os.path.abspath', (['image_path'], {}), '(image_path)\n', (239, 251), False, 'import os\n'), ((266, 293), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (281, 293), False, 'import os\n'), ((309, 337), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (325, 337), False, 'import os\n'), ((402, 432), 'torchvision.transforms.FiveCrop', 'transforms.FiveCrop', (['CROP_SIZE'], {}), '(CROP_SIZE)\n', (421, 432), False, 'from torchvision import transforms\n'), ((445, 467), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (455, 467), False, 'from PIL import Image\n'), ((711, 736), 'os.path.abspath', 'os.path.abspath', (['data_dir'], {}), '(data_dir)\n', (726, 736), False, 'import os\n'), ((1104, 1119), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1112, 1119), True, 'import numpy as np\n'), ((1129, 1163), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_16S', '(1)', '(0)'], {}), '(image, cv2.CV_16S, 1, 0)\n', (1138, 1163), False, 'import cv2\n'), ((1172, 1206), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_16S', '(0)', '(1)'], {}), '(image, cv2.CV_16S, 0, 1)\n', (1181, 1206), False, 'import cv2\n'), ((1223, 1245), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['x'], {}), '(x)\n', (1242, 1245), False, 'import cv2\n'), ((1268, 1290), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['y'], {}), '(y)\n', (1287, 1290), False, 'import cv2\n'), ((1302, 1352), 'cv2.addWeighted', 'cv2.addWeighted', (['absX', 'X_weight', 'absY', 'Y_weight', '(0)'], {}), '(absX, X_weight, absY, Y_weight, 0)\n', (1317, 1352), False, 'import cv2\n'), ((1427, 1449), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1437, 1449), False, 'from PIL import Image\n'), ((1460, 1473), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1468, 1473), True, 'import numpy as np\n'), ((1519, 1551), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_16S', '(1)', '(0)'], {}), 
'(img, cv2.CV_16S, 1, 0)\n', (1528, 1551), False, 'import cv2\n'), ((1560, 1592), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_16S', '(0)', '(1)'], {}), '(img, cv2.CV_16S, 0, 1)\n', (1569, 1592), False, 'import cv2\n'), ((1609, 1631), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['x'], {}), '(x)\n', (1628, 1631), False, 'import cv2\n'), ((1654, 1676), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['y'], {}), '(y)\n', (1673, 1676), False, 'import cv2\n'), ((1692, 1732), 'cv2.addWeighted', 'cv2.addWeighted', (['absX', '(0.5)', 'absY', '(0.5)', '(0)'], {}), '(absX, 0.5, absY, 0.5, 0)\n', (1707, 1732), False, 'import cv2\n'), ((2097, 2108), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (2106, 2108), False, 'import fire\n'), ((554, 585), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (566, 585), False, 'import os\n'), ((781, 823), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""', "('c%d' % i)"], {}), "(data_dir, 'train', 'c%d' % i)\n", (793, 823), False, 'import os\n'), ((1986, 2009), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2007, 2009), False, 'import cv2\n'), ((849, 881), 'os.path.join', 'os.path.join', (['train_dir', '"""*.jpg"""'], {}), "(train_dir, '*.jpg')\n", (861, 881), False, 'import os\n'), ((1789, 1813), 'cv2.imshow', 'cv2.imshow', (['"""absX"""', 'absX'], {}), "('absX', absX)\n", (1799, 1813), False, 'import cv2\n'), ((1826, 1850), 'cv2.imshow', 'cv2.imshow', (['"""absY"""', 'absY'], {}), "('absY', absY)\n", (1836, 1850), False, 'import cv2\n'), ((1863, 1888), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'dst'], {}), "('Result', dst)\n", (1873, 1888), False, 'import cv2\n'), ((1905, 1919), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1916, 1919), False, 'import cv2\n')] |
'''Focal Related Utilities'''
import re
import warnings
from numba import prange
import numpy as np
from xarray import DataArray
from xrspatial.utils import ngjit
from xrspatial.utils import lnglat_to_meters
warnings.simplefilter('default')
DEFAULT_UNIT = 'meter'
# TODO: Make convolution more generic with numba first-class functions.
def is_number(s):
    """Return True when ``s`` can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
# modified from https://stackoverflow.com/questions/3943752/the-dateutil-parser-parse-of-distance-strings
class Distance(object):
METER = 1
FOOT = 0.3048
KILOMETER = 1000
MILE = 1609.344
UNITS = {'meter': METER,
'meters': METER,
'm': METER,
'feet': FOOT,
'foot': FOOT,
'ft': FOOT,
'miles': MILE,
'mls': MILE,
'ml': MILE,
'kilometer': KILOMETER,
'kilometers': KILOMETER,
'km': KILOMETER,
}
def __init__(self, s):
self.number, unit = self._get_distance_unit(s)
self._convert(unit)
def _get_distance_unit(self, s):
# spit string into numbers and text
splits = [x for x in re.split(r'(-?\d*\.?\d+)', s) if x != '']
if len(splits) not in [1, 2]:
raise ValueError("Invalid distance.")
number = splits[0]
unit = DEFAULT_UNIT
if len(splits) == 1:
warnings.warn('Raster distance unit not provided. '
'Use meter as default.', Warning)
elif len(splits) == 2:
unit = splits[1]
unit = unit.lower()
unit = unit.replace(' ', '')
if unit not in self.UNITS:
raise ValueError(
"Invalid value.\n"
"Distance unit should be one of the following: \n"
"meter (meter, meters, m),\n"
"kilometer (kilometer, kilometers, km),\n"
"foot (foot, feet, ft),\n"
"mile (mile, miles, ml, mls)")
return number, unit
def _convert(self, unit):
self.number = float(self.number)
if self.UNITS[unit] != 1:
self.number *= self.UNITS[unit]
@property
def meters(self):
return self.number
@meters.setter
def meters(self, v):
self.number = float(v)
@property
def miles(self):
return self.number / self.MILE
@miles.setter
def miles(self, v):
self.number = v
self._convert('miles')
@property
def feet(self):
return self.number / self.FOOT
@feet.setter
def feet(self, v):
self.number = v
self._convert('feet')
@property
def kilometers(self):
return self.number / self.KILOMETER
@kilometers.setter
def kilometers(self, v):
self.number = v
self._convert('KILOMETER')
def _calc_cell_size(raster):
    """Return the (x, y) cell sizes of ``raster`` as ``Distance`` objects.

    The size is taken from the spacing of the first two coordinate values
    along the x/y dimensions; when lat/lon dimensions are present instead,
    their spacing is projected to meters first.
    """
    if 'unit' in raster.attrs:
        unit = raster.attrs['unit']
    else:
        unit = DEFAULT_UNIT
        warnings.warn('Raster distance unit not provided. '
                      'Use meter as default.', Warning)
    cell_size_x = 1
    cell_size_y = 1
    # derive cell sizes from the spacing of the first two x/y coordinates
    for dim in raster.dims:
        name = dim.lower()
        if 'x' in name:
            # dimension of x-coordinates
            if len(raster[dim]) > 1:
                cell_size_x = raster[dim].values[1] - raster[dim].values[0]
        elif 'y' in name:
            # dimension of y-coordinates
            if len(raster[dim]) > 1:
                cell_size_y = raster[dim].values[1] - raster[dim].values[0]
    lon0 = lon1 = lat0 = lat1 = None
    for dim in raster.dims:
        name = dim.lower()
        if 'lon' in name:
            # dimension of x-coordinates (geographic)
            if len(raster[dim]) > 1:
                lon0, lon1 = raster[dim].values[0], raster[dim].values[1]
        elif 'lat' in name:
            # dimension of y-coordinates (geographic)
            if len(raster[dim]) > 1:
                lat0, lat1 = raster[dim].values[0], raster[dim].values[1]
    # geographic coordinates: convert the lat-lon spacing to meters
    if (lon0, lon1, lat0, lat1) != (None, None, None, None):
        mx0, my0 = lnglat_to_meters(lon0, lat0)
        mx1, my1 = lnglat_to_meters(lon1, lat1)
        cell_size_x = mx1 - mx0
        cell_size_y = my1 - my0
        unit = DEFAULT_UNIT
    sx = Distance(str(cell_size_x) + unit)
    sy = Distance(str(cell_size_y) + unit)
    return sx, sy
def _gen_ellipse_kernel(half_w, half_h):
# x values of interest
x = np.linspace(-half_w, half_w, 2 * half_w + 1)
# y values of interest, as a "column" array
y = np.linspace(-half_h, half_h, 2 * half_h + 1)[:, None]
# True for points inside the ellipse
# (x / a)^2 + (y / b)^2 <= 1, avoid division to avoid rounding issue
ellipse = (x * half_h) ** 2 + (y * half_w) ** 2 <= (half_w * half_h) ** 2
return ellipse.astype(float)
class Kernel:
    """Convolution-style kernel defined by a shape and a physical radius.

    Only ``shape='circle'`` is currently supported. ``radius`` is a distance
    string or number (default unit: meters); it is converted to pixels per
    raster in :meth:`to_array`.
    """
    def __init__(self, shape='circle', radius=10000):
        self.shape = shape
        self.radius = radius
        self._validate_shape()
        self._validate_radius()
    def _validate_shape(self):
        # validate shape
        if self.shape not in ['circle']:
            raise ValueError(
                "Kernel shape must be \'circle\'")
    def _validate_radius(self):
        # parsing raises ValueError for an invalid radius string;
        # fix: dropped the leftover debug print of the parsed Distance
        Distance(str(self.radius))
    def to_array(self, raster):
        """Return the kernel as a 2-D float mask sized for ``raster``'s cells."""
        # calculate cell size over the x and y axis
        sx, sy = _calc_cell_size(raster)
        # create Distance object of radius
        sr = Distance(str(self.radius))
        if self.shape == 'circle':
            # convert radius (meter) to pixel
            kernel_half_w = int(sr.meters / sx.meters)
            kernel_half_h = int(sr.meters / sy.meters)
            kernel = _gen_ellipse_kernel(kernel_half_w, kernel_half_h)
        return kernel
@ngjit
def _mean(data, excludes):
    """3x3 box-mean filter; cells whose value equals an entry of ``excludes``
    are copied through unchanged.

    Border cells (first/last row and column) are never written and therefore
    stay 0 in the output (``out`` starts from zeros).
    NOTE(review): the ``==`` test never matches NaN, so ``excludes=[np.nan]``
    excludes nothing — confirm whether NaN exclusion was intended.
    """
    out = np.zeros_like(data)
    rows, cols = data.shape
    for y in range(1, rows-1):
        for x in range(1, cols-1):
            exclude = False
            for ex in excludes:
                if data[y, x] == ex:
                    exclude = True
                    break
            if not exclude:
                # unrolled 3x3 neighborhood around (y, x)
                a,b,c,d,e,f,g,h,i = [data[y-1, x-1], data[y, x-1], data[y+1, x-1],
                                     data[y-1, x], data[y, x], data[y+1, x],
                                     data[y-1, x+1], data[y, x+1], data[y+1, x+1]]
                out[y, x] = (a+b+c+d+e+f+g+h+i) / 9
            else:
                out[y, x] = data[y, x]
    return out
# TODO: add optional name parameter `name='mean'`
def mean(agg, passes=1, excludes=(np.nan,), name='mean'):
    """
    Returns Mean filtered array using a 3x3 window
    Parameters
    ----------
    agg : DataArray
    passes : int
        number of times to run mean
    excludes : sequence of values
        cell values copied through unchanged instead of being averaged
    name : str
        output xr.DataArray.name property
    Returns
    -------
    data: DataArray
    """
    # default is a tuple now: a mutable default argument ([np.nan]) is a
    # shared-state pitfall; behavior is unchanged since only tuple(excludes)
    # is ever used
    out = None
    for _ in range(passes):
        source = agg.data if out is None else out
        out = _mean(source, tuple(excludes))
    return DataArray(out, name=name, dims=agg.dims,
                     coords=agg.coords, attrs=agg.attrs)
@ngjit
def calc_mean(array):
    # NaN-ignoring mean; default aggregation used by `apply`
    return np.nanmean(array)
@ngjit
def calc_sum(array):
    # NaN-ignoring sum; alternative aggregation for `apply`
    return np.nansum(array)
@ngjit
def upper_bound_p_value(zscore):
    # conservative upper bound on the p-value implied by |z|
    # (cutoffs 2.33 / 1.65 / 1.29 correspond to ~0.01 / ~0.05 / ~0.10)
    z = abs(zscore)
    if z >= 2.33:
        return 0.0099
    if z >= 1.65:
        return 0.0495
    if z >= 1.29:
        return 0.0985
    return 1
@ngjit
def _hot_cold(zscore):
    # sign of the z-score: +1 hot spot, -1 cold spot, 0 neutral
    if zscore > 0:
        return 1
    if zscore < 0:
        return -1
    return 0
@ngjit
def _confidence(zscore):
    # confidence level (99 / 95 / 90 percent, else 0) implied by the z-score,
    # double-checked against the conservative p-value bound
    p_value = upper_bound_p_value(zscore)
    z = abs(zscore)
    if z > 2.58 and p_value < 0.01:
        return 99
    if z > 1.96 and p_value < 0.05:
        return 95
    if z > 1.65 and p_value < 0.1:
        return 90
    return 0
@ngjit
def _apply(data, kernel_array, func):
    """Slide ``kernel_array`` over ``data`` and write ``func`` of the masked
    neighborhood values into each output cell.

    Kernel cells that are 0, or that fall outside the raster, stay NaN in the
    scratch buffer — ``func`` is expected to be NaN-aware.
    NOTE(review): ``kernel_values`` is shared across ``prange`` iterations,
    which is racy if ngjit actually parallelizes — confirm it runs serially.
    """
    out = np.zeros_like(data)
    rows, cols = data.shape
    krows, kcols = kernel_array.shape
    hrows, hcols = int(krows / 2), int(kcols / 2)
    kernel_values = np.zeros_like(kernel_array, dtype=data.dtype)
    for y in prange(rows):
        for x in prange(cols):
            # kernel values are all nans at the beginning of each step
            kernel_values.fill(np.nan)
            for ky in range(y - hrows, y + hrows + 1):
                for kx in range(x - hcols, x + hcols + 1):
                    if ky >= 0 and kx >= 0:
                        if ky >= 0 and ky < rows and kx >= 0 and kx < cols:
                            kyidx, kxidx = ky - (y - hrows), kx - (x - hcols)
                            if kernel_array[kyidx, kxidx] == 1:
                                kernel_values[kyidx, kxidx] = data[ky, kx]
            out[y, x] = func(kernel_values)
    return out
def apply(raster, kernel, func=calc_mean):
    """Apply ``func`` over ``raster`` within the neighborhood given by ``kernel``.

    Parameters
    ----------
    raster : xarray.DataArray
        2-D array of integers or floats.
    kernel : Kernel
        Neighborhood definition (shape + radius).
    func : callable
        NaN-aware aggregation over the masked neighborhood (default NaN-mean).

    Returns
    -------
    xarray.DataArray with the same coords/dims/attrs as ``raster``.
    """
    # validate raster
    if not isinstance(raster, DataArray):
        raise TypeError("`raster` must be instance of DataArray")
    if raster.ndim != 2:
        raise ValueError("`raster` must be 2D")
    # fix: np.float was removed in NumPy 1.24; np.floating covers all float dtypes
    if not (issubclass(raster.values.dtype.type, np.integer) or
            issubclass(raster.values.dtype.type, np.floating)):
        raise ValueError(
            "`raster` must be an array of integers or float")
    # create kernel mask array
    kernel_values = kernel.to_array(raster)
    # apply kernel to raster values
    out = _apply(raster.values.astype(float), kernel_values, func)
    result = DataArray(out,
                       coords=raster.coords,
                       dims=raster.dims,
                       attrs=raster.attrs)
    return result
@ngjit
def _hotspots(z_array):
    """Per-cell hotspot code: sign (+1/-1/0) times confidence (99/95/90/0),
    yielding the 7-value encoding documented on `hotspots`."""
    out = np.zeros_like(z_array, dtype=np.int8)
    rows, cols = z_array.shape
    for y in prange(rows):
        for x in prange(cols):
            out[y, x] = _hot_cold(z_array[y, x]) * _confidence(z_array[y, x])
    return out
def hotspots(raster, kernel):
    """Identify statistically significant hot spots and cold spots in an input
    raster. To be a statistically significant hot spot, a feature will have a
    high value and be surrounded by other features with high values as well.
    Neighborhood of a feature defined by the input kernel, which currently
    support a shape of circle and a radius in meters.
    The result should be a raster with the following 7 values:
    90 for 90% confidence high value cluster
    95 for 95% confidence high value cluster
    99 for 99% confidence high value cluster
    -90 for 90% confidence low value cluster
    -95 for 95% confidence low value cluster
    -99 for 99% confidence low value cluster
    0 for no significance
    Parameters
    ----------
    raster: xarray.DataArray
        Input raster image with shape=(height, width)
    kernel: Kernel
    Returns
    -------
    hotspots: xarray.DataArray
    """
    # validate raster
    if not isinstance(raster, DataArray):
        raise TypeError("`raster` must be instance of DataArray")
    if raster.ndim != 2:
        raise ValueError("`raster` must be 2D")
    # fix: np.float was removed in NumPy 1.24; np.floating covers all float dtypes
    if not (issubclass(raster.values.dtype.type, np.integer) or
            issubclass(raster.values.dtype.type, np.floating)):
        raise ValueError(
            "`raster` must be an array of integers or float")
    # create kernel mask array
    kernel_values = kernel.to_array(raster)
    # apply kernel to raster values
    mean_array = _apply(raster.values.astype(float), kernel_values, calc_mean)
    # calculate z-scores against the global statistics of the raster
    global_mean = np.nanmean(raster.values)
    global_std = np.nanstd(raster.values)
    if global_std == 0:
        raise ZeroDivisionError("Standard deviation "
                                "of the input raster values is 0.")
    z_array = (mean_array - global_mean) / global_std
    out = _hotspots(z_array)
    result = DataArray(out,
                       coords=raster.coords,
                       dims=raster.dims,
                       attrs=raster.attrs)
    return result
| [
"re.split",
"numpy.nanstd",
"xrspatial.utils.lnglat_to_meters",
"numpy.nanmean",
"numpy.linspace",
"warnings.warn",
"xarray.DataArray",
"warnings.simplefilter",
"numba.prange",
"numpy.nansum",
"numpy.zeros_like"
] | [((212, 244), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (233, 244), False, 'import warnings\n'), ((4623, 4667), 'numpy.linspace', 'np.linspace', (['(-half_w)', 'half_w', '(2 * half_w + 1)'], {}), '(-half_w, half_w, 2 * half_w + 1)\n', (4634, 4667), True, 'import numpy as np\n'), ((6047, 6066), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (6060, 6066), True, 'import numpy as np\n'), ((7288, 7364), 'xarray.DataArray', 'DataArray', (['out'], {'name': 'name', 'dims': 'agg.dims', 'coords': 'agg.coords', 'attrs': 'agg.attrs'}), '(out, name=name, dims=agg.dims, coords=agg.coords, attrs=agg.attrs)\n', (7297, 7364), False, 'from xarray import DataArray\n'), ((7428, 7445), 'numpy.nanmean', 'np.nanmean', (['array'], {}), '(array)\n', (7438, 7445), True, 'import numpy as np\n'), ((7487, 7503), 'numpy.nansum', 'np.nansum', (['array'], {}), '(array)\n', (7496, 7503), True, 'import numpy as np\n'), ((8164, 8183), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (8177, 8183), True, 'import numpy as np\n'), ((8320, 8365), 'numpy.zeros_like', 'np.zeros_like', (['kernel_array'], {'dtype': 'data.dtype'}), '(kernel_array, dtype=data.dtype)\n', (8333, 8365), True, 'import numpy as np\n'), ((8380, 8392), 'numba.prange', 'prange', (['rows'], {}), '(rows)\n', (8386, 8392), False, 'from numba import prange\n'), ((9701, 9775), 'xarray.DataArray', 'DataArray', (['out'], {'coords': 'raster.coords', 'dims': 'raster.dims', 'attrs': 'raster.attrs'}), '(out, coords=raster.coords, dims=raster.dims, attrs=raster.attrs)\n', (9710, 9775), False, 'from xarray import DataArray\n'), ((9907, 9944), 'numpy.zeros_like', 'np.zeros_like', (['z_array'], {'dtype': 'np.int8'}), '(z_array, dtype=np.int8)\n', (9920, 9944), True, 'import numpy as np\n'), ((9989, 10001), 'numba.prange', 'prange', (['rows'], {}), '(rows)\n', (9995, 10001), False, 'from numba import prange\n'), ((11733, 11758), 'numpy.nanmean', 'np.nanmean', 
(['raster.values'], {}), '(raster.values)\n', (11743, 11758), True, 'import numpy as np\n'), ((11776, 11800), 'numpy.nanstd', 'np.nanstd', (['raster.values'], {}), '(raster.values)\n', (11785, 11800), True, 'import numpy as np\n'), ((12044, 12118), 'xarray.DataArray', 'DataArray', (['out'], {'coords': 'raster.coords', 'dims': 'raster.dims', 'attrs': 'raster.attrs'}), '(out, coords=raster.coords, dims=raster.dims, attrs=raster.attrs)\n', (12053, 12118), False, 'from xarray import DataArray\n'), ((3069, 3155), 'warnings.warn', 'warnings.warn', (['"""Raster distance unit not provided. Use meter as default."""', 'Warning'], {}), "('Raster distance unit not provided. Use meter as default.',\n Warning)\n", (3082, 3155), False, 'import warnings\n'), ((4271, 4299), 'xrspatial.utils.lnglat_to_meters', 'lnglat_to_meters', (['lon0', 'lat0'], {}), '(lon0, lat0)\n', (4287, 4299), False, 'from xrspatial.utils import lnglat_to_meters\n'), ((4319, 4347), 'xrspatial.utils.lnglat_to_meters', 'lnglat_to_meters', (['lon1', 'lat1'], {}), '(lon1, lat1)\n', (4335, 4347), False, 'from xrspatial.utils import lnglat_to_meters\n'), ((4724, 4768), 'numpy.linspace', 'np.linspace', (['(-half_h)', 'half_h', '(2 * half_h + 1)'], {}), '(-half_h, half_h, 2 * half_h + 1)\n', (4735, 4768), True, 'import numpy as np\n'), ((8411, 8423), 'numba.prange', 'prange', (['cols'], {}), '(cols)\n', (8417, 8423), False, 'from numba import prange\n'), ((10020, 10032), 'numba.prange', 'prange', (['cols'], {}), '(cols)\n', (10026, 10032), False, 'from numba import prange\n'), ((1468, 1554), 'warnings.warn', 'warnings.warn', (['"""Raster distance unit not provided. Use meter as default."""', 'Warning'], {}), "('Raster distance unit not provided. Use meter as default.',\n Warning)\n", (1481, 1554), False, 'import warnings\n'), ((1241, 1272), 're.split', 're.split', (['"""(-?\\\\d*\\\\.?\\\\d+)"""', 's'], {}), "('(-?\\\\d*\\\\.?\\\\d+)', s)\n", (1249, 1272), False, 'import re\n')] |
import copy
import os
import random
import numpy as np
import torch
import torch.nn as nn
from torch import fx
from torchvision.models import MNASNet, MobileNetV3, ShuffleNetV2
from torchvision.models.densenet import _DenseLayer
def matches_module_pattern(pattern, node, modules):
    """True when ``node.args[0] -> node`` is a pair of ``call_module`` fx nodes
    whose module types exactly match ``pattern`` (e.g. (Conv2d, BatchNorm2d))."""
    if len(node.args) == 0:
        return False
    candidates = (node.args[0], node)
    for expected_type, candidate in zip(pattern, candidates):
        matches = (
            isinstance(candidate, fx.Node)
            and candidate.op == 'call_module'
            and isinstance(candidate.target, str)
            and candidate.target in modules
            # exact type match (no subclasses), as required for BN folding
            and type(modules[candidate.target]) is expected_type
        )
        if not matches:
            return False
    return True
def set_seed(seed):
    """Seed every RNG source (hash, python, numpy, torch CPU/GPU) and force
    deterministic cudnn kernels, for reproducible runs."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade cudnn autotuning for determinism
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_previous_layer(node, modules):
    """Return the target name of the closest preceding Conv2d/BatchNorm2d
    module, walking the fx graph backwards from ``node``.

    NOTE(review): the loop returns on its first iteration either way, so only
    the FIRST input of each node is ever explored; returns None implicitly
    when ``node`` has no inputs — confirm this is intended.
    """
    # print("get_previous_layer")
    for input_node in node.all_input_nodes:
        # print(input_node.name)
        if input_node.target in modules and isinstance(modules[input_node.target], (nn.Conv2d, nn.BatchNorm2d)):
            return input_node.target
        else:
            return get_previous_layer(input_node, modules)
def get_pinned_out(model):
    """Collect names of layers whose output channels must be kept ("pinned"):
    (a) the layer feeding a grouped convolution, and (b) any Conv2d/BatchNorm2d
    (or its closest Conv/BN predecessor) feeding a node with multiple inputs
    or multiple users (e.g. residual junctions).

    Best-effort: returns [] when fx symbolic tracing fails.
    """
    pinned_out = []
    try:
        # trace a deep copy so tracing cannot mutate the caller's model
        fx_model = fx.symbolic_trace(copy.deepcopy(model))
        modules = dict(fx_model.named_modules())
        last_module = None
        for i, node in enumerate(fx_model.graph.nodes):
            # print(node.name)
            if node.target in modules and isinstance(modules[node.target], nn.Conv2d):
                # grouped conv: its input channel layout must be preserved
                if modules[node.target].groups > 1 and last_module is not None:
                    if last_module.target is not None and last_module.target not in pinned_out:
                        pinned_out.append(last_module.target)
                last_module = node
            if i > 0 and (len(node.all_input_nodes) > 1 or len(node.users) > 1):
                for input_node in node.all_input_nodes:
                    if input_node.target in modules and isinstance(modules[input_node.target],
                                                                   (nn.Conv2d, nn.BatchNorm2d)):
                        if input_node.target is not None and input_node.target not in pinned_out:
                            pinned_out.append(input_node.target)
                    else:
                        previous_layer = get_previous_layer(input_node, modules)
                        if previous_layer is not None and previous_layer not in pinned_out:
                            pinned_out.append(previous_layer)
    except Exception as e:
        # NOTE(review): broad swallow keeps this best-effort — untraceable
        # models simply pin nothing
        pass
    return pinned_out
def get_bn_folding(model):
    """Return pairs ``[conv_name, bn_name]`` of Conv2d -> BatchNorm2d layers
    eligible for batch-norm folding.

    Tries fx tracing first; when tracing fails, falls back to a linear scan of
    ``named_modules`` that pairs each Conv/Linear with the next BatchNorm2d of
    matching channel count.
    """
    bn_folding = []
    try:
        patterns = [(torch.nn.Conv2d, torch.nn.BatchNorm2d)]
        fx_model = fx.symbolic_trace(model)
        modules = dict(fx_model.named_modules())
        for pattern in patterns:
            for node in fx_model.graph.nodes:
                if matches_module_pattern(pattern, node, modules):
                    # skip convs whose output feeds more than one consumer
                    if len(node.args[0].users) > 1:
                        continue
                    bn_folding.append([node.args[0].target, node.target])
    except Exception as e:
        # fallback path for untraceable models
        last_module = None
        for name, module in model.named_modules():
            if isinstance(module, _DenseLayer):
                # reset at DenseNet layer boundaries — presumably to avoid
                # pairing across dense-block concatenations (confirm)
                last_module = None
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                last_module = (name, module)
            if isinstance(module, nn.BatchNorm2d):
                if last_module is not None and last_module[1].weight.shape[0] == module.weight.shape[0]:
                    bn_folding.append([last_module[0], name])
    return bn_folding
def get_previous_layer_2(connections, module):
    """Walk the ``connections`` graph backwards from ``module`` until the
    nearest node whose class is Conv2d or BatchNorm2d, and return its name."""
    for name in connections:
        info = connections[name]
        if module in info["next"]:
            if isinstance(info["class"], (nn.Conv2d, nn.BatchNorm2d)):
                return name
            # predecessor is not Conv/BN — keep walking upstream from it
            return get_previous_layer_2(connections, name)
def get_pinned(model):
    """Alternative pinned-layer detection built from an explicit connection
    graph, with model-specific special cases (MNASNet layer.6, MobileNetV3
    fc2, ShuffleNetV2 branch convs) — returns module names (dot-separated)
    that must not be pruned.
    """
    fx_model = fx.symbolic_trace(copy.deepcopy(model))
    modules = dict(fx_model.named_modules())
    connections = {}
    # Build dictionary node -> list of connected nodes
    for i, node in enumerate(fx_model.graph.nodes):
        # print(f"{node.name}->{[str(user) for user in node.users]}")
        if node.target in modules:
            module = modules[node.target]
        else:
            module = None
        connections[node.name] = {"next": [str(user) for user in node.users], "class": module}
    # Remove duplicates and build list of "to-pin" nodes (may contain nodes not CONV nor BN)
    same_next = []
    for k in connections:
        for k2 in connections:
            if k != k2:
                # two nodes feeding the same "add" are residual partners
                if "add" in str(set(connections[k]["next"]) & set(connections[k2]["next"])):
                    same_next.append([k, k2])
    same_next = set([item for sublist in same_next for item in sublist])
    # Add input node of CONV with grouping, layer.6 for MNASNet and fc2 for MobileNetV3
    for i, node in enumerate(fx_model.graph.nodes):
        if (isinstance(model, MobileNetV3) and "fc2" in node.name) or \
           (isinstance(model, ShuffleNetV2) and (node.name == "conv1_1" or
                                                 "branch1_3" in node.name or
                                                 "branch2_1" in node.name or
                                                 "branch2_6" in node.name)):
            same_next.add(str(node.name))
        # fx node names use '_' where module names use '.'
        name = node.name.replace("_", ".")
        if name in modules:
            module = modules[name]
            if isinstance(module, nn.Conv2d) and module.groups > 1:
                same_next.add(str(node.prev))
    # For each node not CONV nor BN recover the closest previous CONV or BN
    to_pin = []
    for m in same_next:
        if not isinstance(connections[m]["class"], (nn.Conv2d, nn.BatchNorm2d)):
            to_pin.append(get_previous_layer_2(connections, m))
        else:
            to_pin.append(m)
    return [n.replace("_", ".") for n in list(set(to_pin))]
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.fx.symbolic_trace",
"random.seed",
"numpy.random.seed",
"copy.deepcopy",
"torch.cuda.manual_seed"
] | [((856, 873), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (867, 873), False, 'import random\n'), ((923, 943), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (937, 943), True, 'import numpy as np\n'), ((948, 976), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (970, 976), False, 'import torch\n'), ((981, 1013), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1007, 1013), False, 'import torch\n'), ((1107, 1130), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1124, 1130), False, 'import torch\n'), ((3140, 3164), 'torch.fx.symbolic_trace', 'fx.symbolic_trace', (['model'], {}), '(model)\n', (3157, 3164), False, 'from torch import fx\n'), ((4463, 4483), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (4476, 4483), False, 'import copy\n'), ((1601, 1621), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (1614, 1621), False, 'import copy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 22:32:51 2020
@author: johna
"""
import os
import numpy as np
import datetime
import image_prep
import model
#import predict_pretrained needs rework
import createdicomfile
"""
Main script for handling dataflow for automatic contour generation for clinicians
INPUT RECEIVED FROM BROWSER:
Folder with DICOM image files
"""
def generate_ss(imagefolder,outputfolder,username):
    """Run the per-ROI CNN segmentation models over a DICOM image folder and
    write an ``RS.<username>-CNN.dcm`` structure-set file into ``outputfolder``.
    """
    # per-ROI probability cutoffs used when binarising network output
    thresholds = {"BrainStem":0.2, "CochleaL":0.2, "CochleaR":0.2,
                  "ParotidL":0.33, "ParotidR":0.33, "SubmandibularL":0.1,
                  "SubmandibularR":0.1,"BrachialPlexus":0.1, "Brain":0.66,
                  "Larynx":0.5, "SpinalCord":0.1}
    region = "Head and Neck"
    # CT window/level presets as [window, level]
    filters = {"bone":[2000, 400], "tissue":[400,40],"none":[4500,1000]}
    datalist = image_prep.get_list_of_datasets(imagefolder)
    inputarray, heightlist = image_prep.build_array(datalist,image_size=256,pixel_size=1)
    # NOTE(review): ROIlist/weightspaths are only defined for this one region;
    # any other `region` value would raise NameError below.
    if region == "Head and Neck":
        ROIlist = ["BrachialPlexus","Brain","CochleaL","CochleaR","Larynx","ParotidL","ParotidR","SpinalCord",
                   "BrainStem","SubmandibularL","SubmandibularR"]
        # NOTE(review): hard-coded local Windows paths — confirm deployment layout
        weightspaths = {"Axial":"F:\\machine learning misc\\weights\\3D\\Axial",
                        "Coronal":"F:\\machine learning misc\\weights\\3D\\Coronal",
                        "Sagittal":"F:\\machine learning misc\\weights\\3D\\Sagittal"}
    image_size = 256
    AxialModel = model.get_unet(image_size)
    structuresetdata = []
    AxialInput = np.copy(inputarray)
    for ROI in ROIlist:
        print("Beginning work on",ROI)
        threshold = thresholds[ROI]
        # load this ROI's axial weights (first file in its weights folder)
        axialweightspath = os.path.join(weightspaths["Axial"], ROI)
        AxialModel.load_weights(os.path.join(axialweightspath,os.listdir(axialweightspath)[0]))
        if ROI == "BrachialPlexus" or ROI == "SpinalCord": #this is the spot to edit if we want to change how filters are applied
            win_lev = "Bone"
        else:
            win_lev = "Tissue"
        if win_lev == "Tissue":
            filtAxialInput = image_prep.apply_window_level(AxialInput,filters["tissue"][0],filters["tissue"][1])
        elif win_lev == "Bone":
            filtAxialInput = image_prep.apply_window_level(AxialInput,filters["bone"][0],filters["bone"][1])
        elif win_lev == "None":
            filtAxialInput = image_prep.apply_window_level(AxialInput,filters["none"][0],filters["none"][1])
        AxialOutput = AxialModel.predict(filtAxialInput,verbose=0)
        structuresetdata.append([ROI,AxialOutput,heightlist]) #if returning to 3D, change AxialOutput to combinedoutput
    # NOTE(review): `threshold` below is whatever value was left by the LAST
    # loop iteration, so a single cutoff is applied to every ROI in
    # create_dicom — confirm this is intended.
    patient_data,UIDdict = createdicomfile.gather_patient_data(imagefolder)
    structure_set = createdicomfile.create_dicom(patient_data,UIDdict,structuresetdata,image_size=256,threshold=threshold)
    filename = "RS.%s-CNN.dcm" % username
    structure_set.save_as(os.path.join(outputfolder, filename), write_like_original=False)
| [
"numpy.copy",
"os.listdir",
"createdicomfile.gather_patient_data",
"model.get_unet",
"os.path.join",
"image_prep.get_list_of_datasets",
"image_prep.build_array",
"image_prep.apply_window_level",
"createdicomfile.create_dicom"
] | [((839, 883), 'image_prep.get_list_of_datasets', 'image_prep.get_list_of_datasets', (['imagefolder'], {}), '(imagefolder)\n', (870, 883), False, 'import image_prep\n'), ((913, 975), 'image_prep.build_array', 'image_prep.build_array', (['datalist'], {'image_size': '(256)', 'pixel_size': '(1)'}), '(datalist, image_size=256, pixel_size=1)\n', (935, 975), False, 'import image_prep\n'), ((1497, 1523), 'model.get_unet', 'model.get_unet', (['image_size'], {}), '(image_size)\n', (1511, 1523), False, 'import model\n'), ((1578, 1597), 'numpy.copy', 'np.copy', (['inputarray'], {}), '(inputarray)\n', (1585, 1597), True, 'import numpy as np\n'), ((2781, 2829), 'createdicomfile.gather_patient_data', 'createdicomfile.gather_patient_data', (['imagefolder'], {}), '(imagefolder)\n', (2816, 2829), False, 'import createdicomfile\n'), ((2850, 2960), 'createdicomfile.create_dicom', 'createdicomfile.create_dicom', (['patient_data', 'UIDdict', 'structuresetdata'], {'image_size': '(256)', 'threshold': 'threshold'}), '(patient_data, UIDdict, structuresetdata,\n image_size=256, threshold=threshold)\n', (2878, 2960), False, 'import createdicomfile\n'), ((1739, 1779), 'os.path.join', 'os.path.join', (["weightspaths['Axial']", 'ROI'], {}), "(weightspaths['Axial'], ROI)\n", (1751, 1779), False, 'import os\n'), ((3031, 3067), 'os.path.join', 'os.path.join', (['outputfolder', 'filename'], {}), '(outputfolder, filename)\n', (3043, 3067), False, 'import os\n'), ((2170, 2260), 'image_prep.apply_window_level', 'image_prep.apply_window_level', (['AxialInput', "filters['tissue'][0]", "filters['tissue'][1]"], {}), "(AxialInput, filters['tissue'][0], filters[\n 'tissue'][1])\n", (2199, 2260), False, 'import image_prep\n'), ((2316, 2402), 'image_prep.apply_window_level', 'image_prep.apply_window_level', (['AxialInput', "filters['bone'][0]", "filters['bone'][1]"], {}), "(AxialInput, filters['bone'][0], filters[\n 'bone'][1])\n", (2345, 2402), False, 'import image_prep\n'), ((1852, 1880), 'os.listdir', 
'os.listdir', (['axialweightspath'], {}), '(axialweightspath)\n', (1862, 1880), False, 'import os\n'), ((2458, 2544), 'image_prep.apply_window_level', 'image_prep.apply_window_level', (['AxialInput', "filters['none'][0]", "filters['none'][1]"], {}), "(AxialInput, filters['none'][0], filters[\n 'none'][1])\n", (2487, 2544), False, 'import image_prep\n')] |
from vispy import scene, color as coloring
from SimulationState import GridDimensions, BoundaryConditionType, BoundaryConditionSetter
import numpy as np
class QuantumSimulationLines():
    """Bundle of the four line visuals shown for a quantum simulation:
    editable Re/Im/Hamiltonian curves plus a display-only |psi|^2 curve."""
    def __init__(self, gridExtents : GridDimensions):
        # editable curves, each bound to its own mouse button and colour
        self.Real = EditableSimulationLine.InitialLine(1, "blue", gridExtents)
        self.Imaginary = EditableSimulationLine.InitialLine(2, "red", gridExtents)
        self.Hamiltonian = EditableSimulationLine.InitialLine(3, "green", gridExtents)
        # modulus-squared is display-only, hence a plain (non-editable) Line
        flat = np.zeros((gridExtents.NumPoints, 3), dtype=np.float32)
        self.ModulusSquared = scene.visuals.Line(pos=flat, color='w', width=3,
                                                 antialias=True, method='gl')
    def AddToView(self, view: scene.widgets.ViewBox):
        """Attach all four lines to the given view box."""
        for line in (self.Real, self.Imaginary, self.Hamiltonian, self.ModulusSquared):
            view.add(line)
class EditableSimulationLine(scene.visuals.Line):
    """
    Mouse editable extension to the Line visual.
    This class adds mouse picking for line points, mouse_move handling for dragging existing points.
    """
    # NOTE(review): the default `gridExtents` and `boundaryConditionSetter`
    # objects are created once at definition time and shared by every call
    # that relies on the defaults — confirm they are never mutated.
    def __init__(self, buttonToUseIndex = -1, gridExtents = GridDimensions(0, 0), edgeColor ="red",
                 boundaryConditionSetter = BoundaryConditionSetter(BoundaryConditionType.Default), *args, **kwargs):
        scene.visuals.Line.__init__(self, *args, **kwargs)
        # initialize point markers
        self.edgeColor = coloring.Color(edgeColor)
        self.markers = scene.visuals.Markers()
        self.marker_colors = np.ones((len(self.pos), 4), dtype=np.float32)
        self.markers.set_data(pos=self.pos, symbol="s", edge_color=edgeColor, size=6)
        self.selected_point = None
        self.selected_index = -1
        # mouse button that edits this particular line
        self.buttonToUse = buttonToUseIndex
        self.gridExtents = gridExtents
        self.boundaryConditionSetter = boundaryConditionSetter
        # editing is only allowed before the simulation starts
        self.beforeSimulation = True
        # NOTE(review): self.lastMouseDownPosition is first assigned in the
        # mouse handlers — a mouse_move arriving before any press would raise
        # AttributeError; confirm the event order guarantees this cannot happen.
    @classmethod
    def InitialLine(cls, buttonToUseIndex, edgeColor, gridExtents, *args, **kwargs):
        """Alternate constructor: a flat (all-zero) line over the grid with
        square-well wavefunction boundary conditions."""
        return cls(buttonToUseIndex, gridExtents, edgeColor, BoundaryConditionSetter(BoundaryConditionType.SquareWellWavefunction),
                   pos=np.zeros((gridExtents.NumPoints, 3), dtype=np.float32), color=edgeColor, width=3, antialias=True, method='gl')
    def draw(self, transforms):
        # draw line and markers
        scene.visuals.Line.draw(self, transforms)
        self.markers.draw(transforms)
    def select_point_by_x_position_and_mouse_button(self, event):
        """ If the appropriate mouse button is pressed, get the relevant point of this line from the x-position of the mouse """
        if event.button == self.buttonToUse:
            # position in scene/document coordinates
            pos_scene = event.pos[:3]
            return self.get_closest_point_by_x(pos_scene)
        # no point found, return None
        return None, -1
    def get_closest_point_by_x(self, pos):
        """Finds the closest point by x-position, within the point grid spacing.

        Returns (point, index); point is None when the x falls outside the grid.
        """
        xPos = pos[0]
        # map scene x onto the uniform grid index range [0, NumPoints-1]
        index = (xPos - self.gridExtents.BoxMin) * (self.gridExtents.NumPoints - 1) / (self.gridExtents.BoxWidth)
        index = int(round(index))
        if(index >= 0 and index < self.gridExtents.NumPoints):
            return self.pos[index], index
        else:
            return None, index
    def select_point(self, event):
        """Selects a point on this line, if appropriate"""
        return self.select_point_by_x_position_and_mouse_button(event)
    def update_markers(self, selected_index=-1):
        """ update marker colors, and highlight a marker with a given color """
        self.marker_colors.fill(1)#fill with white
        # default shape and size (non-highlighted)
        shape = "o"
        size = 6
        if 0 <= selected_index < len(self.marker_colors):
            self.marker_colors[selected_index] = self.edgeColor.rgba
            # if there is a highlighted marker, change all marker shapes to a square shape and larger size
            shape = "s"
            size = 8
        self.markers.set_data(pos=self.pos, symbol=shape, edge_color=self.edgeColor,
                              size=size, face_color=self.marker_colors)
    def on_mouse_press(self, event):
        """Select (and highlight) the point under the cursor when our button is pressed."""
        if self.beforeSimulation:
            pos_scene = event.pos[:3]
            if event.button == self.buttonToUse:
                # find closest point to mouse and select it
                self.selected_point, self.selected_index = self.select_point(event)
                self.update_markers(self.selected_index)
                self.lastMouseDownPosition = pos_scene
    def on_mouse_release(self, event):
        """Clear the selection and highlight when the mouse is released."""
        if self.beforeSimulation:
            #self.print_mouse_event(event, 'Mouse release')
            self.selected_point = None
            self.update_markers()
            self.lastMouseDownPosition = None
    def on_mouse_move(self, event):
        """Drag handler: moves the point under the cursor, interpolating across
        every grid index skipped between the previous and current positions so
        fast drags do not leave gaps."""
        if self.beforeSimulation:
            eventPos = event.pos[:3]
            if event.button == self.buttonToUse:
                if self.lastMouseDownPosition is not None:
                    lastPoint, lastIndex = self.get_closest_point_by_x(self.lastMouseDownPosition)
                    newPoint, newIndex = self.get_closest_point_by_x(eventPos)
                    indexDifference = newIndex - lastIndex
                    if indexDifference == 0:
                        self.try_select_and_update_point(event, eventPos)
                    else:
                        step = -1 if indexDifference < 0 else 1
                        indicesToTry = range(lastIndex, newIndex, step)
                        for index in indicesToTry:
                            if index >= 0 and index < self.gridExtents.NumPoints:
                                # linear interpolation between the two mouse positions
                                fraction = (index - lastIndex) / (newIndex - lastIndex)
                                interpolated = self.lastMouseDownPosition + (eventPos - self.lastMouseDownPosition) * fraction
                                self.selected_point = self.pos[index]
                                self.selected_index = index
                                self.updatePoint(interpolated)
                else:
                    # find closest point to mouse and select it
                    self.try_select_and_update_point(event, eventPos)
                self.lastMouseDownPosition = eventPos
            else:
                self.lastMouseDownPosition = None
    def try_select_and_update_point(self, event, eventPos):
        """Try to select and update a point based on event position"""
        self.selected_point, self.selected_index = self.select_point(event)
        if self.selected_point is not None:
            self.updatePoint(eventPos)
    def updatePoint(self, pos_scene):
        """Update currently-selected point to new position given by scene position"""
        self.selected_point[1] = pos_scene[1]
        # re-apply boundary conditions after every edit
        self.boundaryConditionSetter.UpdateLineBoundaryConditions(self)
        self.set_data(pos=self.pos)
        self.update_markers(self.selected_index)
    def update_after_simulation(self):
        """Refresh the line and markers after a simulation step changed self.pos."""
        self.set_data(pos=self.pos)
        self.markers.set_data(pos=self.pos, symbol='o', edge_color=self.edgeColor, size=6, face_color=self.marker_colors)
"SimulationState.BoundaryConditionSetter",
"vispy.scene.visuals.Line.draw",
"vispy.scene.visuals.Line.__init__",
"SimulationState.GridDimensions",
"numpy.zeros",
"vispy.color.Color",
"vispy.scene.visuals.Markers"
] | [((1151, 1171), 'SimulationState.GridDimensions', 'GridDimensions', (['(0)', '(0)'], {}), '(0, 0)\n', (1165, 1171), False, 'from SimulationState import GridDimensions, BoundaryConditionType, BoundaryConditionSetter\n'), ((1234, 1288), 'SimulationState.BoundaryConditionSetter', 'BoundaryConditionSetter', (['BoundaryConditionType.Default'], {}), '(BoundaryConditionType.Default)\n', (1257, 1288), False, 'from SimulationState import GridDimensions, BoundaryConditionType, BoundaryConditionSetter\n'), ((1316, 1366), 'vispy.scene.visuals.Line.__init__', 'scene.visuals.Line.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1343, 1366), False, 'from vispy import scene, color as coloring\n'), ((1428, 1453), 'vispy.color.Color', 'coloring.Color', (['edgeColor'], {}), '(edgeColor)\n', (1442, 1453), True, 'from vispy import scene, color as coloring\n'), ((1477, 1500), 'vispy.scene.visuals.Markers', 'scene.visuals.Markers', ([], {}), '()\n', (1498, 1500), False, 'from vispy import scene, color as coloring\n'), ((2355, 2396), 'vispy.scene.visuals.Line.draw', 'scene.visuals.Line.draw', (['self', 'transforms'], {}), '(self, transforms)\n', (2378, 2396), False, 'from vispy import scene, color as coloring\n'), ((2077, 2146), 'SimulationState.BoundaryConditionSetter', 'BoundaryConditionSetter', (['BoundaryConditionType.SquareWellWavefunction'], {}), '(BoundaryConditionType.SquareWellWavefunction)\n', (2100, 2146), False, 'from SimulationState import GridDimensions, BoundaryConditionType, BoundaryConditionSetter\n'), ((542, 596), 'numpy.zeros', 'np.zeros', (['(gridExtents.NumPoints, 3)'], {'dtype': 'np.float32'}), '((gridExtents.NumPoints, 3), dtype=np.float32)\n', (550, 596), True, 'import numpy as np\n'), ((2171, 2225), 'numpy.zeros', 'np.zeros', (['(gridExtents.NumPoints, 3)'], {'dtype': 'np.float32'}), '((gridExtents.NumPoints, 3), dtype=np.float32)\n', (2179, 2225), True, 'import numpy as np\n')] |
# @author <NAME> <<EMAIL>>, Interactive Robotics Lab, Arizona State University
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
# import tensorflow_probability as tfp
import sys
import numpy as np
from utils.graphs import TBoardGraphs
import shutil
import pycurl
import pickle
import os
class Network():
    """Training harness around a trajectory/attention model.

    Wraps the model with an Adam optimizer, runs the train/validation loops,
    logs scalars and plots to TensorBoard via TBoardGraphs, checkpoints the
    model after each epoch (and the best validation model), and optionally
    uploads results to a cloud instance (stubbed out in the public version).
    """
    def __init__(self, model, logname, lr, lw_atn, lw_w, lw_trj, lw_dt, lw_phs, log_freq=25):
        """Create the harness.

        Args:
            model: Model exposing __call__, getVariables() and
                saveModelToFile() (project-specific interface).
            logname: Log directory name. A name of the form "Intel$<name>"
                additionally selects a cloud instance name for uploads.
            lr: Adam learning rate.
            lw_atn: Weight of the attention loss term.
            lw_w: Weight of the (DMP) weight loss term.
            lw_trj: Weight of the trajectory loss term.
            lw_dt: Weight of the delta-t loss term.
            lw_phs: Weight of the phase loss term.
            log_freq: Upload results every log_freq epochs (cloud runs only).
        """
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999)
        self.model = model
        # Steps per epoch; counted during epoch 0, then used as the
        # denominator of the progress bar.
        self.total_steps = 0
        self.logname = logname
        # "Intel$<instance>" encodes a cloud instance name inside the log name.
        if self.logname.startswith("Intel$"):
            self.instance_name = self.logname.split("$")[1]
            self.logname = self.logname.split("$")[0]
        else:
            self.instance_name = None
        self.tboard = TBoardGraphs(self.logname)
        self.loss = tf.keras.losses.CategoricalCrossentropy()
        # Best validation loss seen so far, used for best-model checkpointing.
        self.global_best_loss = 10000
        # Guards against writing validation scalars twice for one step.
        self.last_written_step = -1
        self.log_freq = log_freq
        self.lw_atn = lw_atn
        self.lw_w = lw_w
        self.lw_trj = lw_trj
        self.lw_dt = lw_dt
        self.lw_phs = lw_phs
    def setDatasets(self, train, validate):
        """Store train/validation dataset wrappers (objects exposing .ds)."""
        self.train_ds = train.ds
        self.val_ds = validate.ds
    def train(self, epochs):
        """Run the training loop for `epochs` epochs."""
        self.global_step = 0
        for epoch in range(epochs):
            print("Epoch: {:3d}/{:3d}".format(epoch+1, epochs))
            validation_loss = 0.0
            train_loss = []
            for step, (d_in, d_out) in enumerate(self.train_ds):
                # Quick (single-batch) validation every 100 steps.
                if step % 100 == 0:
                    validation_loss = self.runValidation(quick=True, pnt=False)
                train_loss.append(self.step(d_in, d_out, train=True))
                self.loadingBar(step, self.total_steps, 25, addition="Loss: {:.6f} | {:.6f}".format(np.mean(train_loss[-10:]), validation_loss))
                # The number of steps per epoch is only known after the first
                # epoch has been walked once.
                if epoch == 0:
                    self.total_steps += 1
                self.global_step += 1
            self.loadingBar(self.total_steps, self.total_steps, 25, addition="Loss: {:.6f}".format(np.mean(train_loss)), end=True)
            self.runValidation(quick=False)
            # Checkpoint after every epoch.
            self.model.saveModelToFile(self.logname + "/")
            if epoch % self.log_freq == 0 and self.instance_name is not None:
                self._uploadToCloud(epoch)
        if self.instance_name is not None:
            self._uploadToCloud()
    def _uploadToCloud(self, epoch=None):
        # Not available in public version
        pass
    def _curlUpload(self, path):
        # Not available in public version
        pass
    def runValidation(self, quick=False, pnt=True):
        """Evaluate on the validation set and draw TensorBoard plots.

        Args:
            quick: If True, evaluate only a single batch.
            pnt: If True, print the mean validation loss.

        Returns:
            Mean validation loss over the evaluated batches.
        """
        if not quick:
            print("Running full validation...")
        val_loss = []
        for step, (d_in, d_out) in enumerate(self.val_ds):
            val_loss.append(self.step(d_in, d_out, train=False))
            if quick:
                break
        # Uses the loop variables d_in/d_out after the loop ends, i.e. the
        # last evaluated batch. The first sample is tiled 50 times so the
        # model can be sampled with dropout for the uncertainty plots.
        # NOTE(review): d_out_graphs is built but never used below — confirm
        # whether it was meant to be passed to createGraphs.
        d_in_graphs = (tf.tile(tf.expand_dims(d_in[0][0], 0),[50,1]), tf.tile(tf.expand_dims(d_in[1][0], 0),[50,1,1]), tf.tile(tf.expand_dims(d_in[2][0], 0),[50,1,1]))
        d_out_graphs = (tf.tile(tf.expand_dims(d_out[0][0], 0),[50,1,1]), tf.tile(tf.expand_dims(d_out[1][0], 0),[50,1]),
                        tf.tile(tf.expand_dims([d_out[2][0]], 0),[50,1]), tf.tile(tf.expand_dims(d_out[3][0], 0),[50,1,1]))
        self.createGraphs((d_in[0][0], d_in[1][0], d_in[2][0]),
                          (d_out[0][0], d_out[1][0], d_out[2][0], d_out[3][0]),
                          self.model(d_in_graphs, training=True, use_dropout=True))
        if pnt:
            print("  Validation Loss: {:.6f}".format(np.mean(val_loss)))
        return np.mean(val_loss)
    def step(self, d_in, d_out, train):
        """Run one forward (and, if train, backward) pass.

        Returns:
            The scalar total loss as a numpy value.
        """
        with tf.GradientTape() as tape:
            result = self.model(d_in, training=train)
            loss, (atn, trj, dt, phs, wght) = self.calculateLoss(d_out, result, train)
        if train:
            gradients = tape.gradient(loss, self.model.getVariables(self.global_step))
            self.optimizer.apply_gradients(zip(gradients, self.model.getVariables(self.global_step)))
            self.tboard.addTrainScalar("Loss", loss, self.global_step)
            self.tboard.addTrainScalar("Loss Attention", atn, self.global_step)
            self.tboard.addTrainScalar("Loss Trajectory", trj, self.global_step)
            self.tboard.addTrainScalar("Loss Phase", phs, self.global_step)
            self.tboard.addTrainScalar("Loss Weight", wght, self.global_step)
            self.tboard.addTrainScalar("Loss Delta T", dt, self.global_step)
        else:
            # Only write validation scalars once per global step (quick
            # validation may run several times between optimizer steps).
            if self.last_written_step != self.global_step:
                self.last_written_step = self.global_step
                self.tboard.addValidationScalar("Loss", loss, self.global_step)
                self.tboard.addValidationScalar("Loss Attention", atn, self.global_step)
                self.tboard.addValidationScalar("Loss Trajectory", trj, self.global_step)
                self.tboard.addValidationScalar("Loss Phase", phs, self.global_step)
                self.tboard.addValidationScalar("Loss Weight", wght, self.global_step)
                self.tboard.addValidationScalar("Loss Delta T", dt, self.global_step)
                # Checkpoint whenever a new best validation loss is reached.
                if loss < self.global_best_loss:
                    self.global_best_loss = loss
                    self.model.saveModelToFile(self.logname + "/best/")
        return loss.numpy()
    def interpolateTrajectory(self, trj, target):
        """Resample each trajectory to `target` timesteps via linear interp.

        Args:
            trj: Array of shape (batch, time, dims).
            target: Desired number of timesteps.

        Returns:
            float32 array of shape (batch, target, dims).
        """
        batch_size = trj.shape[0]
        current_length = trj.shape[1]
        dimensions = trj.shape[2]
        result = np.zeros((batch_size, target, dimensions), dtype=np.float32)
        for b in range(batch_size):
            for i in range(dimensions):
                result[b,:,i] = np.interp(np.linspace(0.0, 1.0, num=target), np.linspace(0.0, 1.0, num=current_length), trj[b,:,i])
        return result
    def calculateMSEWithPaddingMask(self, y_true, y_pred, mask):
        """Mean squared error over the last axis, zeroed where mask is 0."""
        mse = tf.math.pow(y_true - y_pred, 2.0)
        mse = tf.math.multiply(mse, mask)
        # Average over the last dimension manually (mask already applied).
        n = mse.shape[-1]
        mse = (1.0 / n) * tf.reduce_sum(mse, axis=-1)
        return mse
    def calculateLoss(self, d_out, result, train):
        """Combine the weighted loss terms.

        Returns:
            Tuple of (total loss, (attention, trajectory, delta-t, phase,
            weight) individual loss terms).
        """
        gen_trj, (atn, dmp_dt, phs, wght) = result
        # NOTE(review): `weights` is unpacked but never used here.
        generated, attention, delta_t, weights, phase, loss_atn = d_out
        # Per-dimension weighting of the trajectory error.
        weight_dim = [3.0, 3.0, 3.0, 1.0, 0.5, 1.0, 0.1]
        atn_loss = self.loss(y_true=attention, y_pred=atn)
        dt_loss = tf.math.reduce_mean(tf.keras.metrics.mean_squared_error(delta_t, dmp_dt[:,0]))
        # NOTE(review): hard-coded tile of [16, 350, 1] appears to assume
        # batch size 16 and max sequence length 350 — confirm against the
        # dataset configuration.
        trj_loss = self.calculateMSEWithPaddingMask(generated, gen_trj, tf.tile([[weight_dim]], [16, 350, 1]))
        trj_loss = tf.reduce_mean(tf.math.multiply(trj_loss, loss_atn), axis=1)
        trj_loss = tf.reduce_mean(trj_loss)
        phs_loss = tf.math.reduce_mean(self.calculateMSEWithPaddingMask(phase, phs[:,:,0], loss_atn))
        # Penalize change between consecutive weight vectors (shifted diff).
        weight_loss = tf.math.reduce_mean(tf.keras.metrics.mean_squared_error(wght[:,:-1,:,:], tf.roll(wght, shift=-1, axis=1)[:,:-1,:,:]), axis=-1)
        weight_loss = tf.math.reduce_mean(tf.math.multiply(weight_loss, loss_atn[:,:-1]))
        return (atn_loss * self.lw_atn +
                trj_loss * self.lw_trj +
                phs_loss * self.lw_phs +
                weight_loss * self.lw_w +
                dt_loss * self.lw_dt,
                (atn_loss, trj_loss, dt_loss, phs_loss, weight_loss)
                )
    def loadingBar(self, count, total, size, addition="", end=False):
        """Render a textual progress bar in place on stdout."""
        if total == 0:
            percent = 0
        else:
            percent = float(count) / float(total)
        full = int(percent * size)
        fill = size - full
        print("\r {:5d}/{:5d} [".format(count, total) + "#" * full + " " * fill + "] " + addition, end="")
        if end:
            print("")
        sys.stdout.flush()
    def createGraphs(self, d_in, d_out, result):
        """Plot attention accuracy and trajectory statistics to TensorBoard.

        `result` is expected to stem from 50 dropout samples of one input
        (see runValidation), so mean/std over axis 0 give the model's
        predictive mean and uncertainty.
        """
        language, image, robot_states = d_in
        target_trj, attention, delta_t, weights = d_out
        gen_trj, (atn, dmp_dt, phase, wght) = result
        self.tboard.plotClassAccuracy(attention, tf.math.reduce_mean(atn, axis=0), tf.math.reduce_std(atn, axis=0), language, stepid=self.global_step)
        self.tboard.plotDMPTrajectory(target_trj, tf.math.reduce_mean(gen_trj, axis=0), tf.math.reduce_std(gen_trj, axis=0),
                                      tf.math.reduce_mean(phase, axis=0), delta_t, tf.math.reduce_mean(dmp_dt, axis=0), stepid=self.global_step)
| [
"tensorflow.keras.metrics.mean_squared_error",
"numpy.mean",
"tensorflow.math.pow",
"tensorflow.tile",
"tensorflow.expand_dims",
"tensorflow.roll",
"tensorflow.reduce_sum",
"tensorflow.keras.optimizers.Adam",
"tensorflow.GradientTape",
"numpy.zeros",
"tensorflow.math.multiply",
"tensorflow.mat... | [((491, 559), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr', 'beta_1': '(0.9)', 'beta_2': '(0.999)'}), '(learning_rate=lr, beta_1=0.9, beta_2=0.999)\n', (515, 559), True, 'import tensorflow as tf\n'), ((928, 954), 'utils.graphs.TBoardGraphs', 'TBoardGraphs', (['self.logname'], {}), '(self.logname)\n', (940, 954), False, 'from utils.graphs import TBoardGraphs\n'), ((988, 1029), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (1027, 1029), True, 'import tensorflow as tf\n'), ((3868, 3885), 'numpy.mean', 'np.mean', (['val_loss'], {}), '(val_loss)\n', (3875, 3885), True, 'import numpy as np\n'), ((5819, 5879), 'numpy.zeros', 'np.zeros', (['(batch_size, target, dimensions)'], {'dtype': 'np.float32'}), '((batch_size, target, dimensions), dtype=np.float32)\n', (5827, 5879), True, 'import numpy as np\n'), ((6204, 6237), 'tensorflow.math.pow', 'tf.math.pow', (['(y_true - y_pred)', '(2.0)'], {}), '(y_true - y_pred, 2.0)\n', (6215, 6237), True, 'import tensorflow as tf\n'), ((6252, 6279), 'tensorflow.math.multiply', 'tf.math.multiply', (['mse', 'mask'], {}), '(mse, mask)\n', (6268, 6279), True, 'import tensorflow as tf\n'), ((7023, 7047), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['trj_loss'], {}), '(trj_loss)\n', (7037, 7047), True, 'import tensorflow as tf\n'), ((8085, 8103), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8101, 8103), False, 'import sys\n'), ((3940, 3957), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3955, 3957), True, 'import tensorflow as tf\n'), ((6334, 6361), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mse'], {'axis': '(-1)'}), '(mse, axis=-1)\n', (6347, 6361), True, 'import tensorflow as tf\n'), ((6745, 6803), 'tensorflow.keras.metrics.mean_squared_error', 'tf.keras.metrics.mean_squared_error', (['delta_t', 'dmp_dt[:, 0]'], {}), '(delta_t, dmp_dt[:, 0])\n', (6780, 6803), True, 'import 
tensorflow as tf\n'), ((6885, 6922), 'tensorflow.tile', 'tf.tile', (['[[weight_dim]]', '[16, 350, 1]'], {}), '([[weight_dim]], [16, 350, 1])\n', (6892, 6922), True, 'import tensorflow as tf\n'), ((6958, 6994), 'tensorflow.math.multiply', 'tf.math.multiply', (['trj_loss', 'loss_atn'], {}), '(trj_loss, loss_atn)\n', (6974, 6994), True, 'import tensorflow as tf\n'), ((7346, 7393), 'tensorflow.math.multiply', 'tf.math.multiply', (['weight_loss', 'loss_atn[:, :-1]'], {}), '(weight_loss, loss_atn[:, :-1])\n', (7362, 7393), True, 'import tensorflow as tf\n'), ((8375, 8407), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['atn'], {'axis': '(0)'}), '(atn, axis=0)\n', (8394, 8407), True, 'import tensorflow as tf\n'), ((8409, 8440), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['atn'], {'axis': '(0)'}), '(atn, axis=0)\n', (8427, 8440), True, 'import tensorflow as tf\n'), ((8527, 8563), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['gen_trj'], {'axis': '(0)'}), '(gen_trj, axis=0)\n', (8546, 8563), True, 'import tensorflow as tf\n'), ((8565, 8600), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['gen_trj'], {'axis': '(0)'}), '(gen_trj, axis=0)\n', (8583, 8600), True, 'import tensorflow as tf\n'), ((8640, 8674), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['phase'], {'axis': '(0)'}), '(phase, axis=0)\n', (8659, 8674), True, 'import tensorflow as tf\n'), ((8685, 8720), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['dmp_dt'], {'axis': '(0)'}), '(dmp_dt, axis=0)\n', (8704, 8720), True, 'import tensorflow as tf\n'), ((3135, 3164), 'tensorflow.expand_dims', 'tf.expand_dims', (['d_in[0][0]', '(0)'], {}), '(d_in[0][0], 0)\n', (3149, 3164), True, 'import tensorflow as tf\n'), ((3182, 3211), 'tensorflow.expand_dims', 'tf.expand_dims', (['d_in[1][0]', '(0)'], {}), '(d_in[1][0], 0)\n', (3196, 3211), True, 'import tensorflow as tf\n'), ((3231, 3260), 'tensorflow.expand_dims', 'tf.expand_dims', (['d_in[2][0]', '(0)'], {}), '(d_in[2][0], 
0)\n', (3245, 3260), True, 'import tensorflow as tf\n'), ((3308, 3338), 'tensorflow.expand_dims', 'tf.expand_dims', (['d_out[0][0]', '(0)'], {}), '(d_out[0][0], 0)\n', (3322, 3338), True, 'import tensorflow as tf\n'), ((3358, 3388), 'tensorflow.expand_dims', 'tf.expand_dims', (['d_out[1][0]', '(0)'], {}), '(d_out[1][0], 0)\n', (3372, 3388), True, 'import tensorflow as tf\n'), ((3435, 3467), 'tensorflow.expand_dims', 'tf.expand_dims', (['[d_out[2][0]]', '(0)'], {}), '([d_out[2][0]], 0)\n', (3449, 3467), True, 'import tensorflow as tf\n'), ((3485, 3515), 'tensorflow.expand_dims', 'tf.expand_dims', (['d_out[3][0]', '(0)'], {}), '(d_out[3][0], 0)\n', (3499, 3515), True, 'import tensorflow as tf\n'), ((3833, 3850), 'numpy.mean', 'np.mean', (['val_loss'], {}), '(val_loss)\n', (3840, 3850), True, 'import numpy as np\n'), ((6003, 6036), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'target'}), '(0.0, 1.0, num=target)\n', (6014, 6036), True, 'import numpy as np\n'), ((6038, 6079), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'current_length'}), '(0.0, 1.0, num=current_length)\n', (6049, 6079), True, 'import numpy as np\n'), ((7250, 7281), 'tensorflow.roll', 'tf.roll', (['wght'], {'shift': '(-1)', 'axis': '(1)'}), '(wght, shift=-1, axis=1)\n', (7257, 7281), True, 'import tensorflow as tf\n'), ((2254, 2273), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (2261, 2273), True, 'import numpy as np\n'), ((1999, 2024), 'numpy.mean', 'np.mean', (['train_loss[-10:]'], {}), '(train_loss[-10:])\n', (2006, 2024), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper module for dealing with datasets loaded from TFDS."""
import copy
import enum
from typing import List, Dict, Optional, Text, Any, Tuple, Callable
import attr
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
def tfds_load_dataset(dataset_name, *args, **kwargs):
  """Load a TFDS dataset, bridging internal google and the external world.

  Forwards all arguments to `tfds.load`, forcing `download=True` and passing
  any caller-supplied `data_dir` through explicitly.
  """
  # Pull data_dir out of kwargs (may be absent) so it can be passed by name.
  extracted_data_dir = kwargs.pop("data_dir", None)
  return tfds.load(dataset_name,
                   *args,
                   data_dir=extracted_data_dir,
                   download=True,
                   **kwargs)
class Split(enum.Enum):
  """Names of the data splits used for cross validation.

  Meta-learning optimizers need two validation sets, hence both an inner and
  an outer validation split in addition to train and test.
  """
  TRAIN = "TRAIN"
  VALID_INNER = "VALID_INNER"
  VALID_OUTER = "VALID_OUTER"
  TEST = "TEST"
def split_dataset(
    dataset,
    num_per_split,
    num_splits = 3,
):
  """Helper to split a dataset for cross validaton.

  The first num_splits - 1 returned datasets each contain num_per_split
  examples; the final one contains everything that remains. Commonly used to
  carve validation sets out of a training set, e.g.
  train_old --> [valid_inner, valid_outer, train].

  Args:
    dataset: name of tfds dataset
    num_per_split: number of examples to have for each of the split off
      dataset.
    num_splits: number of splits to create.

  Returns:
    A list of the requested datasets.
  """
  # Fixed-size pieces: piece i covers examples [i*n, (i+1)*n).
  pieces = [
      dataset.skip(num_per_split * idx).take(num_per_split)
      for idx in range(num_splits - 1)
  ]
  # The last piece holds the remainder of the dataset.
  pieces.append(dataset.skip(num_per_split * (num_splits - 1)))
  return pieces
def _add_onehot_label_to_dict(d,
                              num_label):
  """Returns a shallow copy of `d` with an added "label_onehot" entry.

  The new entry is the one-hot encoding (depth `num_label`) of `d["label"]`.
  The input dictionary is not mutated.
  """
  result = copy.copy(d)
  result["label_onehot"] = tf.one_hot(result["label"], num_label)
  return result
def _process_image_in_dict(d):
  """Returns a new dict whose uint8 image is rescaled to float32 in [0, 1].

  Raises:
    ValueError: if the image is not of dtype uint8.
  """
  result = copy.copy(d)
  image = result["image"]
  if image.dtype != tf.uint8:
    raise ValueError("Only supports uint8 images")
  # Scale 0-255 integer pixels down to the 0-1 float range.
  result["image"] = tf.cast(image, tf.float32) / 255.
  return result
@attr.s
class Datasets(object):
  """Container bundling the four cross-validation splits of one dataset."""
  # NOTE(review): attr.ib(Any) passes typing.Any as the *default value*, not
  # as a type — presumably all four fields are always set explicitly by
  # callers; confirm before relying on the defaults.
  train = attr.ib(Any)
  valid_inner = attr.ib(Any)
  valid_outer = attr.ib(Any)
  test = attr.ib(Any)
def get_image_datasets(
    dataset_name,
    batch_size,
    num_per_valid = 3000,
    num_train = None,
    cache_dataset = True,
    shuffle_buffer = None,
    data_dir = None,
    augmentation_fn = None,
):
  """Get an image `Datasets` instance that is ready to train with.

  This includes caching for speed, repeating, shuffling, preprocessing, and
  batching for each of the 4 splits.

  Args:
    dataset_name: Name of tfds dataset.
    batch_size: Batch size to use.
    num_per_valid: Number of validation images.
    num_train: Number of training examples to use. If None, use all.
    cache_dataset: Optionally cache the dataset for speed.
    shuffle_buffer: Size of shuffle buffer. If none, use the full train set
      size.
    data_dir: Location of tfds data_dir.
    augmentation_fn: Function to apply before batching for augmentation.

  Returns:
    `Datasets` ready to train with.
  """
  # TODO(lmetz) pin all versions of datasets so they are consistent in time.
  splits, info = tfds_load_dataset(
      dataset_name, with_info=True, data_dir=data_dir)
  num_classes = info.features["label"].num_classes
  # Some datasets have different splits defined. For meta-learning we need 4
  # splits. The following takes the splits that are defined, and tries to use
  # them when possible. For missing splits, examples are taken off of the train
  # dataset.
  if set(splits.keys()) == set(["train", "validation", "test"]):
    # Only the inner validation split is carved off the train set.
    train = splits["train"]
    test = splits["test"]
    valid_outer = splits["validation"]
    # pylint: disable=unbalanced-tuple-unpacking
    valid_inner, train = split_dataset(
        train, num_per_split=num_per_valid, num_splits=2)
    num_test = info.splits["test"].num_examples
    total_num_train = info.splits["train"].num_examples
    num_valid = info.splits["validation"].num_examples
  elif (set(splits.keys()) == set(["train", "test"]) or
        set(splits.keys()) == set(["train", "validation"])):
    # Both validation splits come out of the train set; whichever heldout
    # split exists (test or validation) becomes the test set.
    train = splits["train"]
    # pylint: disable=unbalanced-tuple-unpacking
    valid_inner, valid_outer, train = split_dataset(
        train, num_per_split=num_per_valid, num_splits=3)
    if "test" in info.splits:
      heldout_split = info.splits["test"]
    else:
      heldout_split = info.splits["validation"]
    num_test = heldout_split.num_examples
    test = splits["test"] if "test" in splits else splits["validation"]
    total_num_train = info.splits["train"].num_examples - num_per_valid * 2
    num_valid = num_per_valid
  elif set(splits.keys()) == set(["train"]):
    # Everything, including the test set, is carved off the train set.
    train = splits["train"]
    # pylint: disable=unbalanced-tuple-unpacking
    valid_inner, valid_outer, test, train = split_dataset(
        train, num_per_split=num_per_valid, num_splits=4)
    total_num_train = info.splits["train"].num_examples - num_per_valid * 3
    num_test = num_per_valid
    num_valid = num_per_valid
  else:
    raise ValueError("Unsure how to manage the following splits: %s" %
                     str(list(splits.keys())))
  if num_train:
    train = train.take(num_train)
  else:
    num_train = total_num_train
  datasets = Datasets(
      train=train, valid_inner=valid_inner, valid_outer=valid_outer, test=test)
  if cache_dataset:
    datasets = tf.nest.map_structure(lambda ds: ds.cache(), datasets)
  datasets = tf.nest.map_structure(lambda ds: ds.repeat(), datasets)
  # Shuffle each split with a buffer covering its whole size unless a
  # smaller explicit buffer was requested.
  train_shuffle = shuffle_buffer if shuffle_buffer else num_train
  valid_shuffle = shuffle_buffer if shuffle_buffer else num_valid
  test_shuffle = shuffle_buffer if shuffle_buffer else num_test
  datasets = Datasets(
      train=datasets.train.shuffle(train_shuffle),
      valid_inner=datasets.valid_inner.shuffle(valid_shuffle),
      valid_outer=datasets.valid_outer.shuffle(valid_shuffle),
      test=datasets.test.shuffle(test_shuffle))
  def pre_process(example):
    # Add a one-hot label and rescale the uint8 image to [0, 1] floats.
    example = _add_onehot_label_to_dict(example, num_classes)
    return _process_image_in_dict(example)
  datasets = tf.nest.map_structure(lambda ds: ds.map(pre_process), datasets)
  if augmentation_fn:
    datasets = tf.nest.map_structure(lambda ds: ds.map(augmentation_fn),
                                    datasets)
  return tf.nest.map_structure(
      lambda ds: ds.batch(batch_size, drop_remainder=True), datasets)
def _random_slice(example,
                  length):
  """Extract a random slice or pad to make all sequences a fixed length.

  For example -- if one passes in [1,2,3,4] with length=2, this would return
  one of the following: [1,2], [2,3], [3,4].
  If the input is [1, 2] with length=4, this would return [1, 2, 0, 0].

  Args:
    example: Dictionary containing a single example with the "text" key. This
      "text" key should be a vector with an integer type.
    length: Length of the slice.

  Returns:
    An example containing only a fixed slice of text, plus a "mask" entry
    that is 1 over real tokens and 0 over padding.
  """
  input_length = tf.shape(example["text"])[0]
  # Largest valid start index; <= 0 means the sequence needs padding instead.
  max_idx = input_length - length
  # pylint: disable=g-long-lambda
  start_idx = tf.cond(
      tf.greater(max_idx, 0), lambda: tf.random_uniform(
          [], tf.to_int32(0), tf.cast(max_idx, tf.int32), dtype=tf.int32),
      lambda: 0)
  # pylint: enable=g-long-lambda
  # Zero-pad on the right when the sequence is shorter than `length`.
  to_pad = tf.maximum(length - input_length, 0)
  pad_input = tf.pad(example["text"], [[0, to_pad]])
  # copy to prevent a mutation of inputs.
  example = copy.copy(example)
  example["text"] = pad_input[start_idx:start_idx + length]
  example["text"].set_shape([length])
  # The mask is 1 over the original tokens, 0 over padding.
  pad_mask = tf.pad(tf.ones([input_length]), [[0, to_pad]])
  example["mask"] = pad_mask[start_idx:start_idx + length]
  example["mask"].set_shape([length])
  return example
def random_slice_text_data(
    dataset_name,
    batch_size,
    num_train = None,
    patch_length = 128,
    num_per_valid = 3000,
    cache_dataset = False,
    shuffle_buffer = None,
):
  """Gets a text dataset ready to train on.

  This splits the dataset into 4 cross validation splits, takes a random slice
  to make all entries the same length, and batches the examples.

  Args:
    dataset_name: tensorflow_dataset's dataset name.
    batch_size: batch size.
    num_train: number of training examples. If None use all examples.
    patch_length: length of patch to extract.
    num_per_valid: number of images for each validation set.
    cache_dataset: Cache the dataset or not.
    shuffle_buffer: Shuffle buffer size. If None, use dataset size.

  Returns:
    Datasets object containing tf.Dataset.
  """
  train, info = tfds_load_dataset(
      dataset_name, split="train", with_info=True, shuffle_files=True)
  total_num_train = info.splits["train"].num_examples
  num_test = info.splits["test"].num_examples
  # Carve both validation splits off the front of the train split.
  # pylint: disable=unbalanced-tuple-unpacking
  valid_inner, valid_outer, train = split_dataset(
      train, num_per_split=num_per_valid)
  # pylint: enable=unbalanced-tuple-unpacking
  if num_train:
    train = train.take(num_train)
  test = tfds_load_dataset(dataset_name, split="test", shuffle_files=True)
  datasets = Datasets(
      train=train, valid_inner=valid_inner, valid_outer=valid_outer, test=test)
  if cache_dataset:
    datasets = tf.nest.map_structure(lambda ds: ds.cache(), datasets)
  datasets = tf.nest.map_structure(lambda ds: ds.repeat(), datasets)
  # Shuffle each split with a buffer covering its whole size unless a
  # smaller explicit buffer was requested.
  train_shuffle = shuffle_buffer if shuffle_buffer else total_num_train - num_per_valid * 2
  valid_shuffle = shuffle_buffer if shuffle_buffer else num_per_valid
  test_shuffle = shuffle_buffer if shuffle_buffer else num_test
  datasets = Datasets(
      train=datasets.train.shuffle(train_shuffle),
      valid_inner=datasets.valid_inner.shuffle(valid_shuffle),
      valid_outer=datasets.valid_outer.shuffle(valid_shuffle),
      test=datasets.test.shuffle(test_shuffle))
  def pre_process(example):
    """Preprocess example by adding onehot label, and taking a random slice."""
    if "label" in info.features:
      num_classes = info.features["label"].num_classes
      example = _add_onehot_label_to_dict(example, num_classes)
    return _random_slice(example, patch_length)
  datasets = tf.nest.map_structure(lambda ds: ds.map(pre_process), datasets)
  return tf.nest.map_structure(
      lambda ds: ds.batch(batch_size, drop_remainder=True), datasets)
class ResizedDataset(tfds.core.GeneratorBasedBuilder):
  """Base class for a resized image tensorflow dataset."""

  def __init__(self, parent_builder,
               size, *args, **kwargs):
    """Initialize the resized image dataset builder.

    Args:
      parent_builder: The builder to build the resized image dataset from.
      size: size to resize each example to.
      *args: args passed super class.
      **kwargs: kwargs passed super class.
    """
    # Ensure the parent dataset exists on disk before wrapping it.
    parent_builder.download_and_prepare()
    self._builder = parent_builder
    self._size = size
    super(ResizedDataset, self).__init__(*args, **kwargs)

  def _info(self):
    """Copy the parent's info, replacing the image feature's shape."""
    info = self._builder.info
    description = "\n This dataset has been resized to %dx%d!" % (self._size[0],
                                                                  self._size[1])
    new_feature_dict = {k: v for k, v in info.features.items()}
    new_feature_dict["image"] = tfds.features.Image(
        shape=list(self._size) + [3])
    return tfds.core.DatasetInfo(
        builder=self,
        description=info.description + description,
        homepage=info.homepage,
        features=tfds.features.FeaturesDict(new_feature_dict),
        supervised_keys=info.supervised_keys,
        citation=info.citation)

  def _split_generators(self, dl_manager):
    """Mirror every split defined by the parent builder."""
    return [
        tfds.core.SplitGenerator(
            name=split, num_shards=4, gen_kwargs=dict(split=split))
        for split in self._builder.info.splits.keys()
    ]

  def _generate_examples(self, split):
    """Yield (index, example) pairs with each example resized."""
    for exi, ex in enumerate(
        tfds.as_numpy(self._builder.as_dataset(split=split))):
      ex = self._process_example(ex)
      yield exi, ex

  def _process_example(self, example):
    # As of now, this simply converts the image to the passed in size.
    # TODO(lmetz) It might also make sense to resize then crop out the center.
    example["image"] = cv2.resize(
        example["image"], dsize=self._size, interpolation=cv2.INTER_CUBIC)
    return example
class Food101_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The Food101 dataset resized to be 32x32."""
  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Select the upstream builder at a fixed version for reproducibility.
    base = tfds.builder("food101", version="1.0.0")
    super(Food101_32x32, self).__init__(
        *args, parent_builder=base, size=(32, 32), **kwargs)
class Food101_64x64(ResizedDataset):  # pylint: disable=invalid-name
  """The Food101 dataset resized to be 64x64."""
  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Select the upstream builder at a fixed version for reproducibility.
    base = tfds.builder("food101", version="1.0.0")
    super(Food101_64x64, self).__init__(
        *args, parent_builder=base, size=(64, 64), **kwargs)
class Coil100_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The coil100 dataset resized to be 32x32."""
  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Select the upstream builder at a fixed version for reproducibility.
    base = tfds.builder("coil100", version="1.0.0")
    super(Coil100_32x32, self).__init__(
        *args, parent_builder=base, size=(32, 32), **kwargs)
class ColorectalHistology_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The colorectal_histology dataset resized to be 32x32."""
  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Pin the upstream builder to the expected major version.
    base = tfds.builder("colorectal_histology", version="2.*.*")
    super(ColorectalHistology_32x32, self).__init__(
        *args, parent_builder=base, size=(32, 32), **kwargs)
class DeepWeeds_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The deep_weeds dataset resized to be 32x32."""
  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Select the upstream builder at a fixed version for reproducibility.
    base = tfds.builder("deep_weeds", version="1.0.0")
    super(DeepWeeds_32x32, self).__init__(
        *args, parent_builder=base, size=(32, 32), **kwargs)
class Sun397_32x32(ResizedDataset):  # pylint: disable=invalid-name
  """The sun397/tfds dataset resized to be 32x32."""
  VERSION = "1.0.0"

  def __init__(self, *args, **kwargs):
    # Select the upstream builder at a fixed version for reproducibility.
    base = tfds.builder("sun397/tfds", version="4.0.0")
    super(Sun397_32x32, self).__init__(
        *args, parent_builder=base, size=(32, 32), **kwargs)
class TokenizedConfig(tfds.core.BuilderConfig):
  """BuilderConfig for tokenized text datasets."""

  def __init__(self, version=None, text_encoder_config=None, **kwargs):
    """BuilderConfig for tokenized text datasets.

    Args:
      version (string): version as string.
      text_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration
        for the `tfds.features.text.TextEncoder` used for the `"text"` feature.
      **kwargs: keyword arguments forwarded to super.
    """
    super(TokenizedConfig, self).__init__(
        version=tfds.core.Version(version), **kwargs)
    # Fall back to a default (plain) text encoder config when none is given.
    self.text_encoder_config = (
        text_encoder_config or tfds.features.text.TextEncoderConfig())
# This is an arbitrarily chosen subset of languages.
# Each entry names a wikipedia builder config; the values look like a dump
# date followed by a language code ("20190301.en") — presumably matching the
# tfds "wikipedia/<config>" naming; confirm against the tfds catalog.
WIKIPEDIA_PREFIX = [
    "20190301.zh", "20190301.ru", "20190301.ja", "20190301.hsb", "20190301.en"
]
def _get_builder_configs(base_configs):
  """Get the builder configs for tokenized datasets.

  For every base config name this produces two configs: a byte-level
  encoding ("<name>_bytes") and an 8k subword encoding ("<name>_subwords8k").
  """
  all_configs = []
  for prefix in base_configs:
    byte_config = TokenizedConfig(
        name="%s_bytes" % prefix,
        version="0.0.1",
        description=("Uses byte-level text encoding with "
                     "`tfds.features.text.ByteTextEncoder`"),
        text_encoder_config=tfds.features.text.TextEncoderConfig(
            encoder=tfds.features.text.ByteTextEncoder()),
    )
    subword_config = TokenizedConfig(
        name="%s_subwords8k" % prefix,
        version="0.0.1",
        description=("Uses `tfds.features.text.SubwordTextEncoder` with 8k "
                     "vocab size"),
        text_encoder_config=tfds.features.text.TextEncoderConfig(
            encoder_cls=tfds.features.text.SubwordTextEncoder,
            vocab_size=8192),
    )
    all_configs.extend([byte_config, subword_config])
  return all_configs
class TokenizedWikipedia(tfds.core.GeneratorBasedBuilder):
  """Builder which tokenizes the tfds wikipedia datasets.

  This dataset returns 1 paragraph (split via new line) per example
  extracted from the articles. We additionally filter examples to have more than
  5 bytes. Encoding is either bytes, or subwords. The vocab is constructed out
  of the first 200k examples. While this is likely not perfect this should be
  sufficient for meta-learning optimizers.

  Additionally, we make a train and test split by hashing the article seed.

  Finally, for computational reasons we only use 1 millon articles. For the size
  of the models we are training here this should be plenty.
  """
  BUILDER_CONFIGS = _get_builder_configs(WIKIPEDIA_PREFIX)

  def __init__(self, config=None, **kwargs):
    """Initialize the tokenized wikipedia dataset builder.

    Args:
      config: str Config string specified to build dataset with.
      **kwargs: kwargs passed super class.
    """
    # extract the base dataset.
    base, _ = config.split("_")
    self._builder = tfds.builder("wikipedia/%s" % base)
    super(TokenizedWikipedia, self).__init__(config=config, **kwargs)

    self._perc_train = 0.7
    self._max_num_articles = 1000000
    # Number of examples used to build the tokenizer.
    self._examples_for_tokenizer = 200000

  def _info(self):
    """Copy parent info, replacing "text" with the tokenized text feature."""
    info = self._builder.info
    description = "\n This dataset has been tokenized!"
    return tfds.core.DatasetInfo(
        builder=self,
        description=info.description + description,
        features=tfds.features.FeaturesDict({
            "title":
                tfds.features.Text(),
            "text":
                tfds.features.Text(
                    encoder_config=self.builder_config.text_encoder_config),
        }),
        supervised_keys=("text", "text"),
        homepage=info.homepage,
        citation=info.citation)

  def _split_generators(self, dl_manager):
    # Build the vocab (subword configs) from a prefix of the train examples.
    self.info.features["text"].maybe_build_from_corpus(self._vocab_text_gen())
    return [
        tfds.core.SplitGenerator(
            name=split, num_shards=10, gen_kwargs=dict(split=split))
        for split in ["train", "test"]
    ]

  def _split_article(self, ex):
    """Yield (paragraph_index, example) per newline-separated paragraph."""
    # Paragraphs of 5 characters or fewer (after stripping) are dropped.
    for i, split in enumerate(ex["text"].split("\n")):
      if len(split.strip()) > 5:
        yield i, {"title": ex["title"], "text": split}

  def _generate_examples(self, split):
    """Yield ((article_idx, paragraph_idx), example) for the given split."""
    hasher = tfds.core.hashing.Hasher("token_wikipedia_salt")
    for exi, example in enumerate(
        tfds.as_numpy(self._builder.as_dataset(split="train"))):
      if exi > self._max_num_articles:
        return
      # To make a train test split we first hash the key and convert it to a
      # floating point value between 0-1. Depending on this value we either
      # yield the example or not depending on the split.
      p = hasher.hash_key(exi) % 100000 / 100000.
      if split == "train" and p < self._perc_train:
        for i, sub_example in self._split_article(example):
          key = (exi, i)
          yield key, sub_example
      elif split == "test" and p >= self._perc_train:
        for i, sub_example in self._split_article(example):
          key = (exi, i)
          yield key, sub_example

  def _vocab_text_gen(self):
    """Yield text for tokenizer construction from the train split."""
    for i, (_, ex) in enumerate(self._generate_examples("train")):
      # Only yield a subset of the data used for tokenization for
      # performance reasons.
      if self._examples_for_tokenizer > i:
        yield ex["text"]
      else:
        return
# Arbitrary subset of datasets.
# Each entry names an "amazon_us_reviews/<config>" product category config.
AMAZON_PRODUCTS = ["Books_v1_02", "Camera_v1_00", "Home_v1_00", "Video_v1_00"]
class TokenizedAmazonReviews(tfds.core.GeneratorBasedBuilder):
  """Builder which tokenizes the tfds amazon reviews datasets.

  For compute reasons we only tokenize with 200000 examples.

  We make a train and test split by hashing the example index.
  """
  BUILDER_CONFIGS = _get_builder_configs(AMAZON_PRODUCTS)

  def __init__(self, config=None, **kwargs):
    """Initialize the tokenized amazon reviews dataset builder.

    Args:
      config: str Config string specified to build dataset with.
      **kwargs: kwargs passed super class.
    """
    # extract the base dataset.
    base = "_".join(config.split("_")[0:-1])
    self._builder = tfds.builder("amazon_us_reviews/%s" % base)
    super(TokenizedAmazonReviews, self).__init__(config=config, **kwargs)

    self._perc_train = 0.7
    self._examples_for_tokenizer = 200000

  def _info(self):
    """Expose a 5-class star-rating label and the tokenized review text."""
    info = self._builder.info
    description = "\n This dataset has been tokenized!"
    return tfds.core.DatasetInfo(
        builder=self,
        description=info.description + description,
        features=tfds.features.FeaturesDict({
            # 1-5 stars are the labels.
            "label":
                tfds.features.ClassLabel(num_classes=5),
            "text":
                tfds.features.Text(
                    encoder_config=self.builder_config.text_encoder_config),
        }),
        supervised_keys=("text", "label"),
        homepage=info.homepage,
        citation=info.citation)

  def _split_generators(self, dl_manager):
    # Build the vocab (subword configs) from a prefix of the train examples.
    self.info.features["text"].maybe_build_from_corpus(self._vocab_text_gen())
    return [
        tfds.core.SplitGenerator(
            name=split, num_shards=10, gen_kwargs=dict(split=split))
        for split in ["train", "test"]
    ]

  def _generate_examples(self, split):
    """Yield (index, {"text", "label"}) examples for the given split."""
    # NOTE(review): reuses the "token_wikipedia_salt" hash salt — presumably
    # a copy-paste from TokenizedWikipedia; harmless but confirm intent.
    hasher = tfds.core.hashing.Hasher("token_wikipedia_salt")
    for exi, example in enumerate(
        tfds.as_numpy(self._builder.as_dataset(split="train"))):
      p = hasher.hash_key(exi) % 1000 / 1000.
      example = {
          "text": example["data"]["review_body"],
          # subtract one to zero index.
          "label": example["data"]["star_rating"] - 1
      }
      # NOTE(review): the test branch uses `p >` (not `>=`), so examples with
      # p exactly equal to _perc_train land in neither split.
      if split == "train" and p < self._perc_train:
        yield exi, example
      elif split == "test" and p > self._perc_train:
        yield exi, example

  def _vocab_text_gen(self):
    """Yield text for tokenizer construction from the train split."""
    for i, (_, ex) in enumerate(self._generate_examples("train")):
      if self._examples_for_tokenizer > i:
        yield ex["text"]
      else:
        return
def _single_associative_retrieval(batch_size=128, num_pairs=5, num_tokens=10):
    """Build one associative-retrieval tf.data.Dataset (see associative_sequence)."""
    empty_token = num_tokens      # index of the blank/empty token
    query_token = num_tokens + 1  # index of the "retrieve now" indicator
    # Per pair, there is a key and a value. Extra 2 account for the query
    # indicator and the query key.
    seq_len = 2 * num_pairs + 2

    def _make_examples():
        """Python generator producing single (input, output, mask) examples."""
        while True:
            keys = np.random.choice(num_tokens, size=num_pairs, replace=False)
            values = np.random.choice(num_tokens, size=num_pairs, replace=True)
            inputs = []
            targets = []
            for key, value in zip(keys, values):
                inputs += [key, value]
                targets += [empty_token, empty_token]
            inputs.append(query_token)
            targets.append(empty_token)
            pick = np.random.randint(0, num_pairs)
            inputs.append(keys[pick])
            targets.append(values[pick])
            # Loss is only applied to the final (answer) position.
            mask = np.zeros(seq_len, dtype=np.float32)
            mask[-1] = 1.
            yield (np.asarray(inputs, dtype=np.int32),
                   np.asarray(targets, dtype=np.int32),
                   mask)

    def _to_onehot(inp, out, loss_mask):
        """Map integer token ids to one-hot vectors; the mask passes through."""
        return {
            "input": tf.one_hot(inp, num_tokens + 2),
            "output": tf.one_hot(out, num_tokens + 2),
            "loss_mask": loss_mask,
        }

    dataset = tf.data.Dataset.from_generator(
        _make_examples,
        (tf.int32, tf.int32, tf.float32),
        ([seq_len], [seq_len], [seq_len]))
    return dataset.map(_to_onehot).batch(batch_size, drop_remainder=True)
def associative_sequence(batch_size=128, num_pairs=5, num_tokens=10):
    """Associative Retrieval datasets.

    The inputs consist of pairs of key and value sequentially followed by an
    indicator token and then a retrieval token. The output consists of the
    value associated with the retrieval key in the final step of the
    sequence, preceded by empty tokens.

    The problem can be perfectly solved, as in the 'key' tokens will be
    unique. There can be duplicate values, however, for different keys.

    Example (using characters instead of the onehot representations):
      input:     A1B2C3D4?A
      output:    _________1
      loss_mask: 0000000001

    The outputs are represented using a one-hot encoding. The problem is
    based off of the one used in https://arxiv.org/pdf/1610.06258.pdf.

    Args:
      batch_size: int
      num_pairs: int, number of pairs to put into memory.
      num_tokens: int, number of possible tokens to choose from.
    Returns:
      datasets: Datasets object with each split containing the same data
        generating process.
    """
    def _make_split():
        # Each split gets an independent instance of the same generator.
        return _single_associative_retrieval(batch_size, num_pairs, num_tokens)

    return Datasets(
        train=_make_split(),
        valid_inner=_make_split(),
        valid_outer=_make_split(),
        test=_make_split())
def _single_copy_sequence(batch_size=128,
                          sequence_length=5,
                          num_separator=1,
                          num_tokens=10):
    """See copy_sequence for docs."""
    # Vocabulary: data tokens, plus the separator and empty marker tokens.
    vocab_size = num_tokens + 2

    def _build_batch(_):
        """Construct a batch.

        Args:
          _: tf.Tensor Needed to construct a tf.data.Dataset that iteratively
            calls this function. This is a dummy value that never changes.
        Returns:
          batch: dict with "input", "output" and "loss_mask" entries.
        """
        tokens = tf.random_uniform([batch_size, sequence_length],
                                   0,
                                   num_tokens,
                                   dtype=tf.int32)
        separators = tf.ones([batch_size, num_separator],
                             dtype=tf.int32) * num_tokens
        placeholders = tf.ones([batch_size, sequence_length],
                               dtype=tf.int32) * (num_tokens + 1)
        input_ids = tf.concat([tokens, separators, placeholders], axis=1)
        input_onehot = tf.one_hot(input_ids, vocab_size)
        # Only the trailing copy region contributes to the loss.
        loss_mask = tf.concat(
            [tf.zeros([batch_size, sequence_length + num_separator]),
             tf.ones([batch_size, sequence_length])],
            axis=1)
        output_ids = tf.concat(
            [tf.zeros_like(tokens), tf.zeros_like(separators), tokens], axis=1)
        output_onehot = tf.one_hot(output_ids, vocab_size)
        return {"input": input_onehot,
                "output": output_onehot,
                "loss_mask": loss_mask}

    return tf.data.Dataset.from_tensor_slices([0]).repeat().map(_build_batch)
def copy_sequence(batch_size=128,
                  sequence_length=5,
                  num_separator=1,
                  num_tokens=10):
    """A simple input copy to output task.

    Input consists of `seq_len` tokens drawn from a vocab size of
    `num_tokens` followed by `n_sep` separation tokens, followed by 3 empty
    tokens. The output consists of `seq_len + n_sep` empty tokens followed by
    the same input tokens from the input. All token outputs are onehot.

    A sample input output pair for seq_len=3, num_tokens=3, n_sep=1
      input::     <tokenA><tokenB><tokenC><sep>  <empty> <empty> <empty>
      output::    <empty> <empty> <empty> <empty><tokenA><tokenB><tokenC>
      loss_mask:: 0.      0.      0.      0.     1.      1.      1.

    Args:
      batch_size: int
      sequence_length: int, length of sequence to copy
      num_separator: int, number of empty tokens separating input from output
      num_tokens: int, number of tokens to build input from
    Returns:
      dataset: tf.Data.Dataset
    """
    # Build an independent dataset instance for each split.
    splits = {
        name: _single_copy_sequence(batch_size, sequence_length,
                                    num_separator, num_tokens)
        for name in ("train", "valid_inner", "valid_outer", "test")
    }
    return Datasets(**splits)
| [
"tensorflow.compat.v1.one_hot",
"tensorflow_datasets.features.Text",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.pad",
"tensorflow_datasets.features.text.TextEncoderConfig",
"tensorflow_datasets.features.ClassLabel",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.concat",
"tensorflow.... | [((1082, 1156), 'tensorflow_datasets.load', 'tfds.load', (['dataset_name', '*args'], {'data_dir': 'data_dir', 'download': '(True)'}), '(dataset_name, *args, data_dir=data_dir, download=True, **kwargs)\n', (1091, 1156), True, 'import tensorflow_datasets as tfds\n'), ((2500, 2512), 'copy.copy', 'copy.copy', (['d'], {}), '(d)\n', (2509, 2512), False, 'import copy\n'), ((2535, 2568), 'tensorflow.compat.v1.one_hot', 'tf.one_hot', (["d['label']", 'num_label'], {}), "(d['label'], num_label)\n", (2545, 2568), True, 'import tensorflow.compat.v1 as tf\n'), ((2696, 2708), 'copy.copy', 'copy.copy', (['d'], {}), '(d)\n', (2705, 2708), False, 'import copy\n'), ((2915, 2927), 'attr.ib', 'attr.ib', (['Any'], {}), '(Any)\n', (2922, 2927), False, 'import attr\n'), ((2944, 2956), 'attr.ib', 'attr.ib', (['Any'], {}), '(Any)\n', (2951, 2956), False, 'import attr\n'), ((2973, 2985), 'attr.ib', 'attr.ib', (['Any'], {}), '(Any)\n', (2980, 2985), False, 'import attr\n'), ((2995, 3007), 'attr.ib', 'attr.ib', (['Any'], {}), '(Any)\n', (3002, 3007), False, 'import attr\n'), ((8180, 8216), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(length - input_length)', '(0)'], {}), '(length - input_length, 0)\n', (8190, 8216), True, 'import tensorflow.compat.v1 as tf\n'), ((8231, 8269), 'tensorflow.compat.v1.pad', 'tf.pad', (["example['text']", '[[0, to_pad]]'], {}), "(example['text'], [[0, to_pad]])\n", (8237, 8269), True, 'import tensorflow.compat.v1 as tf\n'), ((8324, 8342), 'copy.copy', 'copy.copy', (['example'], {}), '(example)\n', (8333, 8342), False, 'import copy\n'), ((24831, 24953), 'tensorflow.compat.v1.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['_py_make_example', '(tf.int32, tf.int32, tf.float32)', '([seq_len], [seq_len], [seq_len])'], {}), '(_py_make_example, (tf.int32, tf.int32, tf.\n float32), ([seq_len], [seq_len], [seq_len]))\n', (24861, 24953), True, 'import tensorflow.compat.v1 as tf\n'), ((2826, 2852), 'tensorflow.compat.v1.cast', 
'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2833, 2852), True, 'import tensorflow.compat.v1 as tf\n'), ((7866, 7891), 'tensorflow.compat.v1.shape', 'tf.shape', (["example['text']"], {}), "(example['text'])\n", (7874, 7891), True, 'import tensorflow.compat.v1 as tf\n'), ((7992, 8014), 'tensorflow.compat.v1.greater', 'tf.greater', (['max_idx', '(0)'], {}), '(max_idx, 0)\n', (8002, 8014), True, 'import tensorflow.compat.v1 as tf\n'), ((8462, 8485), 'tensorflow.compat.v1.ones', 'tf.ones', (['[input_length]'], {}), '([input_length])\n', (8469, 8485), True, 'import tensorflow.compat.v1 as tf\n'), ((13085, 13162), 'cv2.resize', 'cv2.resize', (["example['image']"], {'dsize': 'self._size', 'interpolation': 'cv2.INTER_CUBIC'}), "(example['image'], dsize=self._size, interpolation=cv2.INTER_CUBIC)\n", (13095, 13162), False, 'import cv2\n'), ((13393, 13433), 'tensorflow_datasets.builder', 'tfds.builder', (['"""food101"""'], {'version': '"""1.0.0"""'}), "('food101', version='1.0.0')\n", (13405, 13433), True, 'import tensorflow_datasets as tfds\n'), ((13748, 13788), 'tensorflow_datasets.builder', 'tfds.builder', (['"""food101"""'], {'version': '"""1.0.0"""'}), "('food101', version='1.0.0')\n", (13760, 13788), True, 'import tensorflow_datasets as tfds\n'), ((14103, 14143), 'tensorflow_datasets.builder', 'tfds.builder', (['"""coil100"""'], {'version': '"""1.0.0"""'}), "('coil100', version='1.0.0')\n", (14115, 14143), True, 'import tensorflow_datasets as tfds\n'), ((14483, 14536), 'tensorflow_datasets.builder', 'tfds.builder', (['"""colorectal_histology"""'], {'version': '"""2.*.*"""'}), "('colorectal_histology', version='2.*.*')\n", (14495, 14536), True, 'import tensorflow_datasets as tfds\n'), ((14868, 14911), 'tensorflow_datasets.builder', 'tfds.builder', (['"""deep_weeds"""'], {'version': '"""1.0.0"""'}), "('deep_weeds', version='1.0.0')\n", (14880, 14911), True, 'import tensorflow_datasets as tfds\n'), ((15231, 15275), 'tensorflow_datasets.builder', 
'tfds.builder', (['"""sun397/tfds"""'], {'version': '"""4.0.0"""'}), "('sun397/tfds', version='4.0.0')\n", (15243, 15275), True, 'import tensorflow_datasets as tfds\n'), ((18275, 18310), 'tensorflow_datasets.builder', 'tfds.builder', (["('wikipedia/%s' % base)"], {}), "('wikipedia/%s' % base)\n", (18287, 18310), True, 'import tensorflow_datasets as tfds\n'), ((19626, 19674), 'tensorflow_datasets.core.hashing.Hasher', 'tfds.core.hashing.Hasher', (['"""token_wikipedia_salt"""'], {}), "('token_wikipedia_salt')\n", (19650, 19674), True, 'import tensorflow_datasets as tfds\n'), ((21477, 21520), 'tensorflow_datasets.builder', 'tfds.builder', (["('amazon_us_reviews/%s' % base)"], {}), "('amazon_us_reviews/%s' % base)\n", (21489, 21520), True, 'import tensorflow_datasets as tfds\n'), ((22634, 22682), 'tensorflow_datasets.core.hashing.Hasher', 'tfds.core.hashing.Hasher', (['"""token_wikipedia_salt"""'], {}), "('token_wikipedia_salt')\n", (22658, 22682), True, 'import tensorflow_datasets as tfds\n'), ((26850, 26929), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[batch_size, sequence_length]', '(0)', 'num_tokens'], {'dtype': 'tf.int32'}), '([batch_size, sequence_length], 0, num_tokens, dtype=tf.int32)\n', (26867, 26929), True, 'import tensorflow.compat.v1 as tf\n'), ((27209, 27244), 'tensorflow.compat.v1.concat', 'tf.concat', (['[inp, sep, emit]'], {'axis': '(1)'}), '([inp, sep, emit], axis=1)\n', (27218, 27244), True, 'import tensorflow.compat.v1 as tf\n'), ((27259, 27305), 'tensorflow.compat.v1.one_hot', 'tf.one_hot', (['inp_seq_pre_onehot', '(num_tokens + 2)'], {}), '(inp_seq_pre_onehot, num_tokens + 2)\n', (27269, 27305), True, 'import tensorflow.compat.v1 as tf\n'), ((27602, 27648), 'tensorflow.compat.v1.one_hot', 'tf.one_hot', (['outputs_pre_onehot', '(num_tokens + 2)'], {}), '(outputs_pre_onehot, num_tokens + 2)\n', (27612, 27648), True, 'import tensorflow.compat.v1 as tf\n'), ((16049, 16087), 'tensorflow_datasets.features.text.TextEncoderConfig', 
'tfds.features.text.TextEncoderConfig', ([], {}), '()\n', (16085, 16087), True, 'import tensorflow_datasets as tfds\n'), ((23546, 23577), 'tensorflow.compat.v1.one_hot', 'tf.one_hot', (['inp', '(num_tokens + 2)'], {}), '(inp, num_tokens + 2)\n', (23556, 23577), True, 'import tensorflow.compat.v1 as tf\n'), ((23614, 23645), 'tensorflow.compat.v1.one_hot', 'tf.one_hot', (['out', '(num_tokens + 2)'], {}), '(out, num_tokens + 2)\n', (23624, 23645), True, 'import tensorflow.compat.v1 as tf\n'), ((23844, 23903), 'numpy.random.choice', 'np.random.choice', (['num_tokens'], {'size': 'num_pairs', 'replace': '(False)'}), '(num_tokens, size=num_pairs, replace=False)\n', (23860, 23903), True, 'import numpy as np\n'), ((23919, 23977), 'numpy.random.choice', 'np.random.choice', (['num_tokens'], {'size': 'num_pairs', 'replace': '(True)'}), '(num_tokens, size=num_pairs, replace=True)\n', (23935, 23977), True, 'import numpy as np\n'), ((24328, 24359), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_pairs'], {}), '(0, num_pairs)\n', (24345, 24359), True, 'import numpy as np\n'), ((24461, 24506), 'numpy.zeros', 'np.zeros', (['(2 * num_pairs + 2)'], {'dtype': 'np.float32'}), '(2 * num_pairs + 2, dtype=np.float32)\n', (24469, 24506), True, 'import numpy as np\n'), ((24550, 24587), 'numpy.asarray', 'np.asarray', (['input_seq'], {'dtype': 'np.int32'}), '(input_seq, dtype=np.int32)\n', (24560, 24587), True, 'import numpy as np\n'), ((24607, 24645), 'numpy.asarray', 'np.asarray', (['output_seq'], {'dtype': 'np.int32'}), '(output_seq, dtype=np.int32)\n', (24617, 24645), True, 'import numpy as np\n'), ((27024, 27076), 'tensorflow.compat.v1.ones', 'tf.ones', (['[batch_size, num_separator]'], {'dtype': 'tf.int32'}), '([batch_size, num_separator], dtype=tf.int32)\n', (27031, 27076), True, 'import tensorflow.compat.v1 as tf\n'), ((27101, 27155), 'tensorflow.compat.v1.ones', 'tf.ones', (['[batch_size, sequence_length]'], {'dtype': 'tf.int32'}), '([batch_size, sequence_length], 
dtype=tf.int32)\n', (27108, 27155), True, 'import tensorflow.compat.v1 as tf\n'), ((8057, 8071), 'tensorflow.compat.v1.to_int32', 'tf.to_int32', (['(0)'], {}), '(0)\n', (8068, 8071), True, 'import tensorflow.compat.v1 as tf\n'), ((8073, 8099), 'tensorflow.compat.v1.cast', 'tf.cast', (['max_idx', 'tf.int32'], {}), '(max_idx, tf.int32)\n', (8080, 8099), True, 'import tensorflow.compat.v1 as tf\n'), ((12339, 12383), 'tensorflow_datasets.features.FeaturesDict', 'tfds.features.FeaturesDict', (['new_feature_dict'], {}), '(new_feature_dict)\n', (12365, 12383), True, 'import tensorflow_datasets as tfds\n'), ((15947, 15973), 'tensorflow_datasets.core.Version', 'tfds.core.Version', (['version'], {}), '(version)\n', (15964, 15973), True, 'import tensorflow_datasets as tfds\n'), ((27343, 27398), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[batch_size, sequence_length + num_separator]'], {}), '([batch_size, sequence_length + num_separator])\n', (27351, 27398), True, 'import tensorflow.compat.v1 as tf\n'), ((27408, 27446), 'tensorflow.compat.v1.ones', 'tf.ones', (['[batch_size, sequence_length]'], {}), '([batch_size, sequence_length])\n', (27415, 27446), True, 'import tensorflow.compat.v1 as tf\n'), ((27534, 27552), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['inp'], {}), '(inp)\n', (27547, 27552), True, 'import tensorflow.compat.v1 as tf\n'), ((27554, 27572), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['sep'], {}), '(sep)\n', (27567, 27572), True, 'import tensorflow.compat.v1 as tf\n'), ((17044, 17153), 'tensorflow_datasets.features.text.TextEncoderConfig', 'tfds.features.text.TextEncoderConfig', ([], {'encoder_cls': 'tfds.features.text.SubwordTextEncoder', 'vocab_size': '(8192)'}), '(encoder_cls=tfds.features.text.\n SubwordTextEncoder, vocab_size=8192)\n', (17080, 17153), True, 'import tensorflow_datasets as tfds\n'), ((27733, 27772), 'tensorflow.compat.v1.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['[0]'], {}), '([0])\n', 
(27767, 27772), True, 'import tensorflow.compat.v1 as tf\n'), ((18839, 18859), 'tensorflow_datasets.features.Text', 'tfds.features.Text', ([], {}), '()\n', (18857, 18859), True, 'import tensorflow_datasets as tfds\n'), ((18897, 18971), 'tensorflow_datasets.features.Text', 'tfds.features.Text', ([], {'encoder_config': 'self.builder_config.text_encoder_config'}), '(encoder_config=self.builder_config.text_encoder_config)\n', (18915, 18971), True, 'import tensorflow_datasets as tfds\n'), ((22003, 22042), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(5)'}), '(num_classes=5)\n', (22027, 22042), True, 'import tensorflow_datasets as tfds\n'), ((22080, 22154), 'tensorflow_datasets.features.Text', 'tfds.features.Text', ([], {'encoder_config': 'self.builder_config.text_encoder_config'}), '(encoder_config=self.builder_config.text_encoder_config)\n', (22098, 22154), True, 'import tensorflow_datasets as tfds\n'), ((16724, 16760), 'tensorflow_datasets.features.text.ByteTextEncoder', 'tfds.features.text.ByteTextEncoder', ([], {}), '()\n', (16758, 16760), True, 'import tensorflow_datasets as tfds\n')] |
import numpy as np
from astropy import constants, units
from skypy.gravitational_wave import b_band_merger_rate
from skypy.gravitational_wave.merger_rate import abadie_table_III
def test_abadie_rates():
    """Check b_band_merger_rate output shape and agreement with Abadie Table III."""
    # One rate should be returned per input luminosity.
    lum = 10. ** (-0.4 * (-20.5 + np.random.randn(1000)))
    merger_rates = b_band_merger_rate(lum, population='NS-NS', optimism='low')
    assert len(merger_rates) == len(lum)

    # A luminosity of L_10 (in units of solar luminosity) must reproduce the
    # rate listed in Abadie Table III.
    l_ten = 1e10 * 2.16e33 / constants.L_sun.to_value('erg/s')
    rate_l10 = b_band_merger_rate(l_ten, population='NS-NS', optimism='low')
    expected = abadie_table_III['NS-NS']['low']
    assert np.isclose(rate_l10.to_value(1/units.year), expected, rtol=1e-5)
| [
"astropy.constants.L_sun.to_value",
"skypy.gravitational_wave.b_band_merger_rate",
"numpy.random.randn"
] | [((327, 393), 'skypy.gravitational_wave.b_band_merger_rate', 'b_band_merger_rate', (['luminosity'], {'population': '"""NS-NS"""', 'optimism': '"""low"""'}), "(luminosity, population='NS-NS', optimism='low')\n", (345, 393), False, 'from skypy.gravitational_wave import b_band_merger_rate\n'), ((645, 705), 'skypy.gravitational_wave.b_band_merger_rate', 'b_band_merger_rate', (['L_10'], {'population': '"""NS-NS"""', 'optimism': '"""low"""'}), "(L_10, population='NS-NS', optimism='low')\n", (663, 705), False, 'from skypy.gravitational_wave import b_band_merger_rate\n'), ((596, 629), 'astropy.constants.L_sun.to_value', 'constants.L_sun.to_value', (['"""erg/s"""'], {}), "('erg/s')\n", (620, 629), False, 'from astropy import constants, units\n'), ((291, 312), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (306, 312), True, 'import numpy as np\n')] |
from __future__ import print_function
from tqdm import tqdm
import math
from termcolor import colored
import numpy as np
from openrec.tf1.legacy.utils.evaluators import ImplicitEvalManager
import sys
import json
import pickle
class ImplicitModelTrainer(object):
    """
    The ImplicitModelTrainer class implements logics for basic recommender training and evaluation using users'
    *implicit feedback*.

    Parameters
    ----------
    batch_size: int
        Training batch size.
    test_batch_size: int
        Test/Evaluation batch size (number of users per testing batch).
    train_dataset: Dataset
        Dataset for model training.
    model: Recommender
        The target recommender.
    sampler: Sampler
        The sampler for model training.
    item_serving_size: int, optional
        Test/Evaluation batch size (number of items per testing batch).
    eval_save_prefix: str, optional
        If given, model checkpoints and partial-evaluation results are saved
        under paths starting with this prefix.

    Notes
    -----
    The function :code:`train` should be called for model training and evaluation.
    """

    def __init__(self, batch_size, test_batch_size, train_dataset, model, sampler,
                 item_serving_size=None, eval_save_prefix=None):
        self._batch_size = batch_size
        self._test_batch_size = test_batch_size
        self._item_serving_size = item_serving_size
        self._eval_save_prefix = eval_save_prefix
        self._train_dataset = train_dataset
        self._max_item = self._train_dataset.max_item()
        self._model = model
        self._sampler = sampler

    def train(self, num_itr, display_itr, eval_datasets=None, evaluators=None,
              num_negatives=None, seed=10):
        """Train and evaluate a recommender.

        Parameters
        ----------
        num_itr: int
            total number of training iterations.
        display_itr: int
            Evaluation/testing period.
        eval_datasets: list of Dataset, optional
            A list of datasets for evaluation/testing.
        evaluators: list of Evaluator, optional
            A list of evaluators for evaluation/testing.
        num_negatives: int, optional
            If specified, a given number of items NOT interacted with each user
            will be sampled (as negative items) for evaluations.
        seed: int, optional
            Random seed used when sampling negative items.
        """
        # Avoid mutable default arguments (shared across calls).
        if eval_datasets is None:
            eval_datasets = []
        if evaluators is None:
            evaluators = []
        acc_loss = 0
        self._eval_manager = ImplicitEvalManager(evaluators=evaluators)
        self._num_negatives = num_negatives
        self._exclude_positives(eval_datasets)
        if self._num_negatives is None:
            eval_func = self._evaluate_full
            print(colored('== Start training with FULL evaluation ==', 'blue'))
        else:
            eval_func = self._evaluate_partial
            self._sample_negatives(seed=seed)
            print(colored('== Start training with sampled evaluation, sample size: %d ==' % num_negatives, 'blue'))
        # Guard against display_itr < 10, which would otherwise make the
        # progress-logging modulus zero and raise ZeroDivisionError.
        progress_itr = max(1, display_itr // 10)
        for itr in range(num_itr):
            batch_data = self._sampler.next_batch()
            loss = self._model.train(batch_data)
            acc_loss += loss
            if itr % progress_itr == 0 and itr > 0:
                print(colored('[Itr %d] Finished' % itr, 'blue'))
            if itr % display_itr == 0 and itr > 0:
                if self._eval_save_prefix:
                    self._model.save(self._eval_save_prefix, itr)
                print(colored('[Itr %d]' % itr, 'red'), 'loss: %f' % (acc_loss/display_itr))
                for dataset in eval_datasets:
                    print(colored('..(dataset: %s) evaluation' % dataset.name, 'green'))
                    sys.stdout.flush()
                    eval_results = eval_func(eval_dataset=dataset)
                    for key, result in eval_results.items():
                        average_result = np.mean(result, axis=0)
                        if type(average_result) is np.ndarray:
                            print(colored('..(dataset: %s)' % dataset.name, 'green'),
                                  key, ' '.join([str(s) for s in average_result]))
                        else:
                            print(colored('..(dataset: %s)' % dataset.name, 'green'),
                                  key, average_result)
                acc_loss = 0

    def _score_full_items(self, users):
        """Score all items for the given users.

        If ``item_serving_size`` is set, items are scored in chunks of that
        size to bound per-call serving memory.
        """
        if self._item_serving_size is None:
            return self._model.serve({'user_id_input': users,
                                      'item_id_input': np.arange(self._max_item)})
        scores = []
        item_id_input = np.zeros(self._item_serving_size, np.int32)
        num_batches = int(math.ceil(float(self._max_item) / self._item_serving_size))
        for ibatch in range(num_batches):
            item_id_list = range(ibatch * self._item_serving_size,
                                 min((ibatch + 1) * self._item_serving_size, self._max_item))
            # Reuse a fixed-size buffer; only the leading entries are valid.
            item_id_input[:len(item_id_list)] = item_id_list
            scores.append(self._model.serve({'user_id_input': users,
                                             'item_id_input': item_id_input})[:len(item_id_list)])
        return np.concatenate(scores, axis=1)

    def _score_partial_items(self, user, items):
        """Score only the given ``items`` for a single ``user``."""
        if self._item_serving_size is None:
            return self._model.serve({'user_id_input': [user],
                                      'item_id_input': np.arange(self._max_item)})[0][np.array(items)]
        else:
            return self._model.serve({'user_id_input': [user],
                                      'item_id_input': np.array(items)})[0]

    def _evaluate_full(self, eval_dataset):
        """Evaluate against the full item catalogue (no negative sampling)."""
        metric_results = {}
        for evaluator in self._eval_manager.evaluators:
            metric_results[evaluator.name] = []
        for itr in tqdm(range(int(math.ceil(float(eval_dataset.unique_user_count()) / self._test_batch_size)))):
            users = eval_dataset.get_unique_user_list()[itr * self._test_batch_size:(itr + 1) * self._test_batch_size]
            scores = self._score_full_items(users=users)
            for u_ind, user in enumerate(users):
                result = self._eval_manager.full_eval(
                    pos_samples=list(eval_dataset.get_interactions_by_user_gb_item(user)),
                    excl_pos_samples=self._excluded_positives[user],
                    predictions=scores[u_ind])
                for key in result:
                    metric_results[key].append(result[key])
        return metric_results

    def _evaluate_partial(self, eval_dataset):
        """Evaluate against sampled negatives plus each user's positives.

        Per-user scores are also collected and, when ``eval_save_prefix`` is
        set, pickled for offline analysis.
        """
        metric_results = {}
        for evaluator in self._eval_manager.evaluators:
            metric_results[evaluator.name] = []
        to_be_saved = dict()
        to_be_saved["num_negatives"] = self._num_negatives
        to_be_saved["users"] = list()
        to_be_saved["user_items"] = dict()
        to_be_saved["results"] = dict()
        for user in tqdm(eval_dataset.get_unique_user_list()):
            to_be_saved["users"].append(int(user))
            # Negatives first, then the user's positive items.
            items = self._sampled_negatives[user] + list(eval_dataset.get_interactions_by_user_gb_item(user))
            to_be_saved["user_items"][int(user)] = items
            scores = self._score_partial_items(user, items)
            result = self._eval_manager.partial_eval(pos_scores=scores[self._num_negatives:],
                                                     neg_scores=scores[:self._num_negatives])
            to_be_saved["results"][int(user)] = scores
            for key in result:
                metric_results[key].append(result[key])
        if self._eval_save_prefix:
            with open(self._eval_save_prefix + "_evaluate_partial.pickle", 'wb') as tmpf:
                pickle.dump(to_be_saved, tmpf)
        return metric_results

    def _exclude_positives(self, eval_datasets):
        """Collect, per user, items interacted with in train or eval datasets.

        These positives are excluded from ranking/negative sampling.
        """
        self._excluded_positives = {}
        user_set = set()
        for dataset in eval_datasets:
            user_set = user_set.union(dataset.get_unique_user_list())
        for user in user_set:
            self._excluded_positives[user] = set()
        for user in user_set:
            if self._train_dataset.contain_user(user):
                self._excluded_positives[user] = self._excluded_positives[user].union(self._train_dataset.get_interactions_by_user_gb_item(user))
            for dataset in eval_datasets:
                if dataset.contain_user(user):
                    self._excluded_positives[user] = self._excluded_positives[user].union(dataset.get_interactions_by_user_gb_item(user))

    def _sample_negatives(self, seed):
        """Sample ``num_negatives`` non-interacted items per evaluated user."""
        print(colored('[Subsampling negative items]', 'red'))
        np.random.seed(seed=seed)
        self._sampled_negatives = {}
        for user in tqdm(self._excluded_positives, leave=False):
            shuffled_items = np.random.permutation(self._max_item)
            subsamples = []
            for item in shuffled_items:
                if item not in self._excluded_positives[user]:
                    subsamples.append(item)
                if len(subsamples) == self._num_negatives:
                    break
            self._sampled_negatives[user] = subsamples
| [
"numpy.mean",
"termcolor.colored",
"pickle.dump",
"tqdm.tqdm",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.concatenate",
"sys.stdout.flush",
"openrec.tf1.legacy.utils.evaluators.ImplicitEvalManager",
"numpy.arange",
"numpy.random.permutation"
] | [((2223, 2265), 'openrec.tf1.legacy.utils.evaluators.ImplicitEvalManager', 'ImplicitEvalManager', ([], {'evaluators': 'evaluators'}), '(evaluators=evaluators)\n', (2242, 2265), False, 'from openrec.tf1.legacy.utils.evaluators import ImplicitEvalManager\n'), ((8458, 8483), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (8472, 8483), True, 'import numpy as np\n'), ((8541, 8584), 'tqdm.tqdm', 'tqdm', (['self._excluded_positives'], {'leave': '(False)'}), '(self._excluded_positives, leave=False)\n', (8545, 8584), False, 'from tqdm import tqdm\n'), ((4401, 4444), 'numpy.zeros', 'np.zeros', (['self._item_serving_size', 'np.int32'], {}), '(self._item_serving_size, np.int32)\n', (4409, 4444), True, 'import numpy as np\n'), ((4961, 4991), 'numpy.concatenate', 'np.concatenate', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4975, 4991), True, 'import numpy as np\n'), ((8402, 8448), 'termcolor.colored', 'colored', (['"""[Subsampling negative items]"""', '"""red"""'], {}), "('[Subsampling negative items]', 'red')\n", (8409, 8448), False, 'from termcolor import colored\n'), ((8615, 8652), 'numpy.random.permutation', 'np.random.permutation', (['self._max_item'], {}), '(self._max_item)\n', (8636, 8652), True, 'import numpy as np\n'), ((2460, 2520), 'termcolor.colored', 'colored', (['"""== Start training with FULL evaluation =="""', '"""blue"""'], {}), "('== Start training with FULL evaluation ==', 'blue')\n", (2467, 2520), False, 'from termcolor import colored\n'), ((2647, 2747), 'termcolor.colored', 'colored', (["('== Start training with sampled evaluation, sample size: %d ==' %\n num_negatives)", '"""blue"""'], {}), "('== Start training with sampled evaluation, sample size: %d ==' %\n num_negatives, 'blue')\n", (2654, 2747), False, 'from termcolor import colored\n'), ((5234, 5249), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (5242, 5249), True, 'import numpy as np\n'), ((7514, 7544), 'pickle.dump', 'pickle.dump', 
(['to_be_saved', 'tmpf'], {}), '(to_be_saved, tmpf)\n', (7525, 7544), False, 'import pickle\n'), ((2992, 3034), 'termcolor.colored', 'colored', (["('[Itr %d] Finished' % itr)", '"""blue"""'], {}), "('[Itr %d] Finished' % itr, 'blue')\n", (2999, 3034), False, 'from termcolor import colored\n'), ((3236, 3268), 'termcolor.colored', 'colored', (["('[Itr %d]' % itr)", '"""red"""'], {}), "('[Itr %d]' % itr, 'red')\n", (3243, 3268), False, 'from termcolor import colored\n'), ((3462, 3480), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3478, 3480), False, 'import sys\n'), ((4307, 4332), 'numpy.arange', 'np.arange', (['self._max_item'], {}), '(self._max_item)\n', (4316, 4332), True, 'import numpy as np\n'), ((3379, 3440), 'termcolor.colored', 'colored', (["('..(dataset: %s) evaluation' % dataset.name)", '"""green"""'], {}), "('..(dataset: %s) evaluation' % dataset.name, 'green')\n", (3386, 3440), False, 'from termcolor import colored\n'), ((3650, 3673), 'numpy.mean', 'np.mean', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (3657, 3673), True, 'import numpy as np\n'), ((5377, 5392), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (5385, 5392), True, 'import numpy as np\n'), ((5203, 5228), 'numpy.arange', 'np.arange', (['self._max_item'], {}), '(self._max_item)\n', (5212, 5228), True, 'import numpy as np\n'), ((3771, 3821), 'termcolor.colored', 'colored', (["('..(dataset: %s)' % dataset.name)", '"""green"""'], {}), "('..(dataset: %s)' % dataset.name, 'green')\n", (3778, 3821), False, 'from termcolor import colored\n'), ((3970, 4020), 'termcolor.colored', 'colored', (["('..(dataset: %s)' % dataset.name)", '"""green"""'], {}), "('..(dataset: %s)' % dataset.name, 'green')\n", (3977, 4020), False, 'from termcolor import colored\n')] |
import time
import numpy as np
import matplotlib.pyplot as plt
import sectionproperties.pre.sections as sections
from sectionproperties.analysis.cross_section import CrossSection

# Study how the computed torsion constant of a rectangular section converges
# (and how solve time grows) as the mesh is refined.

# create a rectangular section
geometry = sections.RectangularSection(d=100, b=50)

# create a list of mesh sizes to analyse (finest first)
mesh_sizes = [1.5, 2, 2.5, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 75, 100]
j_calc = []  # list to store torsion constants
t_calc = []  # list to store computation times

# loop through mesh sizes
for mesh_size in mesh_sizes:
    mesh = geometry.create_mesh(mesh_sizes=[mesh_size])  # create mesh
    section = CrossSection(geometry, mesh)  # create a CrossSection object
    start_time = time.time()  # start timing

    # calculate the frame properties
    (_, _, _, _, j, _) = section.calculate_frame_properties()

    t = time.time() - start_time  # stop timing
    t_calc.append(t)  # save the time
    j_calc.append(j)  # save the torsion constant

    # print the result (named 'msg' to avoid shadowing the builtin 'str')
    msg = "Mesh Size: {0}; ".format(mesh_size)
    msg += "Solution Time {0:.5f} s; ".format(t)
    msg += "Torsion Constant: {0:.12e}".format(j)
    print(msg)

correct_val = j_calc[0]  # assume the finest mesh gives the 'correct' value
j_np = np.array(j_calc)  # convert results to a numpy array
# compute the percentage error using the numpy array; arithmetic on the plain
# Python list (j_calc - correct_val) would raise a TypeError
error_vals = (j_np - correct_val) / j_np * 100

# produce a plot of the accuracy of the torsion constant with computation time
plt.loglog(t_calc[1:], error_vals[1:], "kx-")
plt.xlabel("Solver Time [s]")
plt.ylabel("Torsion Constant Error [%]")
plt.show()
| [
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.ylabel",
"sectionproperties.analysis.cross_section.CrossSection",
"matplotlib.pyplot.xlabel",
"sectionproperties.pre.sections.RectangularSection",
"numpy.array",
"time.time",
"matplotlib.pyplot.show"
] | [((222, 262), 'sectionproperties.pre.sections.RectangularSection', 'sections.RectangularSection', ([], {'d': '(100)', 'b': '(50)'}), '(d=100, b=50)\n', (249, 262), True, 'import sectionproperties.pre.sections as sections\n'), ((1222, 1238), 'numpy.array', 'np.array', (['j_calc'], {}), '(j_calc)\n', (1230, 1238), True, 'import numpy as np\n'), ((1427, 1472), 'matplotlib.pyplot.loglog', 'plt.loglog', (['t_calc[1:]', 'error_vals[1:]', '"""kx-"""'], {}), "(t_calc[1:], error_vals[1:], 'kx-')\n", (1437, 1472), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1502), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Solver Time [s]"""'], {}), "('Solver Time [s]')\n", (1483, 1502), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1543), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Torsion Constant Error [%]"""'], {}), "('Torsion Constant Error [%]')\n", (1513, 1543), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1552, 1554), True, 'import matplotlib.pyplot as plt\n'), ((613, 641), 'sectionproperties.analysis.cross_section.CrossSection', 'CrossSection', (['geometry', 'mesh'], {}), '(geometry, mesh)\n', (625, 641), False, 'from sectionproperties.analysis.cross_section import CrossSection\n'), ((691, 702), 'time.time', 'time.time', ([], {}), '()\n', (700, 702), False, 'import time\n'), ((826, 837), 'time.time', 'time.time', ([], {}), '()\n', (835, 837), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created in January 2020
@author: <NAME> (<EMAIL>)
[DL-course: Utilitary functions for exercise 1]
"""
import time
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Colors for plots.
# BUG FIX: the original list contained "o" and "p", which are not valid
# matplotlib color codes ("o" is a marker style); plotting a 4th or 5th
# class would raise.  Replaced with valid single-letter codes cyan/magenta.
colors = ["b", "r", "g", "c", "m"]
def plot_2d_dataset(
    points_training,
    labels_training,
    points_validation=None,
    labels_validation=None,
    backgroundImage=None):
    """Plot the training set (and optionally a test set) as colored scatter panels.

    Parameters
    ----------
    points_training, labels_training : paired sequences; each point is (y, x).
    points_validation, labels_validation : optional second panel, drawn only
        when both are non-empty.
    backgroundImage : optional image drawn (y-flipped) behind the points on
        the fixed [-2, 2] x [-2, 2] extent.

    The original duplicated the whole panel-drawing code for the training
    and test sets; it is factored into one nested helper here.
    """
    grid_shape = (1, 2)  # one row, two panels (train | test)
    plt.figure(figsize=(6, 3), facecolor=(1, 1, 1))  # w, h

    # Map each class label to a color index in order of first appearance in
    # the *training* labels, so both panels share the same color assignment.
    label_to_idx = {}
    for label in labels_training:
        label_to_idx.setdefault(label, len(label_to_idx))

    def _draw_panel(col, points, labels, title):
        # Render one scatter panel at grid column `col`.
        ax = plt.subplot2grid(grid_shape, (0, col))
        plt.title(title)
        if np.any(backgroundImage):
            # y-axis flip so image row 0 maps to the bottom of the plot
            ax.imshow(
                backgroundImage[::-1, ],
                extent=[-2, 2, -2, 2])  # left, right, bottom, top
        for label_c, color_idx in label_to_idx.items():
            xs = [pt[1] for pt, cls in zip(points, labels) if cls == label_c]
            ys = [pt[0] for pt, cls in zip(points, labels) if cls == label_c]
            plt.scatter(
                xs,
                ys,
                marker="o",
                edgecolors="w",
                s=15,
                color=colors[color_idx],
                label="Class {}".format(label_c))
        plt.legend()
        plt.xlabel("$x_1$", fontsize=10)
        plt.ylabel("$x_2$", fontsize=10)
        ax.tick_params(
            top=False,
            bottom=True,
            left=True,
            right=False,
            labelleft=True,
            labelbottom=True)

    _draw_panel(0, points_training, labels_training, "Training Set")
    if np.any(points_validation) and np.any(labels_validation):
        _draw_panel(1, points_validation, labels_validation, "Test Set")

    # Force a redraw so the figure refreshes in interactive/notebook use.
    plt.gcf().canvas.draw()
    time.sleep(1e-3)
def plot_learned_landscape(
    model,
    points_training = None,
    labels_training = None,
    points_validation = None,
    labels_validation = None):
    """Render the model's prediction landscape behind the data points.

    A 100x100 grid covering [-2, 2]^2 is fed through `model.predict`; the
    class-0 probability is mapped to a blue-to-red color image, which is
    passed to plot_2d_dataset as the background.
    """
    # Build a dense coordinate grid: [y, x, (yx)] after the transpose.
    grid_res = 100
    grid = np.transpose(np.mgrid[0:grid_res, 0:grid_res], [1, 2, 0])
    # Flatten and rescale integer grid indices to the [-2, 2] plot range.
    flat = np.reshape(grid, [grid_res * grid_res, -1])
    flat = 2.0 * (2 * flat / (grid_res - 1) - 1.0)
    # Evaluate the model over the whole grid.
    pred_flat = model.predict(flat, batch_size=32)
    heat = np.reshape(pred_flat[:, 0], [grid_res, grid_res])
    # Linearly blend between light blue (prediction 0) and light red (1).
    low_color = 255.0 * np.array([0.5, 0.5, 1.0])
    high_color = 255.0 * np.array([1.0, 0.5, 0.5])
    rgb = np.stack([heat] * 3, axis=2)
    rgb = ((1.0 - rgb) * low_color + rgb * high_color).astype(np.uint8)
    # Draw the dataset on top of the landscape.
    plot_2d_dataset(
        points_training,
        labels_training,
        points_validation,
        labels_validation,
        rgb)
from IPython import display
class Monitoring(tf.keras.callbacks.Callback):
    """Keras callback that live-plots the training and test losses.

    Records the per-batch training loss and per-batch test loss, and redraws
    a matplotlib figure every `refresh_steps` training batches.  The IPython
    display output is cleared and replaced so the plot updates in place
    inside a notebook.
    """
    def __init__(self,
            y_range=[-0.1, 1.0],
            x_range=[0, 10000],
            refresh_steps = 100):
        # BUG FIX: the original called super(tf.keras.callbacks.Callback, self),
        # which resolves *past* Callback in the MRO (to object), so
        # Callback.__init__ was never executed and its internal state was
        # left uninitialized.
        super(Monitoring, self).__init__()
        self._y_range = y_range              # y-axis limits of the loss plot
        self._x_range = x_range              # x-axis limits (batch counter)
        self._refresh_steps = refresh_steps  # redraw period, in training batches
    def on_train_begin(self, logs=None):
        # Reset counters/histories and create the figure when fit() starts.
        self._batch_count = 0
        self._train_loss_tracking = []
        self._val_loss_tracking = []
        plt.figure(figsize=(12, 3))  # w, h
        splt = lambda y, x, h=1, w=1, **kwargs: plt.subplot2grid((1, 1), (y, x), colspan=w, rowspan=h, **kwargs)
        self._lossPlot = splt(0, 0)
    def on_train_batch_begin(self, batch, logs=None):
        self._batch_count += 1
    def on_train_batch_end(self, batch, logs=None):
        # Track (global batch index, training loss) pairs.
        train_loss_c = logs["loss"]
        self._train_loss_tracking.append([self._batch_count, train_loss_c])
    def on_test_batch_begin(self, batch, logs=None):
        pass
    def on_test_batch_end(self, batch, logs=None):
        # Track (global batch index, test loss) pairs and refresh periodically.
        test_loss_c = logs["loss"]
        self._val_loss_tracking.append([self._batch_count, test_loss_c])
        if self._batch_count % self._refresh_steps == 0:
            self.updateFigure()
    def updateFigure(self):
        """Redraw the whole loss figure from the recorded histories."""
        plt.clf()  # Clear figure
        # Training loss
        plot_pts = self._train_loss_tracking
        pt_freq = 1  # subsampling stride for plotting (1 = every point)
        plot_x = [pt[0] for pt in plot_pts[::pt_freq]]
        plot_y = [pt[1] for pt in plot_pts[::pt_freq]]
        plt.plot(plot_x, plot_y, label="Training Loss")
        # Test loss
        plot_pts = self._val_loss_tracking
        plot_x = [pt[0] for pt in plot_pts[::pt_freq]]
        plot_y = [pt[1] for pt in plot_pts[::pt_freq]]
        plt.plot(plot_x, plot_y, label="Test Loss")
        plt.xlim(self._x_range)
        plt.ylim(self._y_range)
        plt.xlabel("iteration batches", fontsize=10)
        plt.ylabel("loss values", fontsize=10)
        # Update figure rendering in the notebook output cell.
        plt.legend()
        plt.gcf().canvas.draw()
        display.clear_output(wait=True)
        display.display(plt.gcf())
        time.sleep(1e-3)
| [
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gcf",
"numpy.any",
"time.sleep",
"IPython.display.clear_output",
"numpy.stack",
"matplotlib.pyplot.figure",
"numpy.array",
... | [((550, 597), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)', 'facecolor': '(1, 1, 1)'}), '(figsize=(6, 3), facecolor=(1, 1, 1))\n', (560, 597), True, 'import matplotlib.pyplot as plt\n'), ((789, 814), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Set"""'], {}), "('Training Set')\n", (798, 814), True, 'import matplotlib.pyplot as plt\n'), ((823, 846), 'numpy.any', 'np.any', (['backgroundImage'], {}), '(backgroundImage)\n', (829, 846), True, 'import numpy as np\n'), ((1629, 1641), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1639, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1690), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$"""'], {'fontsize': '(10)'}), "('$x_1$', fontsize=10)\n", (1668, 1690), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1727), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_2$"""'], {'fontsize': '(10)'}), "('$x_2$', fontsize=10)\n", (1705, 1727), True, 'import matplotlib.pyplot as plt\n'), ((3358, 3375), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (3368, 3375), False, 'import time\n'), ((3834, 3868), 'numpy.transpose', 'np.transpose', (['bg_points', '[1, 2, 0]'], {}), '(bg_points, [1, 2, 0])\n', (3846, 3868), True, 'import numpy as np\n'), ((3927, 3973), 'numpy.reshape', 'np.reshape', (['bg_points', '[bg_size * bg_size, -1]'], {}), '(bg_points, [bg_size * bg_size, -1])\n', (3937, 3973), True, 'import numpy as np\n'), ((4161, 4211), 'numpy.reshape', 'np.reshape', (['bg_pred_flat[:, 0]', '[bg_size, bg_size]'], {}), '(bg_pred_flat[:, 0], [bg_size, bg_size])\n', (4171, 4211), True, 'import numpy as np\n'), ((4332, 4363), 'numpy.stack', 'np.stack', (['([bg_pred] * 3)'], {'axis': '(2)'}), '([bg_pred] * 3, axis=2)\n', (4340, 4363), True, 'import numpy as np\n'), ((641, 706), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['spShape', '(y, x)'], {'colspan': 'w', 'rowspan': 'h'}), '(spShape, (y, x), colspan=w, rowspan=h, **kwargs)\n', (657, 706), 
True, 'import matplotlib.pyplot as plt\n'), ((1907, 1932), 'numpy.any', 'np.any', (['points_validation'], {}), '(points_validation)\n', (1913, 1932), True, 'import numpy as np\n'), ((1937, 1962), 'numpy.any', 'np.any', (['labels_validation'], {}), '(labels_validation)\n', (1943, 1962), True, 'import numpy as np\n'), ((2003, 2024), 'matplotlib.pyplot.title', 'plt.title', (['"""Test Set"""'], {}), "('Test Set')\n", (2012, 2024), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2060), 'numpy.any', 'np.any', (['backgroundImage'], {}), '(backgroundImage)\n', (2043, 2060), True, 'import numpy as np\n'), ((2961, 2973), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2971, 2973), True, 'import matplotlib.pyplot as plt\n'), ((2998, 3030), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$"""'], {'fontsize': '(10)'}), "('$x_1$', fontsize=10)\n", (3008, 3030), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_2$"""'], {'fontsize': '(10)'}), "('$x_2$', fontsize=10)\n", (3049, 3071), True, 'import matplotlib.pyplot as plt\n'), ((4239, 4264), 'numpy.array', 'np.array', (['[0.5, 0.5, 1.0]'], {}), '([0.5, 0.5, 1.0])\n', (4247, 4264), True, 'import numpy as np\n'), ((4291, 4316), 'numpy.array', 'np.array', (['[1.0, 0.5, 0.5]'], {}), '([1.0, 0.5, 0.5])\n', (4299, 4316), True, 'import numpy as np\n'), ((5350, 5377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (5360, 5377), True, 'import matplotlib.pyplot as plt\n'), ((6214, 6223), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6221, 6223), True, 'import matplotlib.pyplot as plt\n'), ((6448, 6495), 'matplotlib.pyplot.plot', 'plt.plot', (['plot_x', 'plot_y'], {'label': '"""Training Loss"""'}), "(plot_x, plot_y, label='Training Loss')\n", (6456, 6495), True, 'import matplotlib.pyplot as plt\n'), ((6699, 6742), 'matplotlib.pyplot.plot', 'plt.plot', (['plot_x', 'plot_y'], {'label': '"""Test Loss"""'}), 
"(plot_x, plot_y, label='Test Loss')\n", (6707, 6742), True, 'import matplotlib.pyplot as plt\n'), ((6753, 6776), 'matplotlib.pyplot.xlim', 'plt.xlim', (['self._x_range'], {}), '(self._x_range)\n', (6761, 6776), True, 'import matplotlib.pyplot as plt\n'), ((6785, 6808), 'matplotlib.pyplot.ylim', 'plt.ylim', (['self._y_range'], {}), '(self._y_range)\n', (6793, 6808), True, 'import matplotlib.pyplot as plt\n'), ((6817, 6861), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration batches"""'], {'fontsize': '(10)'}), "('iteration batches', fontsize=10)\n", (6827, 6861), True, 'import matplotlib.pyplot as plt\n'), ((6870, 6908), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss values"""'], {'fontsize': '(10)'}), "('loss values', fontsize=10)\n", (6880, 6908), True, 'import matplotlib.pyplot as plt\n'), ((6952, 6964), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6962, 6964), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7037), 'IPython.display.clear_output', 'display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (7026, 7037), False, 'from IPython import display\n'), ((7082, 7099), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (7092, 7099), False, 'import time\n'), ((5427, 5491), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 1)', '(y, x)'], {'colspan': 'w', 'rowspan': 'h'}), '((1, 1), (y, x), colspan=w, rowspan=h, **kwargs)\n', (5443, 5491), True, 'import matplotlib.pyplot as plt\n'), ((7062, 7071), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7069, 7071), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3339), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3337, 3339), True, 'import matplotlib.pyplot as plt\n'), ((6973, 6982), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6980, 6982), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip
import collections
import numpy as np
import json
EDGE_INDEX = 0
UNKNOWN_INDEX = 1
################################################################################
class DataSource(object):
    """Caption groups (lists of token lists) with optional aligned images.

    Each element of `caption_groups` is the list of captions describing one
    item; `images`, `image_filenames` and `indexes` (when given) are aligned
    with `caption_groups` position by position.
    """
    def __init__(self, caption_groups, images=None, image_filenames=None, indexes=None):
        # Default indexes are simply the positions 0..len-1.
        self.indexes = list(range(len(caption_groups))) if indexes is None else indexes
        self.caption_groups = caption_groups
        self.first_captions = [ group[0] for group in caption_groups ]
        self.images = images
        self.image_filenames = image_filenames
        # Total number of captions across all groups.
        self.size = sum(len(group) for group in caption_groups)
    def shuffle(self):
        """Shuffle groups (and aligned images/indexes) with one shared order."""
        # Re-seeding with the same seed before each shuffle keeps the
        # parallel lists aligned after shuffling.
        seed = np.random.randint(0, 0xFFFFFFFF, dtype=np.uint32)
        rand = np.random.RandomState()
        rand.seed(seed)
        rand.shuffle(self.indexes)
        rand.seed(seed)
        rand.shuffle(self.caption_groups)
        if self.images is not None:
            rand.seed(seed)
            rand.shuffle(self.images)
    def sublist(self, groups_prefix_size):
        """Return a DataSource restricted to the first `groups_prefix_size` groups."""
        # NOTE(review): image_filenames is not propagated here — confirm
        # whether that is intentional.
        return DataSource(
            caption_groups = self.caption_groups[:groups_prefix_size],
            images = self.images[:groups_prefix_size] if self.images is not None else None,
            indexes = self.indexes[:groups_prefix_size],
        )
    def first_caption_only(self):
        """Return a DataSource keeping only the first caption of every group."""
        return DataSource(
            caption_groups = [ [ group[0] ] for group in self.caption_groups ],
            images = self.images if self.images is not None else None,
            indexes = self.indexes,
        )
    def text_only(self):
        """Return a DataSource with the captions but no images."""
        return DataSource(
            caption_groups = self.caption_groups,
            indexes = self.indexes,
        )
    def get_vocab(self, min_token_freq):
        """Build the vocabulary of tokens occurring at least `min_token_freq` times.

        Returns ['<EDG>', '<UNK>'] followed by the kept tokens in sorted order.
        """
        all_tokens = (token for cap_group in self.caption_groups for cap in cap_group for token in cap)
        token_freqs = collections.Counter(all_tokens)
        # Most frequent first; ties broken alphabetically so the cut-off is
        # deterministic.
        vocab = sorted(token_freqs.keys(), key=lambda token:(-token_freqs[token], token))
        # Drop the rare tail.  ROBUSTNESS FIX: guard against popping from an
        # empty list when *no* token reaches the threshold (the original
        # raised IndexError in that case).
        while vocab and token_freqs[vocab[-1]] < min_token_freq:
            vocab.pop()
        vocab = [ '<EDG>', '<UNK>' ] + sorted(vocab)
        assert vocab[EDGE_INDEX] == '<EDG>'
        assert vocab[UNKNOWN_INDEX] == '<UNK>'
        return vocab
################################################################################
class ProcessedCaptions(object):
    """Plain container for index-encoded caption arrays.

    Holds the padded prefix index matrix, the per-caption lengths (including
    the edge token), and the matching padded target index matrix.
    """
    def __init__(self, prefixes_indexes, prefixes_lens, targets_indexes):
        # Store the three parallel arrays as-is; no copies are made.
        self.prefixes_indexes = prefixes_indexes
        self.targets_indexes = targets_indexes
        self.prefixes_lens = prefixes_lens
########################################################################################
class Dataset(object):
    """Bundles data sources with the vocabulary and index-encoded captions.

    Exactly one of `min_token_freq` (vocabulary derived from the training
    source) or an explicit `vocab` argument to `process()` must be supplied.
    """
    def __init__(self, min_token_freq=None, training_datasource=None, validation_datasource=None, testing_datasource=None):
        self.min_token_freq = min_token_freq
        self.training_datasource = training_datasource
        self.validation_datasource = validation_datasource
        self.testing_datasource = testing_datasource
        self.loaded = False
        # The fields below are populated by process() / minimal_load().
        self.vocab = None
        self.vocab_size = None
        self.token_to_index = None
        self.index_to_token = None
        self.training_images = None
        self.training_proccaps = None
        self.validation_images = None
        self.validation_proccaps = None
        self.testing_images = None
        self.testing_proccaps = None
    ############################################
    def minimal_load(self, data_save_dir):
        """Load only the vocabulary (and lookup tables) from `data_save_dir`."""
        with open(data_save_dir+'/vocab.json', 'r', encoding='utf-8') as f:
            vocab = json.loads(f.read())
        assert vocab[EDGE_INDEX] == '<EDG>'
        assert vocab[UNKNOWN_INDEX] == '<UNK>'
        self.vocab = vocab
        self.vocab_size = len(self.vocab)
        self.token_to_index = { token: i for (i, token) in enumerate(self.vocab) }
        self.index_to_token = { i: token for (i, token) in enumerate(self.vocab) }
        self.loaded = True
    ############################################
    def minimal_save(self, data_save_dir):
        """Persist the vocabulary as JSON in `data_save_dir`."""
        with open(data_save_dir+'/vocab.json', 'w', encoding='utf-8') as f:
            print(json.dumps(self.vocab), file=f)
    ############################################
    def process(self, vocab=None):
        """Build (or accept) the vocabulary and index-encode every data source.

        Raises ValueError when no training source is set, or when
        `min_token_freq` and `vocab` are not mutually exclusive.
        """
        if self.training_datasource is None:
            raise ValueError('Cannot process a dataset without a training data source')
        # BUG FIX: the original `self.min_token_freq is None == vocab is None`
        # chained as `(a is None) and (None == vocab) and (vocab is None)`,
        # so it only fired when *both* were None; parenthesizing yields the
        # intended "both set or both unset" check.
        if (self.min_token_freq is None) == (vocab is None):
            raise ValueError('Cannot set or leave out both min_token_freq and vocab')
        if vocab is None:
            self.vocab = self.training_datasource.get_vocab(self.min_token_freq)
        else:
            self.vocab = vocab
        self.vocab_size = len(self.vocab)
        self.token_to_index = { token: i for (i, token) in enumerate(self.vocab) }
        self.index_to_token = { i: token for (i, token) in enumerate(self.vocab) }
        (self.training_proccaps, self.training_images) = self._process_captions(self.training_datasource)
        if self.validation_datasource is not None:
            (self.validation_proccaps, self.validation_images) = self._process_captions(self.validation_datasource)
        if self.testing_datasource is not None:
            (self.testing_proccaps, self.testing_images) = self._process_captions(self.testing_datasource)
    ############################################
    def _process_captions(self, datasource):
        """Index-encode all captions of `datasource` into padded prefix/target arrays."""
        raw_indexes = list()
        raw_lens = list()
        images = list()
        for (i, cap_group) in enumerate(datasource.caption_groups):
            for cap in cap_group:
                if datasource.images is not None:
                    # Repeat the group's image once per caption.
                    images.append(datasource.images[i])
                cap_indexes = [ self.token_to_index.get(token, UNKNOWN_INDEX) for token in cap ]
                raw_indexes.append(cap_indexes)
                raw_lens.append(len(cap)+1) #add 1 due to edge token
        max_len = max(raw_lens)
        prefixes_indexes = np.zeros([datasource.size, max_len], np.int32)
        prefixes_lens = np.array(raw_lens, np.int32)
        targets_indexes = np.zeros([datasource.size, max_len], np.int32)
        for (i, cap_indexes) in enumerate(raw_indexes):
            # Prefix rows start with the edge token; target rows end with it.
            prefixes_indexes[i,:len(cap_indexes)+1] = [EDGE_INDEX]+cap_indexes
            targets_indexes [i,:len(cap_indexes)+1] = cap_indexes+[EDGE_INDEX]
        return (ProcessedCaptions(prefixes_indexes, prefixes_lens, targets_indexes), np.array(images) if datasource.images is not None else None)
| [
"json.dumps",
"collections.Counter",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"builtins.open",
"numpy.random.RandomState"
] | [((907, 956), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4294967295)'], {'dtype': 'np.uint32'}), '(0, 4294967295, dtype=np.uint32)\n', (924, 956), True, 'import numpy as np\n'), ((972, 995), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (993, 995), True, 'import numpy as np\n'), ((2202, 2233), 'collections.Counter', 'collections.Counter', (['all_tokens'], {}), '(all_tokens)\n', (2221, 2233), False, 'import collections\n'), ((6459, 6505), 'numpy.zeros', 'np.zeros', (['[datasource.size, max_len]', 'np.int32'], {}), '([datasource.size, max_len], np.int32)\n', (6467, 6505), True, 'import numpy as np\n'), ((6533, 6561), 'numpy.array', 'np.array', (['raw_lens', 'np.int32'], {}), '(raw_lens, np.int32)\n', (6541, 6561), True, 'import numpy as np\n'), ((6589, 6635), 'numpy.zeros', 'np.zeros', (['[datasource.size, max_len]', 'np.int32'], {}), '([datasource.size, max_len], np.int32)\n', (6597, 6635), True, 'import numpy as np\n'), ((3928, 3986), 'builtins.open', 'open', (["(data_save_dir + '/vocab.json')", '"""r"""'], {'encoding': '"""utf-8"""'}), "(data_save_dir + '/vocab.json', 'r', encoding='utf-8')\n", (3932, 3986), False, 'from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip\n'), ((4525, 4583), 'builtins.open', 'open', (["(data_save_dir + '/vocab.json')", '"""w"""'], {'encoding': '"""utf-8"""'}), "(data_save_dir + '/vocab.json', 'w', encoding='utf-8')\n", (4529, 4583), False, 'from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip\n'), ((6936, 6952), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (6944, 6952), True, 'import numpy as np\n'), ((4610, 4632), 'json.dumps', 'json.dumps', (['self.vocab'], {}), '(self.vocab)\n', (4620, 4632), False, 'import json\n')] |
import os
import sys
import cv2
import argparse
import glob
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import draw, transform
from scipy.optimize import minimize
from scipy.optimize import least_squares
import objs
import utils
#fp is in cam-ceil normal, height is in cam-floor normal
def data2scene(fp_points, height):
    """Convert floor-plan corner points into an objs.Scene.

    `fp_points` is a 2 x N array of (y, x) corner pixels in a 512 x 512
    floor-plan image measured in the camera-to-ceiling frame; `height` is
    the full layout height with the camera assumed 1.6 m above the floor.
    """
    # Ratio between the cam-to-ceiling and cam-to-floor distances; the
    # corner pixels are rescaled about the image center by this factor.
    scale = (height - 1.6) / 1.6
    size = 512
    ratio = 20 / size  # pixels -> meters (the 512 px map spans 20 m)
    scaled = ((fp_points.astype(float) - size / 2) * scale + size / 2).astype(int)
    # Assemble the scene: each column becomes one layout corner at floor level.
    scene = objs.Scene()
    scene.cameraHeight = 1.6
    scene.layoutHeight = height
    scene.layoutPoints = []
    for col in range(scaled.shape[1]):
        # Row 0 maps to the z coordinate, row 1 to x (y stays 0).
        z_coord, x_coord = (scaled[:, col] - size / 2) * ratio
        scene.layoutPoints.append(objs.GeoPoint(scene, None, (x_coord, 0, z_coord)))
    scene.genLayoutWallsByPoints(scene.layoutPoints)
    scene.updateLayoutGeometry()
    return scene
def f1_score(pred, gt):
    """Pixel-wise F1 score of a binary prediction map against ground truth.

    Both inputs are 2-D arrays whose entries are 0 or 1.
    """
    hit = pred == gt
    tp = np.sum(hit & (pred == 1))     # predicted 1, truly 1
    fp = np.sum(~hit & (pred == 1))    # predicted 1, truly 0
    fn = np.sum(~hit & (gt == 1))      # predicted 0, truly 1
    tn = np.sum(hit & (pred == 0))     # predicted 0, truly 0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    # Kept for parity with the original: computed but not returned.
    accuracy = (tp + tn) / (gt.shape[0] * gt.shape[1])
    # Harmonic mean of precision and recall.
    return 2 / ((1 / precision) + (1 / recall))
def fit_layout(data, max_cor=12):
    """Fit a rectilinear room layout to a floor-plan probability map.

    Thresholds `data` at 0.5, keeps its largest contour, then optimizes one
    axis-aligned rectangular "notch" per corner (UL/UR/DR/DL) to maximize
    the pixel F1 score against the cropped map.  At most (max_cor - 4) / 2
    notches are kept, so the result has up to `max_cor` polygon corners.

    Returns (layout_fp, fp_points): the rasterized layout mask (same shape
    as `data`) and a 2 x N array of corner coordinates (row 0 = y, row 1 = x).
    """
    #find max connective component
    ret, data_thresh = cv2.threshold(data, 0.5, 1,0)
    data_thresh = np.uint8(data_thresh)
    # NOTE(review): 3-value unpacking assumes OpenCV 3.x findContours;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm target version.
    data_img, data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
    data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
    # crop data sub as f1 true
    sub_x, sub_y, w, h = cv2.boundingRect(data_cnt[0])
    data_sub = data_thresh[sub_y:sub_y+h,sub_x:sub_x+w]
    # Start from an all-ones rectangle the size of the crop; each optimizer
    # below carves one corner notch out of it.
    pred = np.ones(data_sub.shape)
    st = 0.25        # initial notch size, as a fraction of the crop extent
    min_score = 0.1  # minimum notch-area / component-area ratio to keep a cut
    ######
    def loss_ul(x):
        # Negative F1 after carving an upper-left notch of size x (y, x).
        sample = pred.copy()
        sample[0:int(x[0]), 0:int(x[1])] = 0
        return -f1_score(sample, data_sub)
    # NOTE(review): Nelder-Mead ignored `bounds` before SciPy 1.7 — confirm.
    res = minimize(loss_ul, np.array([h*st, w*st]), method='nelder-mead',
        bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
    ul = res.x.astype(int)
    ######
    def loss_ur(x):
        # Negative F1 after carving an upper-right notch.
        sample = pred.copy()
        sample[0:int(x[0]), int(x[1]):w] = 0
        return -f1_score(sample, data_sub)
    res = minimize(loss_ur, np.array([h*st, w*(1-st)]), method='nelder-mead',
        bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
    ur = res.x.astype(int)
    ######
    def loss_dr(x):
        # Negative F1 after carving a lower-right notch.
        sample = pred.copy()
        sample[int(x[0]):h, int(x[1]):w] = 0
        return -f1_score(sample, data_sub)
    res = minimize(loss_dr, np.array([h*(1-st), w*(1-st)]), method='nelder-mead',
        bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
    dr = res.x.astype(int)
    ######
    def loss_dl(x):
        # Negative F1 after carving a lower-left notch.
        sample = pred.copy()
        sample[int(x[0]):h, 0:int(x[1])] = 0
        return -f1_score(sample, data_sub)
    res = minimize(loss_dl, np.array([h*(1-st), w*st]), method='nelder-mead',
        bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
    dl = res.x.astype(int)
    #print([ul, ur, dr, dl])
    # Score each candidate notch by its area relative to the component area.
    s_ul = ul[0]*ul[1] / np.sum(data_sub)
    s_ur = ur[0]*(w-ur[1]) / np.sum(data_sub)
    s_dr = (h-dr[0])*(w-dr[1]) / np.sum(data_sub)
    s_dl = (h-dl[0])*dl[1] / np.sum(data_sub)
    #print([s_ul, s_ur, s_dr, s_dl])
    # Rank notches by score (largest first); keep at most `max_idx` of them.
    sort_idx = list(np.argsort([s_ul, s_ur, s_dr, s_dl])[::-1])
    assert max_cor in [4, 6, 8, 10, 12]
    max_idx = (max_cor-4)/2
    if s_ul > min_score and (sort_idx.index(0) < max_idx):
        pred[0:int(ul[0]), 0:int(ul[1])] = 0
    if s_ur > min_score and (sort_idx.index(1) < max_idx):
        pred[0:int(ur[0]), int(ur[1]):w] = 0
    if s_dr > min_score and (sort_idx.index(2) < max_idx):
        pred[int(dr[0]):h, int(dr[1]):w] = 0
    if s_dl > min_score and (sort_idx.index(3) < max_idx):
        pred[int(dl[0]):h, 0:int(dl[1])] = 0
    # Trace the carved rectangle into a polygon, mapped back to full-image
    # coordinates via the crop offset (sub_x, sub_y).
    pred = np.uint8(pred)
    pred_img, pred_cnt, pred_heri = cv2.findContours(pred, 1, 3)
    polygon = [(p[0][1], p[0][0]) for p in pred_cnt[0][::-1]]
    Y = np.array([p[0]+sub_y for p in polygon])
    X = np.array([p[1]+sub_x for p in polygon])
    fp_points = np.concatenate( (Y[np.newaxis,:],X[np.newaxis,:]), axis=0)
    # Rasterize the polygon back into a full-size mask.
    layout_fp = np.zeros(data.shape)
    rr, cc = draw.polygon(fp_points[0], fp_points[1])
    rr = np.clip(rr, 0, data.shape[0]-1)
    cc = np.clip(cc, 0, data.shape[1]-1)
    layout_fp[rr,cc] = 1
    # Debug visualization of crop vs. fitted rectangle (disabled).
    if False:
        fig = plt.figure()
        ax1 = fig.add_subplot(1,2,1)
        ax1.imshow(data_sub)
        ax2 = fig.add_subplot(1,2,2)
        ax2.imshow(pred)
        plt.show()
    return layout_fp, fp_points
'''
def fit_layout(data, scale=None, max_cor=12):
ret, data_thresh = cv2.threshold(data, 0.5, 1,0)
data_thresh = np.uint8(data_thresh)
data_img, data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
sub_x,sub_y,w,h = cv2.boundingRect(data_cnt[0])
data_sub = data_thresh[sub_y:sub_y+h,sub_x:sub_x+w]
if False:
data_sub_invert = np.uint8(np.ones(data_sub.shape) - data_sub)
label_num, labels = cv2.connectedComponents(data_sub_invert)
for i in range(1, label_num):
score = np.count_nonzero(labels == i) / (data_sub.shape[0]*data_sub.shape[1])
if score < 0.05:
data_sub[labels==i] = 1
#utils.showImage(data_sub)
#img = data_sub[:,:,np.newaxis] * 255
#data_sub_img = np.concatenate([img, img, img], axis=2)
dp = np.zeros(data_sub.shape)
score_map = np.zeros(data_sub.shape)
score_map[data_sub == 1] = -10
score_map[data_sub == 0] = 1
size_h = data_sub.shape[0]-1
size_w = data_sub.shape[1]-1
ul = [(0,0), (size_h, size_w)]
ur = [(0,size_w), (size_h, 0)]
dl = [(size_h,0), (0, size_w)]
dr = [(size_h,size_w), (0, 0)]
def find_rect_pt(box):
start, end = box[0], box[1]
vec = np.clip([end[0]-start[0], end[1]-start[1]], -1, 1)
dp = np.zeros( (data_sub.shape[0], data_sub.shape[1]) )
for i in np.arange(start[0]+vec[0], end[0], vec[0]):
for j in np.arange(start[1]+vec[1], end[1], vec[1]):
dp[i][j] = dp[i-vec[0]][j] + dp[i][j-vec[1]] - dp[i-vec[0]][j-vec[1]] + score_map[i][j]
score = dp.max() / (data_sub.shape[0]*data_sub.shape[1])
if score <= 0.05:
return None, 0
point = np.argwhere(dp.max() == dp)[0]
return point, score
polygon = []
p_ul, s_ul = find_rect_pt(ul)
p_ur, s_ur = find_rect_pt(ur)
p_dr, s_dr = find_rect_pt(dr)
p_dl, s_dl = find_rect_pt(dl)
sort_idx = list(np.argsort([s_ul, s_ur, s_dr, s_dl])[::-1])
assert max_cor in [4, 6, 8, 10, 12]
max_idx = (max_cor-4)/2
if (p_ul is None) or (sort_idx.index(0) >= max_idx) :
polygon.append(ul[0])
else:
polygon += [(p_ul[0],ul[0][1]), tuple(p_ul) ,(ul[0][0], p_ul[1])]
if p_ur is None or (sort_idx.index(1) >= max_idx) :
polygon.append(ur[0])
else:
polygon += [(ur[0][0], p_ur[1]), tuple(p_ur) ,(p_ur[0], ur[0][1])]
if p_dr is None or (sort_idx.index(2) >= max_idx) :
polygon.append(dr[0])
else:
polygon += [(p_dr[0], dr[0][1]), tuple(p_dr) ,(dr[0][0], p_dr[1])]
if p_dl is None or (sort_idx.index(3) >= max_idx) :
polygon.append(dl[0])
else:
polygon += [(dl[0][0], p_dl[1]), tuple(p_dl) ,(p_dl[0], dl[0][1])]
Y = np.array([p[0]+sub_y for p in polygon])
X = np.array([p[1]+sub_x for p in polygon])
fp_points = np.concatenate( (Y[np.newaxis,:],X[np.newaxis,:]), axis=0)
if scale is not None:
fp_points = fp_points.astype(float)
fp_points[0] -= data.shape[0]/2
fp_points[1] -= data.shape[1]/2
fp_points *= scale
fp_points[0] += data.shape[0]/2
fp_points[1] += data.shape[1]/2
fp_points = fp_points.astype(int)
layout_fp = np.zeros(data.shape)
rr, cc = draw.polygon(fp_points[0],fp_points[1])
rr = np.clip(rr, 0, data.shape[0]-1)
cc = np.clip(cc, 0, data.shape[1]-1)
layout_fp[rr,cc] = 1
return layout_fp, fp_points
'''
if __name__ == '__main__':
    # CLI: --i points at a directory of per-sample .npy result dumps.
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    args = parser.parse_args()
    data_path = args.i
    #for filepath in glob.iglob(data_path + '/*.npy'):
    #for i in range(404):
    # Hand-picked sample indices to inspect.
    for i in [91, 104, 145, 159, 167, 194, 215, 223, 253, 256, 261, 266, 300, 304, 357, 358]:
        filepath = os.path.join(data_path, '{0}.npy'.format(i))
        print(filepath)
        # NOTE(review): this loads a pickled dict; NumPy >= 1.16.3 additionally
        # requires allow_pickle=True here — confirm the target NumPy version.
        data = np.load(filepath, encoding = 'bytes')[()]
        #color = data['color']
        #fp_floor = data['fp_floor']
        fp_pred = data['pred_fp_merge']
        # Fit the rectilinear layout to the predicted floor-plan map.
        layout_fp, fp_points = fit_layout(fp_pred)
        #print(fp_points)
        if True:
            # Show the raw prediction above the fitted layout mask.
            fig = plt.figure()
            ax3 = fig.add_subplot(2,1,1)
            ax3.imshow(fp_pred)
            ax4 = fig.add_subplot(2,1,2)
            ax4.imshow(layout_fp)
plt.show() | [
"numpy.uint8",
"numpy.clip",
"numpy.argsort",
"numpy.array",
"argparse.ArgumentParser",
"cv2.threshold",
"cv2.contourArea",
"numpy.concatenate",
"numpy.ones",
"matplotlib.pyplot.show",
"skimage.draw.polygon",
"numpy.sum",
"numpy.zeros",
"objs.Scene",
"matplotlib.pyplot.figure",
"cv2.fi... | [((778, 790), 'objs.Scene', 'objs.Scene', ([], {}), '()\n', (788, 790), False, 'import objs\n'), ((1237, 1255), 'numpy.zeros', 'np.zeros', (['gt.shape'], {}), '(gt.shape)\n', (1245, 1255), True, 'import numpy as np\n'), ((1262, 1280), 'numpy.zeros', 'np.zeros', (['gt.shape'], {}), '(gt.shape)\n', (1270, 1280), True, 'import numpy as np\n'), ((1291, 1309), 'numpy.zeros', 'np.zeros', (['gt.shape'], {}), '(gt.shape)\n', (1299, 1309), True, 'import numpy as np\n'), ((1316, 1334), 'numpy.zeros', 'np.zeros', (['gt.shape'], {}), '(gt.shape)\n', (1324, 1334), True, 'import numpy as np\n'), ((1499, 1509), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1505, 1509), True, 'import numpy as np\n'), ((1516, 1526), 'numpy.sum', 'np.sum', (['FP'], {}), '(FP)\n', (1522, 1526), True, 'import numpy as np\n'), ((1537, 1547), 'numpy.sum', 'np.sum', (['FN'], {}), '(FN)\n', (1543, 1547), True, 'import numpy as np\n'), ((1554, 1564), 'numpy.sum', 'np.sum', (['TN'], {}), '(TN)\n', (1560, 1564), True, 'import numpy as np\n'), ((1861, 1891), 'cv2.threshold', 'cv2.threshold', (['data', '(0.5)', '(1)', '(0)'], {}), '(data, 0.5, 1, 0)\n', (1874, 1891), False, 'import cv2\n'), ((1910, 1931), 'numpy.uint8', 'np.uint8', (['data_thresh'], {}), '(data_thresh)\n', (1918, 1931), True, 'import numpy as np\n'), ((1969, 2004), 'cv2.findContours', 'cv2.findContours', (['data_thresh', '(1)', '(2)'], {}), '(data_thresh, 1, 2)\n', (1985, 2004), False, 'import cv2\n'), ((2134, 2163), 'cv2.boundingRect', 'cv2.boundingRect', (['data_cnt[0]'], {}), '(data_cnt[0])\n', (2150, 2163), False, 'import cv2\n'), ((2235, 2258), 'numpy.ones', 'np.ones', (['data_sub.shape'], {}), '(data_sub.shape)\n', (2242, 2258), True, 'import numpy as np\n'), ((4494, 4508), 'numpy.uint8', 'np.uint8', (['pred'], {}), '(pred)\n', (4502, 4508), True, 'import numpy as np\n'), ((4546, 4574), 'cv2.findContours', 'cv2.findContours', (['pred', '(1)', '(3)'], {}), '(pred, 1, 3)\n', (4562, 4574), False, 'import cv2\n'), ((4651, 
4694), 'numpy.array', 'np.array', (['[(p[0] + sub_y) for p in polygon]'], {}), '([(p[0] + sub_y) for p in polygon])\n', (4659, 4694), True, 'import numpy as np\n'), ((4700, 4743), 'numpy.array', 'np.array', (['[(p[1] + sub_x) for p in polygon]'], {}), '([(p[1] + sub_x) for p in polygon])\n', (4708, 4743), True, 'import numpy as np\n'), ((4757, 4817), 'numpy.concatenate', 'np.concatenate', (['(Y[np.newaxis, :], X[np.newaxis, :])'], {'axis': '(0)'}), '((Y[np.newaxis, :], X[np.newaxis, :]), axis=0)\n', (4771, 4817), True, 'import numpy as np\n'), ((4835, 4855), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (4843, 4855), True, 'import numpy as np\n'), ((4870, 4910), 'skimage.draw.polygon', 'draw.polygon', (['fp_points[0]', 'fp_points[1]'], {}), '(fp_points[0], fp_points[1])\n', (4882, 4910), False, 'from skimage import draw, transform\n'), ((4921, 4954), 'numpy.clip', 'np.clip', (['rr', '(0)', '(data.shape[0] - 1)'], {}), '(rr, 0, data.shape[0] - 1)\n', (4928, 4954), True, 'import numpy as np\n'), ((4963, 4996), 'numpy.clip', 'np.clip', (['cc', '(0)', '(data.shape[1] - 1)'], {}), '(cc, 0, data.shape[1] - 1)\n', (4970, 4996), True, 'import numpy as np\n'), ((8981, 9006), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9004, 9006), False, 'import argparse\n'), ((2479, 2505), 'numpy.array', 'np.array', (['[h * st, w * st]'], {}), '([h * st, w * st])\n', (2487, 2505), True, 'import numpy as np\n'), ((2814, 2846), 'numpy.array', 'np.array', (['[h * st, w * (1 - st)]'], {}), '([h * st, w * (1 - st)])\n', (2822, 2846), True, 'import numpy as np\n'), ((3153, 3191), 'numpy.array', 'np.array', (['[h * (1 - st), w * (1 - st)]'], {}), '([h * (1 - st), w * (1 - st)])\n', (3161, 3191), True, 'import numpy as np\n'), ((3500, 3532), 'numpy.array', 'np.array', (['[h * (1 - st), w * st]'], {}), '([h * (1 - st), w * st])\n', (3508, 3532), True, 'import numpy as np\n'), ((3714, 3730), 'numpy.sum', 'np.sum', (['data_sub'], {}), '(data_sub)\n', 
(3720, 3730), True, 'import numpy as np\n'), ((3761, 3777), 'numpy.sum', 'np.sum', (['data_sub'], {}), '(data_sub)\n', (3767, 3777), True, 'import numpy as np\n'), ((3812, 3828), 'numpy.sum', 'np.sum', (['data_sub'], {}), '(data_sub)\n', (3818, 3828), True, 'import numpy as np\n'), ((3859, 3875), 'numpy.sum', 'np.sum', (['data_sub'], {}), '(data_sub)\n', (3865, 3875), True, 'import numpy as np\n'), ((5053, 5065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5063, 5065), True, 'import matplotlib.pyplot as plt\n'), ((5207, 5217), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5215, 5217), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1082), 'objs.GeoPoint', 'objs.GeoPoint', (['scene', 'None', 'xyz'], {}), '(scene, None, xyz)\n', (1064, 1082), False, 'import objs\n'), ((3937, 3973), 'numpy.argsort', 'np.argsort', (['[s_ul, s_ur, s_dr, s_dl]'], {}), '([s_ul, s_ur, s_dr, s_dl])\n', (3947, 3973), True, 'import numpy as np\n'), ((9406, 9441), 'numpy.load', 'np.load', (['filepath'], {'encoding': '"""bytes"""'}), "(filepath, encoding='bytes')\n", (9413, 9441), True, 'import numpy as np\n'), ((9683, 9695), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9693, 9695), True, 'import matplotlib.pyplot as plt\n'), ((9861, 9871), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9869, 9871), True, 'import matplotlib.pyplot as plt\n'), ((2040, 2058), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (2055, 2058), False, 'import cv2\n')] |
import cv2
import dlib
import numpy as np
from PIL import Image
import face_blend_common as fbc
def load_image(image_bytes):
    """Decode raw image bytes into a pair of arrays.

    :param image_bytes: encoded image data (e.g. the contents of a JPEG/PNG).
    :return: (img, img_display) where ``img_display`` is the image exactly as
        decoded and ``img`` is its channel-swapped copy used for processing.
    """
    print('Reading image')
    # cv2.imdecode with flag -1 returns the image unchanged, including any
    # alpha channel that may be present.
    decoded = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), -1)
    converted = cv2.cvtColor(decoded, cv2.COLOR_RGB2BGR)
    return converted, decoded
def load_models(predictor_path):
    """Create the dlib face detector and landmark shape predictor.

    :param predictor_path: path to a dlib shape-predictor model file.
    :return: (detector, predictor) tuple.
    """
    print('Initializing the face detector instance')
    face_detector = dlib.get_frontal_face_detector()
    print('Loading the shape predictor model')
    landmark_predictor = dlib.shape_predictor(predictor_path)
    return face_detector, landmark_predictor
def get_convex_hull(hull_index, points_source, points_target):
    """Select the landmark points referenced by a convex-hull index list.

    :param hull_index: iterable of single-element index containers, as
        produced by ``cv2.convexHull(..., returnPoints=False)``.
    :param points_source: landmark points of the source face.
    :param points_target: landmark points of the target face.
    :return: (hull_source, hull_target) lists built by applying the same
        indices to both point lists.
    """
    print('Creating convex hull lists')
    hull_source = [points_source[idx[0]] for idx in hull_index]
    hull_target = [points_target[idx[0]] for idx in hull_index]
    return hull_source, hull_target
def find_centroid(img_target, hull_target):
    """Rasterise the target hull into a mask and locate its centroid.

    :param img_target: target image; the mask copies its shape and dtype
        (assumed 3-channel here, since channel 1 is used for the moments --
        confirm with callers).
    :param hull_target: hull points as (x, y) index pairs.
    :return: ((cx, cy), mask) where the centre comes from the mask's image
        moments.
    """
    print('Calculating mask for seamless cloning')
    hull_points = [(point[0], point[1]) for point in hull_target]
    mask = np.zeros(img_target.shape, dtype=img_target.dtype)
    cv2.fillConvexPoly(mask, np.int32(hull_points), (255, 255, 255))
    print('Finding centroid')
    moments = cv2.moments(mask[:, :, 1])
    centroid = (int(moments['m10'] / moments['m00']),
                int(moments['m01'] / moments['m00']))
    return centroid, mask
def calculate_triangulation(img_target, hull_source, hull_target, img_source_display, img_target_display):
    """Compute a Delaunay triangulation of the target hull and map it back
    onto the source hull.

    Parameters
    ----------
    img_target : image whose bounding rectangle constrains the triangulation.
    hull_source, hull_target : corresponding hull point lists (same length).
    img_source_display, img_target_display : display copies; the triangle
        edges are drawn onto local copies of these for visualisation only.

    Returns
    -------
    (tris_source, tris_target) : parallel lists of 3-point triangles.

    NOTE(review): calls ``quit()`` (SystemExit) when no triangles are found,
    which is unusual for library code -- consider raising instead.
    """
    print('Finding Delaunay traingulation for convex hull points')
    img_target_size = img_target.shape
    # OpenCV-style rect: (x, y, width, height) covering the whole target.
    rect = (0, 0, img_target_size[1], img_target_size[0])
    dt = fbc.calculateDelaunayTriangles(rect, hull_target)
    # If no Delaunay Triangles were found, quit
    if len(dt) == 0:
        quit()
    print('Applying triangulation to images')
    # Draw on copies so the caller's display images are left untouched.
    img_source_temp = img_source_display.copy()
    img_target_temp = img_target_display.copy()
    tris_source = []
    tris_target = []
    # Each dt entry holds three indices into the hull point lists; the same
    # indices are used for both hulls so the triangles correspond 1:1.
    for i in range(0, len(dt)):
        tri_source = []
        tri_target = []
        for j in range(0, 3):
            tri_source.append(hull_source[dt[i][j]])
            tri_target.append(hull_target[dt[i][j]])
        tris_source.append(tri_source)
        tris_target.append(tri_target)
    cv2.polylines(img_source_temp, np.array(tris_source), True, (0, 0, 255), 2)
    cv2.polylines(img_target_temp, np.array(tris_target), True, (0, 0, 255), 2)
    return tris_source, tris_target
def swap_image(img_source_bytes, img_target_bytes, predictor_path):
    """Swap the face from the source image onto the target image.

    Pipeline: load models/images -> detect landmarks -> convex hull ->
    mask + centroid -> Delaunay triangulation -> per-triangle warp ->
    seamless clone.

    Returns ``(status, result)`` where ``result`` is a PIL Image on success;
    when landmark detection fails, ``status`` is ``'fail'`` and the second
    element is whatever ``fbc.getLandmarks`` returned instead of points.
    """
    detector, predictor = load_models(predictor_path)
    img_source, img_source_display = load_image(img_source_bytes)
    img_target, img_target_display = load_image(img_target_bytes)
    # The warp accumulates into a copy of the target image.
    img_source_warped = np.copy(img_target)
    # Read array of corresponding points
    status, points_source = fbc.getLandmarks(detector, predictor, img_source)
    if status=='fail':
        return status, points_source
    status, points_target = fbc.getLandmarks(detector, predictor, img_target)
    if status=='fail':
        return status, points_target
    # Convex hull (indices only; the same indices select both point sets)
    hull_index = cv2.convexHull(np.array(points_target), returnPoints=False)
    hull_source, hull_target = get_convex_hull(hull_index, points_source, points_target)
    # Calculate Mask and Find Centroid
    center, mask = find_centroid(img_target, hull_target)
    # Calculate triangulation
    tris_source, tris_target = calculate_triangulation(
        img_target, hull_source, hull_target, img_source_display, img_target_display
    )
    # Simple Alpha Blending
    print('Applying affine transformation to Delaunay triangles')
    for i in range(0, len(tris_source)):
        fbc.warpTriangle(img_source, img_source_warped, tris_source[i], tris_target[i])
    print('Cloning seamlessly')
    output = cv2.seamlessClone(np.uint8(img_source_warped), img_target, mask, center, cv2.NORMAL_CLONE)
    return status, Image.fromarray(output) | [
"numpy.uint8",
"numpy.copy",
"PIL.Image.fromarray",
"numpy.int32",
"face_blend_common.warpTriangle",
"dlib.shape_predictor",
"face_blend_common.calculateDelaunayTriangles",
"face_blend_common.getLandmarks",
"dlib.get_frontal_face_detector",
"numpy.zeros",
"numpy.array",
"cv2.cvtColor",
"cv2.... | [((238, 282), 'cv2.cvtColor', 'cv2.cvtColor', (['img_display', 'cv2.COLOR_RGB2BGR'], {}), '(img_display, cv2.COLOR_RGB2BGR)\n', (250, 282), False, 'import cv2\n'), ((414, 446), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (444, 446), False, 'import dlib\n'), ((511, 547), 'dlib.shape_predictor', 'dlib.shape_predictor', (['predictor_path'], {}), '(predictor_path)\n', (531, 547), False, 'import dlib\n'), ((1156, 1206), 'numpy.zeros', 'np.zeros', (['img_target.shape'], {'dtype': 'img_target.dtype'}), '(img_target.shape, dtype=img_target.dtype)\n', (1164, 1206), True, 'import numpy as np\n'), ((1311, 1337), 'cv2.moments', 'cv2.moments', (['mask[:, :, 1]'], {}), '(mask[:, :, 1])\n', (1322, 1337), False, 'import cv2\n'), ((1713, 1762), 'face_blend_common.calculateDelaunayTriangles', 'fbc.calculateDelaunayTriangles', (['rect', 'hull_target'], {}), '(rect, hull_target)\n', (1743, 1762), True, 'import face_blend_common as fbc\n'), ((2812, 2831), 'numpy.copy', 'np.copy', (['img_target'], {}), '(img_target)\n', (2819, 2831), True, 'import numpy as np\n'), ((2902, 2951), 'face_blend_common.getLandmarks', 'fbc.getLandmarks', (['detector', 'predictor', 'img_source'], {}), '(detector, predictor, img_source)\n', (2918, 2951), True, 'import face_blend_common as fbc\n'), ((3040, 3089), 'face_blend_common.getLandmarks', 'fbc.getLandmarks', (['detector', 'predictor', 'img_target'], {}), '(detector, predictor, img_target)\n', (3056, 3089), True, 'import face_blend_common as fbc\n'), ((186, 222), 'numpy.frombuffer', 'np.frombuffer', (['image_bytes', 'np.uint8'], {}), '(image_bytes, np.uint8)\n', (199, 222), True, 'import numpy as np\n'), ((1237, 1253), 'numpy.int32', 'np.int32', (['hull8U'], {}), '(hull8U)\n', (1245, 1253), True, 'import numpy as np\n'), ((2369, 2390), 'numpy.array', 'np.array', (['tris_source'], {}), '(tris_source)\n', (2377, 2390), True, 'import numpy as np\n'), ((2449, 2470), 'numpy.array', 'np.array', 
(['tris_target'], {}), '(tris_target)\n', (2457, 2470), True, 'import numpy as np\n'), ((3201, 3224), 'numpy.array', 'np.array', (['points_target'], {}), '(points_target)\n', (3209, 3224), True, 'import numpy as np\n'), ((3759, 3838), 'face_blend_common.warpTriangle', 'fbc.warpTriangle', (['img_source', 'img_source_warped', 'tris_source[i]', 'tris_target[i]'], {}), '(img_source, img_source_warped, tris_source[i], tris_target[i])\n', (3775, 3838), True, 'import face_blend_common as fbc\n'), ((3903, 3930), 'numpy.uint8', 'np.uint8', (['img_source_warped'], {}), '(img_source_warped)\n', (3911, 3930), True, 'import numpy as np\n'), ((3996, 4019), 'PIL.Image.fromarray', 'Image.fromarray', (['output'], {}), '(output)\n', (4011, 4019), False, 'from PIL import Image\n')] |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import torch
import torch.nn as nn
def parse_skipgram(fname):
    """Parse a skipgram embedding file into a (nodes, features) array.

    The file is a whitespace-separated token stream: node count, feature
    count, then for each node a 1-based node id followed by its feature
    values.

    :param fname: path to the embedding file.
    :return: numpy array of shape (nb_nodes, nb_features).
    """
    with open(fname) as fh:
        tokens = fh.read().split()
    nb_nodes = int(tokens[0])
    nb_features = int(tokens[1])
    ret = np.empty((nb_nodes, nb_features))
    pos = 2
    for _ in range(nb_nodes):
        node = int(tokens[pos]) - 1  # ids in the file are 1-based
        pos += 1
        ret[node] = [float(tok) for tok in tokens[pos:pos + nb_features]]
        pos += nb_features
    return ret
# Process a (subset of) a TU dataset into standard form
def process_tu(data, nb_nodes):
    """Convert a (subset of a) TU dataset into padded dense arrays.

    :param data: indexable dataset exposing ``num_features``; each item has
        ``x`` (node features), ``y`` (graph label) and ``edge_index``
        (2 x E array of edge endpoints) -- presumably torch-geometric style,
        confirm with callers.
    :param nb_nodes: node count every graph is zero-padded to.
    :return: (features, adjacency, labels, sizes, masks) numpy arrays.
    """
    nb_graphs = len(data)
    ft_size = data.num_features
    features = np.zeros((nb_graphs, nb_nodes, ft_size))
    adjacency = np.zeros((nb_graphs, nb_nodes, nb_nodes))
    labels = np.zeros(nb_graphs)
    sizes = np.zeros(nb_graphs, dtype=np.int32)
    masks = np.zeros((nb_graphs, nb_nodes))
    for g in range(nb_graphs):
        graph = data[g]
        n = graph.x.shape[0]
        sizes[g] = n
        # Real nodes occupy the first n slots; the rest stays zero padding.
        features[g, :n] = graph.x
        labels[g] = graph.y[0]
        masks[g, :n] = 1.0
        edges = graph.edge_index
        # Build a dense nb_nodes x nb_nodes adjacency from the edge list.
        coo = sp.coo_matrix(
            (np.ones(edges.shape[1]), (edges[0, :], edges[1, :])),
            shape=(nb_nodes, nb_nodes),
        )
        adjacency[g] = coo.todense()
    return features, adjacency, labels, sizes, masks
def micro_f1(logits, labels):
    """Compute the micro-averaged F1 score for multi-label predictions.

    :param logits: raw (pre-sigmoid) prediction scores.
    :param labels: ground-truth 0/1 labels, same shape as ``logits``.
    :return: scalar micro-F1 = 2*P*R / (P+R).
    """
    # Threshold at 0.5 by rounding the sigmoid activation.
    preds = torch.round(nn.Sigmoid()(logits))
    # Cast to integer type so the products below are exact.
    preds = preds.long()
    labels = labels.long()
    # Confusion counts. True negatives are not needed for micro-F1, so the
    # original dead `tn` computation has been removed.
    tp = torch.nonzero(preds * labels).shape[0] * 1.0
    fp = torch.nonzero(preds * (labels - 1)).shape[0] * 1.0
    fn = torch.nonzero((preds - 1) * labels).shape[0] * 1.0
    # Compute micro-f1 score
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    f1 = (2 * prec * rec) / (prec + rec)
    return f1
"""
Prepare adjacency matrix by expanding up to a given neighbourhood.
This will insert loops on every node.
Finally, the matrix is converted to bias vectors.
Expected shape: [graph, nodes, nodes]
"""
def adj_to_bias(adj, sizes, nhood=1):
    """Turn batched adjacency matrices into additive attention biases.

    Each graph's adjacency is expanded to its ``nhood``-step reachability
    (self-loops inserted at every step); entries reachable within the first
    ``sizes[g]`` nodes become exactly 1, so the returned bias is 0 where
    attention is allowed and -1e9 where it is masked out.

    :param adj: array of shape [graph, nodes, nodes].
    :param sizes: per-graph count of real (unpadded) nodes.
    :param nhood: neighbourhood radius to expand to.
    :return: bias array of the same shape as ``adj``.
    """
    nb_graphs = adj.shape[0]
    mt = np.empty(adj.shape)
    eye = np.eye(adj.shape[1])
    for g in range(nb_graphs):
        reach = eye.copy()
        for _ in range(nhood):
            reach = np.matmul(reach, adj[g] + eye)
        # Binarise reachability, but only inside the real node block --
        # padded rows/columns are left as computed, matching the original.
        n = sizes[g]
        block = reach[:n, :n]
        block[block > 0.0] = 1.0
        mt[g] = reach
    return -1e9 * (1.0 - mt)
###############################################
# This section of code adapted from tkipf/gcn #
###############################################
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    :param filename: path to the index file.
    :return: list of integers in file order.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original iterated a bare open() result and leaked the handle.
    with open(filename) as fh:
        return [int(line.strip()) for line in fh]
def sample_mask(idx, l):
    """Create a boolean mask of length ``l`` that is True at ``idx``.

    :param idx: index or array of indices to set.
    :param l: total mask length.
    :return: numpy boolean array.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # np.bool (an alias of the builtin) was removed in NumPy 1.24, so the
    # original `dtype=np.bool` raises AttributeError on modern NumPy;
    # the builtin `bool` is the supported, identical spelling.
    return np.array(mask, dtype=bool)
def load_data(dataset_str): # {'pubmed', 'citeseer', 'cora'}
    """Load a Planetoid-style citation dataset from ``data/ind.<name>.*``.

    :param dataset_str: one of 'pubmed', 'citeseer', 'cora'.
    :return: (adj, features, labels, idx_train, idx_val, idx_test) where
        ``adj`` is a sparse adjacency matrix, ``features`` a lil sparse
        feature matrix, ``labels`` a dense one-hot matrix, and the idx_*
        values are index collections for the three splits.
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # latin1 is needed to unpickle Python-2-era files under Python 3.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    # The test indices on disk are shuffled; sort them to get the on-matrix order.
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer' :
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Put the (shuffled) test rows back into their natural positions.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    idx_test = test_idx_range.tolist()
    # Conventional Planetoid split: first len(y) nodes train, next 500 validation.
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    return adj, features, labels, idx_train, idx_val, idx_test
def sparse_to_tuple(sparse_mx, insert_batch=False):
    """Convert scipy sparse matrices to (coords, values, shape) tuples.

    Accepts either a single matrix or a list of matrices; a list is
    converted in place (each element replaced by its tuple) and returned.
    With ``insert_batch=True`` a leading batch dimension of size 1 is
    prepended to both coords and shape.
    """
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        if insert_batch:
            # Prefix every coordinate with batch index 0.
            coords = np.vstack((np.zeros(mx.row.shape[0]), mx.row, mx.col)).transpose()
            shape = (1,) + mx.shape
        else:
            coords = np.vstack((mx.row, mx.col)).transpose()
            shape = mx.shape
        return coords, mx.data, shape

    if isinstance(sparse_mx, list):
        # Preserve the original's in-place mutation of the caller's list.
        for i, mx in enumerate(sparse_mx):
            sparse_mx[i] = to_tuple(mx)
        return sparse_mx
    return to_tuple(sparse_mx)
def standardize_data(f, train_mask):
    """Standardize a feature matrix using training-row statistics.

    Columns whose training-set standard deviation is zero are dropped, then
    every column is shifted/scaled by the training mean and std. Returns the
    standardized dense matrix (note: unlike some siblings, no tuple form).
    """
    # standardize data
    f = f.todense()
    # First pass: find columns with zero variance over the training rows.
    mu = f[train_mask == True, :].mean(axis=0)
    sigma = f[train_mask == True, :].std(axis=0)
    f = f[:, np.squeeze(np.array(sigma > 0))]
    # Second pass: the column set changed, so recompute the statistics.
    mu = f[train_mask == True, :].mean(axis=0)
    sigma = f[train_mask == True, :].std(axis=0)
    f = (f - mu) / sigma
    return f
def preprocess_features(features):
    """Row-normalize a sparse feature matrix.

    :param features: scipy sparse matrix of node features.
    :return: (dense matrix, tuple representation) of the row-normalized
        features.
    """
    row_sums = np.array(features.sum(1))
    inv = np.power(row_sums, -1).flatten()
    inv[np.isinf(inv)] = 0.  # all-zero rows stay all-zero
    features = sp.diags(inv).dot(features)
    return features.todense(), sparse_to_tuple(features)
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix (D^-1/2 A D^-1/2)."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt = np.power(degrees, -0.5).flatten()
    inv_sqrt[np.isinf(inv_sqrt)] = 0.  # zero-degree nodes contribute nothing
    d_half = sp.diags(inv_sqrt)
    # Keep the exact original operation order (including the transpose).
    return adj.dot(d_half).transpose().dot(d_half).tocoo()
def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    # Add self-loops before symmetric normalization (the GCN
    # "renormalization trick": D^-1/2 (A + I) D^-1/2).
    adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
    return sparse_to_tuple(adj_normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse FloatTensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # Stack row/col indices into the 2 x nnz int64 layout torch expects.
    index_array = np.vstack((coo.row, coo.col)).astype(np.int64)
    indices = torch.from_numpy(index_array)
    values = torch.from_numpy(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
# Perform train-test split
# Takes in adjacency matrix in sparse format
# Returns: adj_train, train_edges, val_edges, val_edges_false,
# test_edges, test_edges_false
def mask_test_edges(adj, test_frac=.1, val_frac=.05, prevent_disconnect=True, verbose=False):
    # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
    """Randomly split an undirected graph's edges into train/val/test sets.

    Adapted from https://github.com/tkipf/gae.

    :param adj: scipy sparse adjacency matrix (assumed symmetric; the
        diagonal is stripped).
    :param test_frac: fraction of edges to reserve for testing.
    :param val_frac: fraction of edges to reserve for validation.
    :param prevent_disconnect: if True, never remove an edge whose removal
        would increase the number of connected components.
    :param verbose: if True, print progress messages.
    :return: (adj_train, train_edges, train_edges_false, val_edges,
        val_edges_false, test_edges, test_edges_false); edge arrays list
        each undirected edge in one direction only.

    NOTE(review): uses nx.from_scipy_sparse_matrix / nx.adjacency_matrix
    semantics of networkx < 3.0 -- the former was removed in networkx 3.0;
    confirm the pinned networkx version.
    """
    if verbose == True:
        print('preprocessing...')
    # Remove diagonal elements
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0
    g = nx.from_scipy_sparse_matrix(adj)
    orig_num_cc = nx.number_connected_components(g)
    adj_triu = sp.triu(adj) # upper triangular portion of adj matrix
    adj_tuple = sparse_to_tuple(adj_triu) # (coords, values, shape), edges only 1 way
    edges = adj_tuple[0] # all edges, listed only once (not 2 ways)
    # edges_all = sparse_to_tuple(adj)[0] # ALL edges (includes both ways)
    num_test = int(np.floor(edges.shape[0] * test_frac)) # controls how large the test set should be
    num_val = int(np.floor(edges.shape[0] * val_frac)) # controls how large the validation set should be
    # Store edges in list of ordered tuples (node1, node2) where node1 < node2
    edge_tuples = [(min(edge[0], edge[1]), max(edge[0], edge[1])) for edge in edges]
    all_edge_tuples = set(edge_tuples)
    train_edges = set(edge_tuples) # initialize train_edges to have all edges
    test_edges = set()
    val_edges = set()
    if verbose == True:
        print('generating test/val sets...')
    # Iterate over shuffled edges, add to train/val sets
    np.random.shuffle(edge_tuples)
    for edge in edge_tuples:
        # print edge
        node1 = edge[0]
        node2 = edge[1]
        # If removing edge would disconnect a connected component, backtrack and move on
        g.remove_edge(node1, node2)
        if prevent_disconnect == True:
            if nx.number_connected_components(g) > orig_num_cc:
                g.add_edge(node1, node2)
                continue
        # Fill test_edges first
        if len(test_edges) < num_test:
            test_edges.add(edge)
            train_edges.remove(edge)
        # Then, fill val_edges
        elif len(val_edges) < num_val:
            val_edges.add(edge)
            train_edges.remove(edge)
        # Both edge lists full --> break loop
        elif len(test_edges) == num_test and len(val_edges) == num_val:
            break
    if (len(val_edges) < num_val or len(test_edges) < num_test):
        print("WARNING: not enough removable edges to perform full train-test split!")
        print("Num. (test, val) edges requested: (", num_test, ", ", num_val, ")")
        print("Num. (test, val) edges returned: (", len(test_edges), ", ", len(val_edges), ")")
    if prevent_disconnect == True:
        assert nx.number_connected_components(g) == orig_num_cc
    if verbose == True:
        print('creating false test edges...')
    # Negative sampling: draw random node pairs until enough non-edges found.
    test_edges_false = set()
    while len(test_edges_false) < num_test:
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        false_edge = (min(idx_i, idx_j), max(idx_i, idx_j))
        # Make sure false_edge not an actual edge, and not a repeat
        if false_edge in all_edge_tuples:
            continue
        if false_edge in test_edges_false:
            continue
        test_edges_false.add(false_edge)
    if verbose == True:
        print('creating false val edges...')
    val_edges_false = set()
    while len(val_edges_false) < num_val:
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        false_edge = (min(idx_i, idx_j), max(idx_i, idx_j))
        # Make sure false_edge in not an actual edge, not in test_edges_false, not a repeat
        if false_edge in all_edge_tuples or \
            false_edge in test_edges_false or \
            false_edge in val_edges_false:
            continue
        val_edges_false.add(false_edge)
    if verbose == True:
        print('creating false train edges...')
    train_edges_false = set()
    while len(train_edges_false) < len(train_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        false_edge = (min(idx_i, idx_j), max(idx_i, idx_j))
        # Make sure false_edge in not an actual edge, not in test_edges_false,
        # not in val_edges_false, not a repeat
        if false_edge in all_edge_tuples or \
            false_edge in test_edges_false or \
            false_edge in val_edges_false or \
            false_edge in train_edges_false:
            continue
        train_edges_false.add(false_edge)
    if verbose == True:
        print('final checks for disjointness...')
    # assert: false_edges are actually false (not in all_edge_tuples)
    assert test_edges_false.isdisjoint(all_edge_tuples)
    assert val_edges_false.isdisjoint(all_edge_tuples)
    assert train_edges_false.isdisjoint(all_edge_tuples)
    # assert: test, val, train false edges disjoint
    assert test_edges_false.isdisjoint(val_edges_false)
    assert test_edges_false.isdisjoint(train_edges_false)
    assert val_edges_false.isdisjoint(train_edges_false)
    # assert: test, val, train positive edges disjoint
    assert val_edges.isdisjoint(train_edges)
    assert test_edges.isdisjoint(train_edges)
    assert val_edges.isdisjoint(test_edges)
    if verbose == True:
        print('creating adj_train...')
    # Re-build adj matrix using remaining graph
    adj_train = nx.adjacency_matrix(g)
    # Convert edge-lists to numpy arrays
    train_edges = np.array([list(edge_tuple) for edge_tuple in train_edges])
    train_edges_false = np.array([list(edge_tuple) for edge_tuple in train_edges_false])
    val_edges = np.array([list(edge_tuple) for edge_tuple in val_edges])
    val_edges_false = np.array([list(edge_tuple) for edge_tuple in val_edges_false])
    test_edges = np.array([list(edge_tuple) for edge_tuple in test_edges])
    test_edges_false = np.array([list(edge_tuple) for edge_tuple in test_edges_false])
    if verbose == True:
        print('Done with train-test split!')
        print('')
    # NOTE: these edge lists only contain single direction of edge!
    return adj_train, train_edges, train_edges_false, \
        val_edges, val_edges_false, test_edges, test_edges_false
| [
"torch.from_numpy",
"numpy.array",
"scipy.sparse.isspmatrix_coo",
"networkx.from_dict_of_lists",
"torch.nn.Sigmoid",
"scipy.sparse.eye",
"numpy.sort",
"networkx.from_scipy_sparse_matrix",
"numpy.empty",
"numpy.vstack",
"scipy.sparse.coo_matrix",
"scipy.sparse.diags",
"numpy.isinf",
"numpy.... | [((347, 380), 'numpy.empty', 'np.empty', (['(nb_nodes, nb_features)'], {}), '((nb_nodes, nb_features))\n', (355, 380), True, 'import numpy as np\n'), ((781, 821), 'numpy.zeros', 'np.zeros', (['(nb_graphs, nb_nodes, ft_size)'], {}), '((nb_graphs, nb_nodes, ft_size))\n', (789, 821), True, 'import numpy as np\n'), ((838, 879), 'numpy.zeros', 'np.zeros', (['(nb_graphs, nb_nodes, nb_nodes)'], {}), '((nb_graphs, nb_nodes, nb_nodes))\n', (846, 879), True, 'import numpy as np\n'), ((893, 912), 'numpy.zeros', 'np.zeros', (['nb_graphs'], {}), '(nb_graphs)\n', (901, 912), True, 'import numpy as np\n'), ((925, 960), 'numpy.zeros', 'np.zeros', (['nb_graphs'], {'dtype': 'np.int32'}), '(nb_graphs, dtype=np.int32)\n', (933, 960), True, 'import numpy as np\n'), ((973, 1004), 'numpy.zeros', 'np.zeros', (['(nb_graphs, nb_nodes)'], {}), '((nb_graphs, nb_nodes))\n', (981, 1004), True, 'import numpy as np\n'), ((2353, 2372), 'numpy.empty', 'np.empty', (['adj.shape'], {}), '(adj.shape)\n', (2361, 2372), True, 'import numpy as np\n'), ((3090, 3101), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (3098, 3101), True, 'import numpy as np\n'), ((3131, 3160), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (3139, 3160), True, 'import numpy as np\n'), ((3759, 3784), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (3766, 3784), True, 'import numpy as np\n'), ((4568, 4589), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (4577, 4589), True, 'import numpy as np\n'), ((6313, 6328), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (6321, 6328), True, 'import scipy.sparse as sp\n'), ((6512, 6530), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (6525, 6530), True, 'import scipy.sparse as sp\n'), ((6678, 6698), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (6686, 6698), True, 'import scipy.sparse as sp\n'), ((7294, 7326), 
'torch.from_numpy', 'torch.from_numpy', (['sparse_mx.data'], {}), '(sparse_mx.data)\n', (7310, 7326), False, 'import torch\n'), ((7339, 7366), 'torch.Size', 'torch.Size', (['sparse_mx.shape'], {}), '(sparse_mx.shape)\n', (7349, 7366), False, 'import torch\n'), ((7378, 7426), 'torch.sparse.FloatTensor', 'torch.sparse.FloatTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (7402, 7426), False, 'import torch\n'), ((8141, 8173), 'networkx.from_scipy_sparse_matrix', 'nx.from_scipy_sparse_matrix', (['adj'], {}), '(adj)\n', (8168, 8173), True, 'import networkx as nx\n'), ((8192, 8225), 'networkx.number_connected_components', 'nx.number_connected_components', (['g'], {}), '(g)\n', (8222, 8225), True, 'import networkx as nx\n'), ((8242, 8254), 'scipy.sparse.triu', 'sp.triu', (['adj'], {}), '(adj)\n', (8249, 8254), True, 'import scipy.sparse as sp\n'), ((9190, 9220), 'numpy.random.shuffle', 'np.random.shuffle', (['edge_tuples'], {}), '(edge_tuples)\n', (9207, 9220), True, 'import numpy as np\n'), ((13322, 13344), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['g'], {}), '(g)\n', (13341, 13344), True, 'import networkx as nx\n'), ((2420, 2440), 'numpy.eye', 'np.eye', (['adj.shape[1]'], {}), '(adj.shape[1])\n', (2426, 2440), True, 'import numpy as np\n'), ((4524, 4552), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (4545, 4552), True, 'import networkx as nx\n'), ((6275, 6290), 'numpy.isinf', 'np.isinf', (['r_inv'], {}), '(r_inv)\n', (6283, 6290), True, 'import numpy as np\n'), ((6630, 6650), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (6638, 6650), True, 'import numpy as np\n'), ((8544, 8580), 'numpy.floor', 'np.floor', (['(edges.shape[0] * test_frac)'], {}), '(edges.shape[0] * test_frac)\n', (8552, 8580), True, 'import numpy as np\n'), ((8644, 8679), 'numpy.floor', 'np.floor', (['(edges.shape[0] * val_frac)'], {}), '(edges.shape[0] * val_frac)\n', (8652, 8679), True, 'import numpy 
as np\n'), ((10626, 10660), 'numpy.random.randint', 'np.random.randint', (['(0)', 'adj.shape[0]'], {}), '(0, adj.shape[0])\n', (10643, 10660), True, 'import numpy as np\n'), ((10677, 10711), 'numpy.random.randint', 'np.random.randint', (['(0)', 'adj.shape[0]'], {}), '(0, adj.shape[0])\n', (10694, 10711), True, 'import numpy as np\n'), ((11216, 11250), 'numpy.random.randint', 'np.random.randint', (['(0)', 'adj.shape[0]'], {}), '(0, adj.shape[0])\n', (11233, 11250), True, 'import numpy as np\n'), ((11267, 11301), 'numpy.random.randint', 'np.random.randint', (['(0)', 'adj.shape[0]'], {}), '(0, adj.shape[0])\n', (11284, 11301), True, 'import numpy as np\n'), ((11887, 11921), 'numpy.random.randint', 'np.random.randint', (['(0)', 'adj.shape[0]'], {}), '(0, adj.shape[0])\n', (11904, 11921), True, 'import numpy as np\n'), ((11938, 11972), 'numpy.random.randint', 'np.random.randint', (['(0)', 'adj.shape[0]'], {}), '(0, adj.shape[0])\n', (11955, 11972), True, 'import numpy as np\n'), ((1510, 1522), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1520, 1522), True, 'import torch.nn as nn\n'), ((4400, 4421), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (4409, 4421), True, 'import scipy.sparse as sp\n'), ((5044, 5065), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['mx'], {}), '(mx)\n', (5061, 5065), True, 'import scipy.sparse as sp\n'), ((6234, 6254), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (6242, 6254), True, 'import numpy as np\n'), ((6582, 6604), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (6590, 6604), True, 'import numpy as np\n'), ((6947, 6967), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (6953, 6967), True, 'import scipy.sparse as sp\n'), ((10416, 10449), 'networkx.number_connected_components', 'nx.number_connected_components', (['g'], {}), '(g)\n', (10446, 10449), True, 'import networkx as nx\n'), ((1256, 1279), 'numpy.ones', 'np.ones', 
(['e_ind.shape[1]'], {}), '(e_ind.shape[1])\n', (1263, 1279), True, 'import numpy as np\n'), ((1704, 1733), 'torch.nonzero', 'torch.nonzero', (['(preds * labels)'], {}), '(preds * labels)\n', (1717, 1733), False, 'import torch\n'), ((1758, 1799), 'torch.nonzero', 'torch.nonzero', (['((preds - 1) * (labels - 1))'], {}), '((preds - 1) * (labels - 1))\n', (1771, 1799), False, 'import torch\n'), ((1824, 1859), 'torch.nonzero', 'torch.nonzero', (['(preds * (labels - 1))'], {}), '(preds * (labels - 1))\n', (1837, 1859), False, 'import torch\n'), ((1884, 1919), 'torch.nonzero', 'torch.nonzero', (['((preds - 1) * labels)'], {}), '((preds - 1) * labels)\n', (1897, 1919), False, 'import torch\n'), ((5916, 5935), 'numpy.array', 'np.array', (['(sigma > 0)'], {}), '(sigma > 0)\n', (5924, 5935), True, 'import numpy as np\n'), ((7221, 7262), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (7230, 7262), True, 'import numpy as np\n'), ((9499, 9532), 'networkx.number_connected_components', 'nx.number_connected_components', (['g'], {}), '(g)\n', (9529, 9532), True, 'import networkx as nx\n'), ((2519, 2539), 'numpy.eye', 'np.eye', (['adj.shape[1]'], {}), '(adj.shape[1])\n', (2525, 2539), True, 'import numpy as np\n'), ((3504, 3534), 'pickle.load', 'pkl.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3512, 3534), True, 'import pickle as pkl\n'), ((3585, 3596), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (3593, 3596), True, 'import pickle as pkl\n'), ((5308, 5335), 'numpy.vstack', 'np.vstack', (['(mx.row, mx.col)'], {}), '((mx.row, mx.col))\n', (5317, 5335), True, 'import numpy as np\n'), ((5152, 5177), 'numpy.zeros', 'np.zeros', (['mx.row.shape[0]'], {}), '(mx.row.shape[0])\n', (5160, 5177), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Use the proper idiom in the main module ...
# NOTE: See https://docs.python.org/3.9/library/multiprocessing.html#multiprocessing-programming
if __name__ == "__main__":
    # This is a test suite for “geo.buffer()” with:
    #   A) a polygon that spans the whole numerical range;
    #   B) a polygon that crosses the equator;
    #   C) a polygon that crosses the anti-meridian;
    #   D) a polygon that crosses a pole;
    #   E) a polygon that crosses both the equator and the anti-meridian; and
    #   F) a polygon that crosses both a pole and the anti-meridian.
    # Each polygon has a plot with both a top-down projection and a Robinson
    # projection so that you can check it, along with an equirectangular plot.
    # Import special modules ...
    # NOTE(review): the bare "except:" clauses below also swallow
    # KeyboardInterrupt/SystemExit; "except ImportError:" would be narrower.
    try:
        import cartopy
    except:
        raise Exception("\"cartopy\" is not installed; run \"pip install --user Cartopy\"") from None
    try:
        import geojson
    except:
        raise Exception("\"geojson\" is not installed; run \"pip install --user geojson\"") from None
    try:
        import matplotlib
        matplotlib.use("Agg") # NOTE: See https://matplotlib.org/stable/gallery/user_interfaces/canvasagg.html
        import matplotlib.pyplot
    except:
        raise Exception("\"matplotlib\" is not installed; run \"pip install --user matplotlib\"") from None
    try:
        import numpy
    except:
        raise Exception("\"numpy\" is not installed; run \"pip install --user numpy\"") from None
    try:
        import shapely
        import shapely.geometry
    except:
        raise Exception("\"shapely\" is not installed; run \"pip install --user Shapely\"") from None
    # Import my modules ...
    try:
        import pyguymer3
        import pyguymer3.geo
        import pyguymer3.image
    except:
        raise Exception("\"pyguymer3\" is not installed; you need to have the Python module from https://github.com/Guymer/PyGuymer3 located somewhere in your $PYTHONPATH") from None
    print(f"Testing \"{pyguymer3.__path__[0]}\" ...")
    # Define polygons as (lon, lat, first buffer [m], second buffer [m]) ...
    polys = [
        (-180.0, +90.0, 1000000.0,  900000.0),  # Satisfies test A, C, D, F
        ( -90.0, +45.0, 1000000.0,  900000.0),  # Satisfies test A
        (   0.0,   0.0, 1000000.0,  900000.0),  # Satisfies test A, B
        ( +90.0, -45.0, 1000000.0,  900000.0),  # Satisfies test A
        (+180.0, -90.0, 1000000.0,  900000.0),  # Satisfies test A, C, D, F
        (+170.0, +10.0, 1000000.0, 4000000.0),  # Satisfies test B, C, E
        (+170.0, +80.0, 1000000.0, 4000000.0),  # Satisfies test C, D, F
    ]
    # Loop over polygons ...
    for i, (lon, lat, dist1, dist2) in enumerate(polys):
        # Determine file names ...
        fname = f"buffer{i:d}.png"
        jname = f"buffer{i:d}.geojson"
        print(f" > Making \"{jname}\" and \"{fname}\" ...")
        # Create figure ...
        fg = matplotlib.pyplot.figure(figsize = (6, 6), dpi = 150)
        # Create first subplot ...
        ax1 = fg.add_subplot(2, 2, 1, projection = cartopy.crs.Robinson())
        ax1.set_global()
        pyguymer3.geo.add_map_background(ax1)
        ax1.coastlines(resolution = "110m", color = "black", linewidth = 0.1)
        # Create second subplot (top-down view centred on the polygon) ...
        ax2 = fg.add_subplot(2, 2, 2, projection = cartopy.crs.Orthographic(central_longitude = lon, central_latitude = lat))
        ax2.set_global()
        pyguymer3.geo.add_map_background(ax2)
        ax2.coastlines(resolution = "110m", color = "black", linewidth = 0.1)
        # Create third subplot (plain equirectangular axes) ...
        ax3 = fg.add_subplot(2, 2, (3, 4))
        ax3.grid()
        ax3.set_aspect("equal")
        ax3.set_xlabel("Longitude [°]")
        ax3.set_xlim(-180, +180)
        ax3.set_xticks([-180, -135, -90, -45, 0, +45, +90, +135, +180])
        ax3.set_ylabel("Latitude [°]")
        ax3.set_ylim(-90, +90)
        ax3.set_yticks([-90, -45, 0, +45, +90])
        # Buffer Point (by the combined distance, in red) and plot it thrice ...
        buff0 = pyguymer3.geo.buffer(shapely.geometry.point.Point(lon, lat), dist1 + dist2, debug = True, nang = 361, simp = -1.0)
        ax1.add_geometries([buff0], cartopy.crs.PlateCarree(), edgecolor = (1.0, 0.0, 0.0, 1.0), facecolor = "none", linewidth = 1.0)
        ax2.add_geometries([buff0], cartopy.crs.PlateCarree(), edgecolor = (1.0, 0.0, 0.0, 1.0), facecolor = "none", linewidth = 1.0)
        for poly in pyguymer3.geo.extract_polys(buff0):
            coords = numpy.array(poly.exterior.coords)
            ax3.plot(coords[:, 0], coords[:, 1], color = (1.0, 0.0, 0.0, 1.0))
            del coords
        # Clean up ...
        del buff0
        # Buffer Point (by the first distance, in green) and plot it twice ...
        buff1 = pyguymer3.geo.buffer(shapely.geometry.point.Point(lon, lat), dist1, debug = True, nang = 361, simp = -1.0)
        ax1.add_geometries([buff1], cartopy.crs.PlateCarree(), edgecolor = (0.0, 1.0, 0.0, 1.0), facecolor = "none", linewidth = 1.0)
        ax2.add_geometries([buff1], cartopy.crs.PlateCarree(), edgecolor = (0.0, 1.0, 0.0, 1.0), facecolor = "none", linewidth = 1.0)
        for poly in pyguymer3.geo.extract_polys(buff1):
            coords = numpy.array(poly.exterior.coords)
            ax3.plot(coords[:, 0], coords[:, 1], color = (0.0, 1.0, 0.0, 1.0))
            del coords
        # Buffer Polygon (the green one, by the second distance, in blue) and plot it thrice ...
        buff2 = pyguymer3.geo.buffer(buff1, dist2, debug = True, nang = 361, simp = -1.0)
        ax1.add_geometries([buff2], cartopy.crs.PlateCarree(), edgecolor = (0.0, 0.0, 1.0, 1.0), facecolor = (0.0, 0.0, 1.0, 0.5), linewidth = 1.0)
        ax2.add_geometries([buff2], cartopy.crs.PlateCarree(), edgecolor = (0.0, 0.0, 1.0, 1.0), facecolor = (0.0, 0.0, 1.0, 0.5), linewidth = 1.0)
        for poly in pyguymer3.geo.extract_polys(buff2):
            coords = numpy.array(poly.exterior.coords)
            ax3.plot(coords[:, 0], coords[:, 1], color = (0.0, 0.0, 1.0, 1.0))
            del coords
        # Clean up ...
        del buff1
        # Save GeoJSON ...
        geojson.dump(buff2, open(jname, "wt"), indent = 4, sort_keys = True)
        # Clean up ...
        del buff2
        # Save figure ...
        fg.suptitle(f"({lon:.1f},{lat:.1f}) buffered by {0.001 * dist1:,.1f}km & {0.001 * dist2:,.1f}km")
        fg.savefig(fname, bbox_inches = "tight", dpi = 150, pad_inches = 0.1)
        pyguymer3.image.optimize_image(fname, strip = True)
        matplotlib.pyplot.close(fg)
| [
"pyguymer3.geo.extract_polys",
"matplotlib.use",
"cartopy.crs.Orthographic",
"cartopy.crs.PlateCarree",
"pyguymer3.geo.buffer",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"pyguymer3.image.optimize_image",
"pyguymer3.geo.add_map_background",
"cartopy.crs.Robinson",
"s... | [((1120, 1141), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1134, 1141), False, 'import matplotlib\n'), ((2958, 3007), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', ([], {'figsize': '(6, 6)', 'dpi': '(150)'}), '(figsize=(6, 6), dpi=150)\n', (2982, 3007), False, 'import matplotlib\n'), ((3156, 3193), 'pyguymer3.geo.add_map_background', 'pyguymer3.geo.add_map_background', (['ax1'], {}), '(ax1)\n', (3188, 3193), False, 'import pyguymer3\n'), ((3468, 3505), 'pyguymer3.geo.add_map_background', 'pyguymer3.geo.add_map_background', (['ax2'], {}), '(ax2)\n', (3500, 3505), False, 'import pyguymer3\n'), ((4443, 4477), 'pyguymer3.geo.extract_polys', 'pyguymer3.geo.extract_polys', (['buff0'], {}), '(buff0)\n', (4470, 4477), False, 'import pyguymer3\n'), ((5135, 5169), 'pyguymer3.geo.extract_polys', 'pyguymer3.geo.extract_polys', (['buff1'], {}), '(buff1)\n', (5162, 5169), False, 'import pyguymer3\n'), ((5393, 5460), 'pyguymer3.geo.buffer', 'pyguymer3.geo.buffer', (['buff1', 'dist2'], {'debug': '(True)', 'nang': '(361)', 'simp': '(-1.0)'}), '(buff1, dist2, debug=True, nang=361, simp=-1.0)\n', (5413, 5460), False, 'import pyguymer3\n'), ((5783, 5817), 'pyguymer3.geo.extract_polys', 'pyguymer3.geo.extract_polys', (['buff2'], {}), '(buff2)\n', (5810, 5817), False, 'import pyguymer3\n'), ((6384, 6433), 'pyguymer3.image.optimize_image', 'pyguymer3.image.optimize_image', (['fname'], {'strip': '(True)'}), '(fname, strip=True)\n', (6414, 6433), False, 'import pyguymer3\n'), ((6444, 6471), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', (['fg'], {}), '(fg)\n', (6467, 6471), False, 'import matplotlib\n'), ((4061, 4099), 'shapely.geometry.point.Point', 'shapely.geometry.point.Point', (['lon', 'lat'], {}), '(lon, lat)\n', (4089, 4099), False, 'import shapely\n'), ((4191, 4216), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (4214, 4216), False, 'import cartopy\n'), ((4325, 4350), 'cartopy.crs.PlateCarree', 
'cartopy.crs.PlateCarree', ([], {}), '()\n', (4348, 4350), False, 'import cartopy\n'), ((4500, 4533), 'numpy.array', 'numpy.array', (['poly.exterior.coords'], {}), '(poly.exterior.coords)\n', (4511, 4533), False, 'import numpy\n'), ((4761, 4799), 'shapely.geometry.point.Point', 'shapely.geometry.point.Point', (['lon', 'lat'], {}), '(lon, lat)\n', (4789, 4799), False, 'import shapely\n'), ((4883, 4908), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (4906, 4908), False, 'import cartopy\n'), ((5017, 5042), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (5040, 5042), False, 'import cartopy\n'), ((5192, 5225), 'numpy.array', 'numpy.array', (['poly.exterior.coords'], {}), '(poly.exterior.coords)\n', (5203, 5225), False, 'import numpy\n'), ((5503, 5528), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (5526, 5528), False, 'import cartopy\n'), ((5651, 5676), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (5674, 5676), False, 'import cartopy\n'), ((5840, 5873), 'numpy.array', 'numpy.array', (['poly.exterior.coords'], {}), '(poly.exterior.coords)\n', (5851, 5873), False, 'import numpy\n'), ((3099, 3121), 'cartopy.crs.Robinson', 'cartopy.crs.Robinson', ([], {}), '()\n', (3119, 3121), False, 'import cartopy\n'), ((3360, 3429), 'cartopy.crs.Orthographic', 'cartopy.crs.Orthographic', ([], {'central_longitude': 'lon', 'central_latitude': 'lat'}), '(central_longitude=lon, central_latitude=lat)\n', (3384, 3429), False, 'import cartopy\n')] |
import numpy as np
from .. import event
from ..helpers import angle
from ..world import PoseStep
from .actor import Actor
class Odometer(Actor):
    """Integrate wheel-odometry velocity readings into the robot's pose estimates.

    On every NEW_MACHINE_DATA event the queued velocity samples are converted
    into small ``PoseStep`` increments and applied to the predicted and
    simulated poses.  When an absolute pose detection arrives, the prediction
    is re-anchored on it; a heuristic guards against spurious 180-degree flips
    caused by swapped marker points.
    """

    def __init__(self):
        super().__init__()
        # Timestamp of the previously processed velocity sample; None until
        # the first sample establishes the time base.
        self.last_time: float = None
        # Pose increments accumulated since the last accepted detection.
        self.steps: list[PoseStep] = []
        # Number of consecutive detections suspected to be 180-degree flips.
        self.flips: int = 0
        self.flip_detection_initialized: bool = False
        event.register(event.Id.NEW_MACHINE_DATA, self.handle_velocity)

    def handle_velocity(self):
        """Drain the odometry queue and apply each sample as a pose step."""
        if not self.world.robot.odometry:
            return
        while self.world.robot.odometry:
            velocity = self.world.robot.odometry.pop(0)
            if self.last_time is None:
                # First sample only establishes the time base; no step yet.
                self.last_time = velocity.time
                continue
            dt = velocity.time - self.last_time
            self.last_time = velocity.time
            step = PoseStep(linear=dt*velocity.linear, angular=dt*velocity.angular, time=self.world.time)
            self.steps.append(step)
            self.world.robot.prediction += step
            self.world.robot.simulation += step
            self.world.robot.current_velocity = velocity
            if step.linear or step.angular:
                self.world.robot.last_movement = step.time
                event.emit(event.Id.ROBOT_MOVED)
        # Keep only the last 10 seconds of steps around for re-anchoring.
        self.prune_steps(self.world.time - 10.0)

    def handle_detection(self):
        """Re-anchor the pose prediction on the latest absolute detection."""
        if self.world.robot.detection is None or not any(self.steps) or self.world.robot.detection.time < self.steps[0].time:
            return
        # Drop steps that predate the detection; the remainder is replayed below.
        while self.steps[0].time < self.world.robot.detection.time:
            del self.steps[0]
        # NOTE: attempt to avoid 180-degree flips due to swapped marker points
        if self.flip_detection_initialized:
            d_yaw = sum(step.angular for step in self.steps)
            if abs(angle(self.world.robot.prediction.yaw - d_yaw, self.world.robot.detection.yaw)) > np.deg2rad(90):
                self.flips += 1
                for image in self.world.images:
                    if image.time == self.world.robot.detection.time:
                        self.log.warning(f'adding {image.id} to upload queue because our position has flipped')
                        self.world.upload.mark(image)
                if self.flips < 3:
                    # Ignore up to two consecutive flipped detections before
                    # accepting the new orientation anyway.
                    # fix: `Logger.warn` is a deprecated alias of `warning`
                    self.log.warning('Avoiding flip')
                    return
            else:
                self.flips = 0
        self.world.robot.prediction = self.world.robot.detection.copy(deep=True)
        # Replay the steps recorded since the detection on top of the new anchor.
        for step in self.steps:
            self.world.robot.prediction += step
        self.flip_detection_initialized = True

    def prune_steps(self, cut_off_time: float):
        """Drop all recorded steps older than ``cut_off_time``."""
        while any(self.steps) and self.steps[0].time < cut_off_time:
            del self.steps[0]
| [
"numpy.deg2rad"
] | [((1886, 1900), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (1896, 1900), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
pysteps.motion.vet
==================
Variational Echo Tracking (VET) Module
This module implements the VET algorithm presented
by `<NAME> Zawadzki (1995)`_ and used in the
McGill Algorithm for Prediction by Lagrangian Extrapolation (MAPLE) described
in `<NAME> (2002)`_.
.. _`<NAME> (1995)`:\
http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2
.. _`<NAME> (2002)`:\
http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2
The morphing and the cost functions are implemented in Cython and parallelized
for performance.
.. currentmodule:: pysteps.motion.vet
.. autosummary::
:toctree: ../generated/
vet
vet_cost_function
vet_cost_function_gradient
morph
round_int
ceil_int
get_padding
"""
import numpy
from numpy.ma.core import MaskedArray
from scipy.ndimage.interpolation import zoom
from scipy.optimize import minimize
from pysteps.decorators import check_input_frames
from pysteps.motion._vet import _warp, _cost_function
def round_int(scalar):
    """
    Round a number to the nearest integer.

    Returns an ``int`` value (ties are rounded half-to-even by
    :func:`numpy.round`).
    """
    return int(numpy.round(scalar))
def ceil_int(scalar):
    """
    Round a number up to the nearest integer (ceiling).

    Returns an ``int`` value.

    Note: the previous docstring ("round to nearest integer") was a
    copy-paste of :func:`round_int` and did not describe this function.
    """
    return int(numpy.ceil(scalar))
def get_padding(dimension_size, sectors):
    """
    Compute the padding required at each side of one image dimension so
    that the padded size divides evenly into the requested number of
    *sectors*.

    Parameters
    ----------
    dimension_size : int
        Actual dimension size.
    sectors : int
        Number of sectors over which the image will be divided.

    Returns
    -------
    pad_before , pad_after: int, int
        Padding at each side of the image for the corresponding dimension.
    """
    remainder = dimension_size % sectors
    if remainder == 0:
        # Already divisible: no padding needed.
        return 0, 0
    # Split the missing amount as evenly as possible; when it is odd, the
    # extra element goes after the image.
    total_pad = sectors - remainder
    pad_before = total_pad // 2
    return pad_before, total_pad - pad_before
def morph(image, displacement, gradient=False):
    """
    Morph image by applying a displacement field (Warping).

    The new image is created by selecting for each position the values of
    the input image at the positions given by the x and y displacements.
    The routine works in a backward sense: the displacement vectors refer
    to their destination.  For background on morphing functions see
    Section 3 in `Beezley and Mandel (2008)`_.

    .. _`Beezley and Mandel (2008)`: http://dx.doi.org/10.1111/\
j.1600-0870.2007.00275.x

    The displacement field in x and y directions and the image must have
    the same dimensions.  The morphing is executed in parallel over the x
    axis.  Displaced pixels that fall outside the limits take the value of
    the nearest edge; those pixels are indicated by values greater than 1
    in the output mask.

    Parameters
    ----------
    image : ndarray (ndim = 2)
        Image to morph
    displacement : ndarray (ndim = 3)
        Displacement field to be applied (Warping). The first dimension
        corresponds to the coordinate to displace.
        The dimensions are: displacement [ i/x (0) or j/y (1) ,
        i index of pixel, j index of pixel ]
    gradient : bool, optional
        If True, the gradient of the morphing function is returned.

    Returns
    -------
    image : ndarray (float64 ,ndim = 2)
        Morphed image.
    mask : ndarray (int8 ,ndim = 2)
        Invalid values mask. Points outside the boundaries are masked.
        Values greater than 1, indicate masked values.
    gradient_values : ndarray (float64 ,ndim = 3), optional
        If gradient keyword is True, the gradient of the function is also
        returned.
    """
    # _warp expects contiguous C-ordered buffers of fixed dtypes, so every
    # input is normalised here before the Cython call.
    if isinstance(image, MaskedArray):
        _mask = numpy.asarray(numpy.ma.getmaskarray(image),
                              dtype='int8',
                              order='C')
    else:
        # Unmasked input: nothing is invalid.
        _mask = numpy.zeros_like(image, dtype='int8')
    _image = numpy.asarray(image, dtype='float64', order='C')
    _displacement = numpy.asarray(displacement, dtype='float64', order='C')
    return _warp(_image, _mask, _displacement, gradient=gradient)
def vet_cost_function_gradient(*args, **kwargs):
    """Compute the vet cost function gradient.

    Thin wrapper that forwards every argument to
    :py:func:`vet_cost_function` with ``gradient`` forced to ``True``.
    See :py:func:`vet_cost_function` for more information.
    """
    # dict(kwargs, gradient=True) overrides any caller-supplied value,
    # exactly like assigning kwargs["gradient"] = True would.
    return vet_cost_function(*args, **dict(kwargs, gradient=True))
def vet_cost_function(sector_displacement_1d,
                      input_images,
                      blocks_shape,
                      mask,
                      smooth_gain,
                      debug=False,
                      gradient=False):
    """
    .. _`scipy minimization`: \
    https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.minimize.html
    Variational Echo Tracking Cost Function.
    This function is designed to be used with the `scipy minimization`_.
    The function first argument is the variable to be used in the
    minimization procedure.
    The sector displacement must be a flat array compatible with the
    dimensions of the input image and sectors shape (see parameters section
    below for more details).
    .. _ndarray:\
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
    Parameters
    ----------
    sector_displacement_1d : ndarray_
        Array of displacements to apply to each sector. The dimensions are:
        sector_displacement_2d
        [ x (0) or y (1) displacement, i index of sector, j index of sector ].
        The shape of the sector displacements must be compatible with the
        input image and the block shape.
        The shape should be (2, mx, my) where mx and my are the numbers of
        sectors in the x and the y dimension.
    input_images : ndarray_
        Input images, sequence of 2D arrays, or 3D arrays.
        The first dimension represents the images time dimension.
        The template_image (first element in first dimensions) denotes the
        reference image used to obtain the displacement (2D array).
        The second is the target image.
        The expected dimensions are (2,nx,ny).
        Be aware the the 2D images dimensions correspond to (lon,lat) or (x,y).
    blocks_shape : ndarray_ (ndim=2)
        Number of sectors in each dimension (x and y).
        blocks_shape.shape = (mx,my)
    mask : ndarray_ (ndim=2)
        Data mask. If is True, the data is marked as not valid and is not
        used in the computations.
    smooth_gain : float
        Smoothness constrain gain
    debug : bool, optional
        If True, print debugging information.
    gradient : bool, optional
        If True, the gradient of the morphing function is returned.
    Returns
    -------
    penalty or gradient values.
    penalty : float
        Value of the cost function
    gradient_values : ndarray (float64 ,ndim = 3), optional
        If gradient keyword is True, the gradient of the function is also
        returned.
    """
    # Restore the flat optimizer variable to its (2, mx, my) sector layout.
    sector_displacement_2d = \
        sector_displacement_1d.reshape(*((2,) + tuple(blocks_shape)))
    # Three input frames means the cost is evaluated over two image pairs
    # (previous->center and center->next) sharing the same displacement.
    if input_images.shape[0] == 3:
        three_times = True
        previous_image = input_images[0]
        center_image = input_images[1]
        next_image = input_images[2]
    else:
        previous_image = None
        center_image = input_images[0]
        next_image = input_images[1]
        three_times = False
    if gradient:
        # Gradient mode: _cost_function returns d(cost)/d(displacement);
        # contributions of both pairs simply add up.
        gradient_values = _cost_function(sector_displacement_2d,
                                         center_image,
                                         next_image,
                                         mask,
                                         smooth_gain,
                                         gradient=True)
        if three_times:
            gradient_values += _cost_function(sector_displacement_2d,
                                              previous_image,
                                              center_image,
                                              mask,
                                              smooth_gain,
                                              gradient=True)
        # The optimizer works on a flat variable, so flatten the gradient too.
        return gradient_values.ravel()
    else:
        # Penalty mode: residuals (data term) and smoothness (regularizer)
        # are returned separately by the Cython kernel and summed here.
        residuals, smoothness_penalty = _cost_function(sector_displacement_2d,
                                                       center_image,
                                                       next_image,
                                                       mask,
                                                       smooth_gain,
                                                       gradient=False)
        if three_times:
            _residuals, _smoothness = _cost_function(sector_displacement_2d,
                                                     previous_image,
                                                     center_image,
                                                     mask,
                                                     smooth_gain,
                                                     gradient=False)
            residuals += _residuals
            smoothness_penalty += _smoothness
        if debug:
            print("\nresiduals", residuals)
            print("smoothness_penalty", smoothness_penalty)
        return residuals + smoothness_penalty
@check_input_frames(2, 3)
def vet(input_images,
        sectors=((32, 16, 4, 2), (32, 16, 4, 2)),
        smooth_gain=1e6,
        first_guess=None,
        intermediate_steps=False,
        verbose=True,
        indexing='yx',
        padding=0,
        options=None):
    """
    Variational Echo Tracking Algorithm presented in
    `Laroche and Zawadzki (1995)`_ and used in the McGill Algorithm for
    Prediction by Lagrangian Extrapolation (MAPLE) described in
    `Germann and Zawadzki (2002)`_.
    .. _`Laroche and Zawadzki (1995)`:\
    http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2
    .. _`Germann and Zawadzki (2002)`:\
    http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2
    This algorithm computes the displacement field between two images
    ( the input_image with respect to the template image).
    The displacement is sought by minimizing the sum of the residuals of the
    squared differences of the images pixels and the contribution of a
    smoothness constraint.
    In the case that a MaskedArray is used as input, the residuals term in
    the cost function is only computed over areas with non-masked values.
    Otherwise, it is computed over the entire domain.
    To find the minimum, a scaling guess procedure is applied,
    from larger to smaller scales.
    This reduces the chances that the minimization procedure
    converges to a local minimum.
    The first scaling guess is defined by the scaling sectors keyword.
    The smoothness of the returned displacement field is controlled by the
    smoothness constraint gain (**smooth_gain** keyword).
    If a first guess is not given, zero displacements are used as the first
    guess.
    The cost function is minimized using the `scipy minimization`_ function,
    with the 'CG' method by default.
    This method proved to give the best results under many different conditions
    and is the most similar one to the original VET implementation in
    `Laroche and Zawadzki (1995)`_.
    The method CG uses a nonlinear conjugate gradient algorithm by Polak and
    Ribiere, a variant of the Fletcher-Reeves method described in
    Nocedal and Wright (2006), pp. 120-122.
    .. _`scipy minimization`: \
    https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.minimize.html
    .. _MaskedArray: https://docs.scipy.org/doc/numpy/reference/\
    maskedarray.baseclass.html#numpy.ma.MaskedArray
    .. _ndarray:\
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
    Parameters
    ----------
    input_images : ndarray_ or MaskedArray
        Input images, sequence of 2D arrays, or 3D arrays.
        The first dimension represents the images time dimension.
        The template_image (first element in first dimensions) denotes the
        reference image used to obtain the displacement (2D array).
        The second is the target image.
        The expected dimensions are (2,ni,nj).
    sectors : list or array, optional
        Number of sectors on each dimension used in the scaling procedure.
        If dimension is 1, the same sectors will be used both image dimensions
        (x and y). If **sectors** is a 1D array, the same number of sectors
        is used in both dimensions.
    smooth_gain : float, optional
        Smooth gain factor
    first_guess : ndarray_, optional
        The shape of the first guess should have the same shape as the initial
        sectors shapes used in the scaling procedure.
        If first_guess is not present zeros are used as first guess.
        E.g.:
        If the first sector shape in the scaling procedure is (ni,nj), then
        the first_guess should have (2, ni, nj ) shape.
    intermediate_steps : bool, optional
        If True, also return a list with the first guesses obtained during the
        scaling procedure. False, by default.
    verbose : bool, optional
        Verbosity enabled if True (default).
    indexing : str, optional
        Input indexing order.'ij' and 'xy' indicates that the
        dimensions of the input are (time, longitude, latitude), while
        'yx' indicates (time, latitude, longitude).
        The displacement field dimensions are ordered accordingly in a way that
        the first dimension indicates the displacement along x (0) or y (1).
        That is, UV displacements are always returned.
    padding : int
        Padding width in grid points. A border is added to the input array
        to reduce the effects of the minimization at the border.
    options : dict, optional
        A dictionary of solver options.
        See `scipy minimization`_ function for more details.
    Returns
    -------
    displacement_field : ndarray_
        Displacement Field (2D array representing the transformation) that
        warps the template image into the input image.
        The dimensions are (2,ni,nj), where the first
        dimension indicates the displacement along x (0) or y (1) in units of
        pixels / timestep as given by the input_images array.
    intermediate_steps : list of ndarray_
        List with the first guesses obtained during the scaling procedure.
    References
    ----------
    Lar<NAME>., and <NAME>, 1995:
    Retrievals of horizontal winds from single-Doppler clear-air data by
    methods of cross-correlation and variational analysis.
    J. Atmos. Oceanic Technol., 12, 721–738.
    doi: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2
    <NAME>. and <NAME>, 2002:
    Scale-Dependence of the Predictability of Precipitation from Continental
    Radar Images. Part I: Description of the Methodology.
    Mon. Wea. Rev., 130, 2859–2873,
    doi: 10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2.
    <NAME>, and <NAME>. 2006. Numerical Optimization. Springer New York.
    """
    # Either print or discard debug messages depending on verbosity.
    if verbose:
        def debug_print(*args, **kwargs):
            print(*args, **kwargs)
    else:
        def debug_print(*args, **kwargs):
            del args
            del kwargs
    # Copy the options dict so the caller's dict is never mutated.
    if options is None:
        options = dict()
    else:
        options = dict(options)
    options.setdefault('eps', 0.1)
    options.setdefault('gtol', 0.1)
    options.setdefault('maxiter', 100)
    options.setdefault('disp', False)
    # "method" is consumed here; the remaining keys go to scipy's solver.
    optimization_method = options.pop("method", "CG")
    # Set to None to suppress pylint warning.
    pad_i = None
    pad_j = None
    sectors_in_i = None
    sectors_in_j = None
    debug_print("Running VET algorithm")
    valid_indexing = ['yx', 'xy', 'ij']
    if indexing not in valid_indexing:
        raise ValueError("Invalid indexing values: {0}\n".format(indexing)
                         + "Supported values: {0}".format(str(valid_indexing)))
    # Get mask
    if isinstance(input_images, MaskedArray):
        mask = numpy.ma.getmaskarray(input_images)
    else:
        # Mask invalid data (NaNs); optional NaN border added first so the
        # padded cells are excluded from the residuals term.
        if padding > 0:
            padding_tuple = ((0, 0), (padding, padding), (padding, padding))
            input_images = numpy.pad(input_images,
                                      padding_tuple,
                                      'constant',
                                      constant_values=numpy.nan)
        input_images = numpy.ma.masked_invalid(input_images)
        mask = numpy.ma.getmaskarray(input_images)
    input_images.data[mask] = 0  # Remove any Nan from the raw data
    # Create a 2D mask with the right data type for _vet
    mask = numpy.asarray(numpy.any(mask, axis=0), dtype='int8', order='C')
    input_images = numpy.asarray(input_images.data, dtype='float64', order='C')
    # Check that the sectors divide the domain
    sectors = numpy.asarray(sectors, dtype="int", order='C')
    if sectors.ndim == 1:
        # A single sequence applies to both image dimensions.
        new_sectors = (numpy.zeros((2,) + sectors.shape, dtype='int', order='C')
                       + sectors.reshape((1, sectors.shape[0]))
                       )
        sectors = new_sectors
    elif sectors.ndim > 2 or sectors.ndim < 1:
        raise ValueError("Incorrect sectors dimensions.\n"
                         + "Only 1D or 2D arrays are supported to define"
                         + "the number of sectors used in"
                         + "the scaling procedure")
    # Sort the number of sectors in ascending order, i.e. from the coarsest
    # to the finest scale of the scaling-guess procedure.
    # (The original comment said "descending"; list.sort() is ascending.)
    sectors[0, :].sort()
    sectors[1, :].sort()
    # Prepare first guess
    first_guess_shape = (2, int(sectors[0, 0]), int(sectors[1, 0]))
    if first_guess is None:
        first_guess = numpy.zeros(first_guess_shape, order='C')
    else:
        if first_guess.shape != first_guess_shape:
            raise ValueError(
                "The shape of the initial guess do not match the number of "
                + "sectors of the first scaling guess\n"
                + "first_guess.shape={}\n".format(str(first_guess.shape))
                + "Expected shape={}".format(str(first_guess_shape)))
        else:
            first_guess = numpy.asarray(first_guess, order='C', dtype='float64')
    scaling_guesses = list()
    previous_sectors_in_i = sectors[0, 0]
    previous_sectors_in_j = sectors[1, 0]
    # Scaling-guess loop: minimize at each sector resolution, using the
    # previous (coarser) solution as the starting point for the next one.
    for n, (sectors_in_i, sectors_in_j) in enumerate(zip(sectors[0, :],
                                                         sectors[1, :])):
        # Minimize for each sector size
        pad_i = get_padding(input_images.shape[1], sectors_in_i)
        pad_j = get_padding(input_images.shape[2], sectors_in_j)
        if (pad_i != (0, 0)) or (pad_j != (0, 0)):
            _input_images = numpy.pad(input_images, ((0, 0), pad_i, pad_j),
                                      'edge')
            _mask = numpy.pad(mask, (pad_i, pad_j),
                              'constant',
                              constant_values=1)
            _mask = numpy.ascontiguousarray(_mask)
            # NOTE(review): dead branch — first_guess is always an array at
            # this point (assigned before the loop), so this pad never runs.
            # TODO confirm whether the condition was meant to be inverted.
            if first_guess is None:
                first_guess = numpy.pad(first_guess,
                                        ((0, 0), pad_i, pad_j),
                                        'edge')
                first_guess = numpy.ascontiguousarray(first_guess)
        else:
            _input_images = input_images
            _mask = mask
        sector_shape = (_input_images.shape[1] // sectors_in_i,
                        _input_images.shape[2] // sectors_in_j)
        debug_print("original image shape: " + str(input_images.shape))
        debug_print("padded image shape: " + str(_input_images.shape))
        debug_print("padded template_image image shape: "
                    + str(_input_images.shape))
        debug_print("\nNumber of sectors: {0:d},{1:d}".format(sectors_in_i,
                                                              sectors_in_j))
        debug_print("Sector Shape:", sector_shape)
        if n > 0:
            # Upsample the previous coarse solution to the new sector grid.
            first_guess = zoom(first_guess,
                               (1,
                                sectors_in_i / previous_sectors_in_i,
                                sectors_in_j / previous_sectors_in_j),
                               order=1, mode='nearest')
        debug_print("Minimizing")
        result = minimize(vet_cost_function,
                          first_guess.flatten(),
                          jac=vet_cost_function_gradient,
                          args=(_input_images,
                                (sectors_in_i, sectors_in_j),
                                _mask,
                                smooth_gain),
                          method=optimization_method,
                          options=options)
        first_guess = result.x.reshape(*first_guess.shape)
        if verbose:
            # Re-evaluate the cost with debug=True to print the final
            # residual and smoothness penalties.
            vet_cost_function(result.x,
                              _input_images,
                              (sectors_in_i, sectors_in_j),
                              _mask,
                              smooth_gain,
                              debug=True)
        if indexing == 'yx':
            scaling_guesses.append(first_guess[::-1, ...])
        else:
            scaling_guesses.append(first_guess)
        previous_sectors_in_i = sectors_in_i
        previous_sectors_in_j = sectors_in_j
    # Interpolate the final sector displacements to the full (padded) grid.
    first_guess = zoom(first_guess,
                       (1,
                        _input_images.shape[1] / sectors_in_i,
                        _input_images.shape[2] / sectors_in_j),
                       order=1, mode='nearest')
    first_guess = numpy.ascontiguousarray(first_guess)
    # Remove the extra padding if any
    ni = _input_images.shape[1]
    nj = _input_images.shape[2]
    first_guess = first_guess[:, pad_i[0]:ni - pad_i[1], pad_j[0]:nj - pad_j[1]]
    if indexing == 'yx':
        # Swap axes so the first dimension is (U, V) rather than (V, U).
        first_guess = first_guess[::-1, ...]
    if padding > 0:
        first_guess = first_guess[:, padding:-padding, padding:-padding]
    if intermediate_steps:
        return first_guess, scaling_guesses
    return first_guess
| [
"numpy.ceil",
"numpy.ma.getmaskarray",
"pysteps.motion._vet._cost_function",
"pysteps.decorators.check_input_frames",
"numpy.asarray",
"numpy.zeros_like",
"numpy.any",
"numpy.ascontiguousarray",
"numpy.zeros",
"scipy.ndimage.interpolation.zoom",
"numpy.ma.masked_invalid",
"numpy.pad",
"pyste... | [((9630, 9654), 'pysteps.decorators.check_input_frames', 'check_input_frames', (['(2)', '(3)'], {}), '(2, 3)\n', (9648, 9654), False, 'from pysteps.decorators import check_input_frames\n'), ((4264, 4312), 'numpy.asarray', 'numpy.asarray', (['image'], {'dtype': '"""float64"""', 'order': '"""C"""'}), "(image, dtype='float64', order='C')\n", (4277, 4312), False, 'import numpy\n'), ((4333, 4388), 'numpy.asarray', 'numpy.asarray', (['displacement'], {'dtype': '"""float64"""', 'order': '"""C"""'}), "(displacement, dtype='float64', order='C')\n", (4346, 4388), False, 'import numpy\n'), ((4401, 4455), 'pysteps.motion._vet._warp', '_warp', (['_image', '_mask', '_displacement'], {'gradient': 'gradient'}), '(_image, _mask, _displacement, gradient=gradient)\n', (4406, 4455), False, 'from pysteps.motion._vet import _warp, _cost_function\n'), ((17204, 17264), 'numpy.asarray', 'numpy.asarray', (['input_images.data'], {'dtype': '"""float64"""', 'order': '"""C"""'}), "(input_images.data, dtype='float64', order='C')\n", (17217, 17264), False, 'import numpy\n'), ((17327, 17373), 'numpy.asarray', 'numpy.asarray', (['sectors'], {'dtype': '"""int"""', 'order': '"""C"""'}), "(sectors, dtype='int', order='C')\n", (17340, 17373), False, 'import numpy\n'), ((21754, 21884), 'scipy.ndimage.interpolation.zoom', 'zoom', (['first_guess', '(1, _input_images.shape[1] / sectors_in_i, _input_images.shape[2] /\n sectors_in_j)'], {'order': '(1)', 'mode': '"""nearest"""'}), "(first_guess, (1, _input_images.shape[1] / sectors_in_i, _input_images.\n shape[2] / sectors_in_j), order=1, mode='nearest')\n", (21758, 21884), False, 'from scipy.ndimage.interpolation import zoom\n'), ((21993, 22029), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['first_guess'], {}), '(first_guess)\n', (22016, 22029), False, 'import numpy\n'), ((1148, 1167), 'numpy.round', 'numpy.round', (['scalar'], {}), '(scalar)\n', (1159, 1167), False, 'import numpy\n'), ((1288, 1306), 'numpy.ceil', 'numpy.ceil', 
(['scalar'], {}), '(scalar)\n', (1298, 1306), False, 'import numpy\n'), ((4057, 4094), 'numpy.zeros_like', 'numpy.zeros_like', (['image'], {'dtype': '"""int8"""'}), "(image, dtype='int8')\n", (4073, 4094), False, 'import numpy\n'), ((7784, 7886), 'pysteps.motion._vet._cost_function', '_cost_function', (['sector_displacement_2d', 'center_image', 'next_image', 'mask', 'smooth_gain'], {'gradient': '(True)'}), '(sector_displacement_2d, center_image, next_image, mask,\n smooth_gain, gradient=True)\n', (7798, 7886), False, 'from pysteps.motion._vet import _warp, _cost_function\n'), ((8567, 8670), 'pysteps.motion._vet._cost_function', '_cost_function', (['sector_displacement_2d', 'center_image', 'next_image', 'mask', 'smooth_gain'], {'gradient': '(False)'}), '(sector_displacement_2d, center_image, next_image, mask,\n smooth_gain, gradient=False)\n', (8581, 8670), False, 'from pysteps.motion._vet import _warp, _cost_function\n'), ((16477, 16512), 'numpy.ma.getmaskarray', 'numpy.ma.getmaskarray', (['input_images'], {}), '(input_images)\n', (16498, 16512), False, 'import numpy\n'), ((16893, 16930), 'numpy.ma.masked_invalid', 'numpy.ma.masked_invalid', (['input_images'], {}), '(input_images)\n', (16916, 16930), False, 'import numpy\n'), ((16946, 16981), 'numpy.ma.getmaskarray', 'numpy.ma.getmaskarray', (['input_images'], {}), '(input_images)\n', (16967, 16981), False, 'import numpy\n'), ((17134, 17157), 'numpy.any', 'numpy.any', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (17143, 17157), False, 'import numpy\n'), ((18129, 18170), 'numpy.zeros', 'numpy.zeros', (['first_guess_shape'], {'order': '"""C"""'}), "(first_guess_shape, order='C')\n", (18140, 18170), False, 'import numpy\n'), ((4135, 4163), 'numpy.ma.getmaskarray', 'numpy.ma.getmaskarray', (['image'], {}), '(image)\n', (4156, 4163), False, 'import numpy\n'), ((8143, 8249), 'pysteps.motion._vet._cost_function', '_cost_function', (['sector_displacement_2d', 'previous_image', 'center_image', 'mask', 'smooth_gain'], 
{'gradient': '(True)'}), '(sector_displacement_2d, previous_image, center_image, mask,\n smooth_gain, gradient=True)\n', (8157, 8249), False, 'from pysteps.motion._vet import _warp, _cost_function\n'), ((9005, 9112), 'pysteps.motion._vet._cost_function', '_cost_function', (['sector_displacement_2d', 'previous_image', 'center_image', 'mask', 'smooth_gain'], {'gradient': '(False)'}), '(sector_displacement_2d, previous_image, center_image, mask,\n smooth_gain, gradient=False)\n', (9019, 9112), False, 'from pysteps.motion._vet import _warp, _cost_function\n'), ((16680, 16757), 'numpy.pad', 'numpy.pad', (['input_images', 'padding_tuple', '"""constant"""'], {'constant_values': 'numpy.nan'}), "(input_images, padding_tuple, 'constant', constant_values=numpy.nan)\n", (16689, 16757), False, 'import numpy\n'), ((17425, 17482), 'numpy.zeros', 'numpy.zeros', (['((2,) + sectors.shape)'], {'dtype': '"""int"""', 'order': '"""C"""'}), "((2,) + sectors.shape, dtype='int', order='C')\n", (17436, 17482), False, 'import numpy\n'), ((18580, 18634), 'numpy.asarray', 'numpy.asarray', (['first_guess'], {'order': '"""C"""', 'dtype': '"""float64"""'}), "(first_guess, order='C', dtype='float64')\n", (18593, 18634), False, 'import numpy\n'), ((19149, 19204), 'numpy.pad', 'numpy.pad', (['input_images', '((0, 0), pad_i, pad_j)', '"""edge"""'], {}), "(input_images, ((0, 0), pad_i, pad_j), 'edge')\n", (19158, 19204), False, 'import numpy\n'), ((19264, 19326), 'numpy.pad', 'numpy.pad', (['mask', '(pad_i, pad_j)', '"""constant"""'], {'constant_values': '(1)'}), "(mask, (pad_i, pad_j), 'constant', constant_values=1)\n", (19273, 19326), False, 'import numpy\n'), ((19407, 19437), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['_mask'], {}), '(_mask)\n', (19430, 19437), False, 'import numpy\n'), ((20417, 20544), 'scipy.ndimage.interpolation.zoom', 'zoom', (['first_guess', '(1, sectors_in_i / previous_sectors_in_i, sectors_in_j / previous_sectors_in_j)'], {'order': '(1)', 'mode': 
'"""nearest"""'}), "(first_guess, (1, sectors_in_i / previous_sectors_in_i, sectors_in_j /\n previous_sectors_in_j), order=1, mode='nearest')\n", (20421, 20544), False, 'from scipy.ndimage.interpolation import zoom\n'), ((19504, 19558), 'numpy.pad', 'numpy.pad', (['first_guess', '((0, 0), pad_i, pad_j)', '"""edge"""'], {}), "(first_guess, ((0, 0), pad_i, pad_j), 'edge')\n", (19513, 19558), False, 'import numpy\n'), ((19669, 19705), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['first_guess'], {}), '(first_guess)\n', (19692, 19705), False, 'import numpy\n')] |
"""
Sampling in 1D
===============================
We discuss the performances of several Monte Carlo samplers on a toy 1D example.
"""
######################
# Introduction
# -------------------
#
# First of all, some standard imports.
import numpy as np
import torch
from matplotlib import pyplot as plt
plt.rcParams.update({"figure.max_open_warning": 0})
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
################################
# Our sampling space:
from monaco.euclidean import EuclideanSpace
D = 1
space = EuclideanSpace(dimension=D, dtype=dtype)
#######################################
# Our toy target distribution:
from monaco.euclidean import GaussianMixture, UnitPotential
N, M = (10000 if use_cuda else 50), 5
Nlucky = 100 if use_cuda else 2
nruns = 5
test_case = "sophia"
if test_case == "gaussians":
    # Let's generate a random blend of peaky Gaussians, in the unit square:
    m = torch.rand(M, D).type(dtype)  # mean
    s = torch.rand(M).type(dtype)  # deviation
    w = torch.rand(M).type(dtype)  # weights

    m = 0.25 + 0.5 * m  # keep the means well inside the unit interval
    s = 0.005 + 0.1 * (s ** 6)  # sixth power makes most components very peaky
    w = w / w.sum()  # normalize weights

    distribution = GaussianMixture(space, m, s, w)

elif test_case == "sophia":
    # Hand-tuned mixture of five Gaussians: one broad bump (s = 0.15)
    # plus four narrow spikes near 0.1, 0.2, 0.8 and 0.9.
    m = torch.FloatTensor([0.5, 0.1, 0.2, 0.8, 0.9]).type(dtype)[:, None]
    s = torch.FloatTensor([0.15, 0.005, 0.002, 0.002, 0.005]).type(dtype)
    w = torch.FloatTensor([0.1, 2 / 12, 1 / 12, 1 / 12, 2 / 12]).type(dtype)
    w = w / w.sum()  # normalize weights

    distribution = GaussianMixture(space, m, s, w)

elif test_case == "ackley":

    def ackley_potential(x, stripes=15):
        # Standard Ackley test function, rescaled to put `stripes`
        # oscillations inside the unit interval and centered at 0.5.
        f_1 = 20 * (-0.2 * (((x - 0.5) * stripes) ** 2).mean(-1).sqrt()).exp()
        f_2 = ((2 * np.pi * ((x - 0.5) * stripes)).cos().mean(-1)).exp()
        return -(f_1 + f_2 - np.exp(1) - 20) / stripes

    distribution = UnitPotential(space, ackley_potential)
#############################
# Display the target density, with a typical sample.

plt.figure(figsize=(8, 8))
space.scatter(distribution.sample(N), "red")
space.plot(distribution.potential, "red")
space.draw_frame()

#################################################
# Sampling
# ---------------------
#
# We start from a relatively bad start, albeit with 1 / 100 of lucky samples
# on one of the modes of the target distribution.

# Most starters live in [0.05, 0.15); the first Nlucky are seeded
# in a narrow band near the rightmost target mode at 0.9.
start = 0.05 + 0.1 * torch.rand(N, D).type(dtype)
# NOTE(review): the lucky band here has width 0.01, while the analytic
# start density Q_0 defined further below assumes a width of 0.001 —
# confirm which is intended before trusting the NPAIS importance weights.
start[:Nlucky] = 0.9 + 0.01 * torch.rand(Nlucky, D).type(dtype)

#########################################
# For exploration, we generate a fraction of our samples
# using a simple uniform distribution.

from monaco.euclidean import UniformProposal

exploration = .05  # fraction of proposals drawn uniformly over the space
exploration_proposal = UniformProposal(space)

#######################################
# Our proposal will stay the same throughout the experiments:
# a combination of uniform samples on balls with radii that
# range from 1/1000 to 0.3.

from monaco.euclidean import BallProposal

proposal = BallProposal(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
                        exploration=exploration, exploration_proposal=exploration_proposal)

##########################################
# First of all, we illustrate a run of the standard
# Metropolis-Hastings algorithm, parallelized on the GPU:

# Collects, per sampler name, the per-iteration diagnostics returned by
# ``display_samples`` ("iteration" and "error" are read by the benchmark
# at the end of the script).
info = {}

from monaco.samplers import ParallelMetropolisHastings, display_samples

pmh_sampler = ParallelMetropolisHastings(space, start, proposal, annealing=5).fit(
    distribution
)
info["PMH"] = display_samples(pmh_sampler, iterations=20, runs=nruns)
########################################
# Then, the standard Collective Monte Carlo method:

from monaco.samplers import CMC

cmc_sampler = CMC(space, start, proposal, annealing=5).fit(distribution)
info["CMC"] = display_samples(cmc_sampler, iterations=20, runs=nruns)

########################################
# BGK - Collective Monte Carlo method:

from monaco.samplers import Ada_CMC
from monaco.euclidean import GaussianProposal

gaussian_proposal = GaussianProposal(space, scale=[0.1],
                                     exploration=exploration, exploration_proposal=exploration_proposal)

# NOTE: the two adaptive (Ada_CMC) variants below are run only once
# (runs=1) and are not included in the comparative benchmark.
bgk_sampler = Ada_CMC(space, start, gaussian_proposal, annealing=5).fit(distribution)
info["BGK_CMC"] = display_samples(bgk_sampler, iterations=20, runs=1)

########################################
# GMM - Collective Monte Carlo method:

from monaco.euclidean import GMMProposal

gmm_proposal = GMMProposal(space, n_classes = 100,
                           exploration=exploration, exploration_proposal=exploration_proposal)

gmm_sampler = Ada_CMC(space, start, gmm_proposal, annealing=5).fit(distribution)
info["GMM_CMC"] = display_samples(gmm_sampler, iterations=20, runs=1)

#############################
# Our first algorithm - CMC with adaptive selection of the kernel bandwidth:

from monaco.samplers import MOKA_CMC

proposal = BallProposal(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
                        exploration=exploration, exploration_proposal=exploration_proposal)

moka_sampler = MOKA_CMC(space, start, proposal, annealing=5).fit(distribution)
info["MOKA"] = display_samples(moka_sampler, iterations=20, runs=nruns)

#############################
# With a Markovian selection of the kernel bandwidth:

from monaco.samplers import MOKA_Markov_CMC

proposal = BallProposal(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
                        exploration=exploration, exploration_proposal=exploration_proposal)

moka_markov_sampler = MOKA_Markov_CMC(space, start, proposal, annealing=5).fit(distribution)
info["MOKA Markov"] = display_samples(moka_markov_sampler, iterations=20, runs=nruns)

#############################
# Our second algorithm - CMC with Richardson-Lucy deconvolution:

from monaco.samplers import KIDS_CMC

proposal = BallProposal(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
                        exploration=exploration, exploration_proposal=exploration_proposal)

kids_sampler = KIDS_CMC(space, start, proposal, annealing=5, iterations=30).fit(
    distribution
)
info["KIDS"] = display_samples(kids_sampler, iterations=20, runs=nruns)

#############################
# Combining bandwith estimation and deconvolution with the Moka-Kids-CMC sampler:

from monaco.samplers import MOKA_KIDS_CMC

proposal = BallProposal(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
                        exploration=exploration, exploration_proposal=exploration_proposal)

# NOTE(review): this rebinds ``kids_sampler`` (previously the KIDS_CMC
# fit) — harmless here since the old value is no longer used.
kids_sampler = MOKA_KIDS_CMC(space, start, proposal, annealing=5, iterations=30).fit(
    distribution
)
info["MOKA+KIDS"] = display_samples(kids_sampler, iterations=20, runs=nruns)

#############################
# Finally, the Non Parametric Adaptive Importance Sampler,
# an efficient non-Markovian method with an extensive
# memory usage:

from monaco.samplers import NPAIS

proposal = BallProposal(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
                        exploration=exploration, exploration_proposal=exploration_proposal)
class Q_0(object):
    """Analytic description of the start distribution used by NPAIS.

    A mixture of two uniform densities: a broad component on
    [0.05, 0.15) with weight ``w_0`` and a narrow "lucky" component on
    [0.9, 0.901) with weight ``w_1 = Nlucky / N``.
    """

    def __init__(self):
        # Fraction of the starters that are drawn in the lucky band:
        self.w_1 = Nlucky / N
        self.w_0 = 1 - self.w_1

    def sample(self, n):
        """Draw ``n`` points, a fraction Nlucky / N of them in the lucky band."""
        lucky_count = int(n * (Nlucky / N))
        draws = 0.05 + 0.1 * torch.rand(n, D).type(dtype)
        draws[:lucky_count] = 0.9 + 0.001 * torch.rand(lucky_count, D).type(dtype)
        return draws

    def potential(self, x):
        """Return -log q0(x), with a large constant outside the support."""
        energy = torch.ones(len(x), 1).type_as(x) * 100000
        broad = (0.05 <= x) & (x < 0.15)
        narrow = (0.9 <= x) & (x < 0.901)
        energy[broad] = -np.log(self.w_0 / 0.1)
        energy[narrow] = -np.log(self.w_1 / 0.001)
        return energy.view(-1)
# NPAIS needs an explicit, normalized model of the start distribution.
q0 = Q_0()
npais_sampler = NPAIS(space, start, proposal, annealing=5, q0=q0, N=N).fit(distribution)
info["NPAIS"] = display_samples(npais_sampler, iterations=20, runs=nruns)

###############################################
# Comparative benchmark:

import itertools
import seaborn as sns

# NOTE(review): ``iters`` is never read below — ``display_line`` indexes
# ``info`` directly. Candidate for removal.
iters = info["PMH"]["iteration"]
def display_line(key, marker):
    """Plot the error curve of sampler ``key`` (mean +/- std over runs)."""
    run = info[key]
    sns.lineplot(x=run["iteration"], y=run["error"], label=key,
                 marker=marker, markersize=6, ci="sd")
plt.figure(figsize=(4, 4))
# Cycle through distinct markers so overlapping curves stay readable.
markers = itertools.cycle(("o", "X", "P", "D", "^", "<", "v", ">", "*"))
# Note that the two Ada_CMC runs ("BGK_CMC", "GMM_CMC") are not part of
# this comparison.
for key, marker in zip(["PMH", "CMC", "KIDS", "MOKA", "MOKA Markov", "MOKA+KIDS", "NPAIS"], markers):
    display_line(key, marker)
plt.xlabel("Iterations")
# "ED" — presumably the energy distance between the sample and the
# target; confirm against monaco's display_samples implementation.
plt.ylabel("ED ( sample, true distribution )")
# Clip the log axis so near-zero errors don't stretch the plot.
plt.ylim(bottom=1e-4)
plt.yscale("log")
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"monaco.euclidean.GMMProposal",
"numpy.log",
"torch.cuda.is_available",
"monaco.samplers.Ada_CMC",
"monaco.samplers.KIDS_CMC",
"monaco.euclidean.EuclideanSpace",
"monaco.euclidean.GaussianProposal",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"monaco.samplers.ParallelMet... | [((311, 362), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (330, 362), True, 'from matplotlib import pyplot as plt\n'), ((375, 400), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (398, 400), False, 'import torch\n'), ((583, 623), 'monaco.euclidean.EuclideanSpace', 'EuclideanSpace', ([], {'dimension': 'D', 'dtype': 'dtype'}), '(dimension=D, dtype=dtype)\n', (597, 623), False, 'from monaco.euclidean import EuclideanSpace\n'), ((2017, 2043), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2027, 2043), True, 'from matplotlib import pyplot as plt\n'), ((2704, 2726), 'monaco.euclidean.UniformProposal', 'UniformProposal', (['space'], {}), '(space)\n', (2719, 2726), False, 'from monaco.euclidean import UniformProposal\n'), ((2975, 3111), 'monaco.euclidean.BallProposal', 'BallProposal', (['space'], {'scale': '[0.001, 0.003, 0.01, 0.03, 0.1, 0.3]', 'exploration': 'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3], exploration\n =exploration, exploration_proposal=exploration_proposal)\n', (2987, 3111), False, 'from monaco.euclidean import BallProposal\n'), ((3488, 3543), 'monaco.samplers.display_samples', 'display_samples', (['pmh_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(pmh_sampler, iterations=20, runs=nruns)\n', (3503, 3543), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((3760, 3815), 'monaco.samplers.display_samples', 'display_samples', (['cmc_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(cmc_sampler, iterations=20, runs=nruns)\n', (3775, 3815), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((4002, 4110), 'monaco.euclidean.GaussianProposal', 'GaussianProposal', (['space'], {'scale': '[0.1]', 'exploration': 
'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, scale=[0.1], exploration=exploration,\n exploration_proposal=exploration_proposal)\n', (4018, 4110), False, 'from monaco.euclidean import GaussianProposal\n'), ((4236, 4287), 'monaco.samplers.display_samples', 'display_samples', (['bgk_sampler'], {'iterations': '(20)', 'runs': '(1)'}), '(bgk_sampler, iterations=20, runs=1)\n', (4251, 4287), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((4428, 4533), 'monaco.euclidean.GMMProposal', 'GMMProposal', (['space'], {'n_classes': '(100)', 'exploration': 'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, n_classes=100, exploration=exploration,\n exploration_proposal=exploration_proposal)\n', (4439, 4533), False, 'from monaco.euclidean import GMMProposal\n'), ((4656, 4707), 'monaco.samplers.display_samples', 'display_samples', (['gmm_sampler'], {'iterations': '(20)', 'runs': '(1)'}), '(gmm_sampler, iterations=20, runs=1)\n', (4671, 4707), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((4867, 5003), 'monaco.euclidean.BallProposal', 'BallProposal', (['space'], {'scale': '[0.001, 0.003, 0.01, 0.03, 0.1, 0.3]', 'exploration': 'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3], exploration\n =exploration, exploration_proposal=exploration_proposal)\n', (4879, 5003), False, 'from monaco.euclidean import BallProposal\n'), ((5117, 5173), 'monaco.samplers.display_samples', 'display_samples', (['moka_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(moka_sampler, iterations=20, runs=nruns)\n', (5132, 5173), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((5317, 5453), 'monaco.euclidean.BallProposal', 'BallProposal', (['space'], {'scale': '[0.001, 0.003, 0.01, 0.03, 0.1, 0.3]', 'exploration': 'exploration', 'exploration_proposal': 'exploration_proposal'}), 
'(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3], exploration\n =exploration, exploration_proposal=exploration_proposal)\n', (5329, 5453), False, 'from monaco.euclidean import BallProposal\n'), ((5588, 5651), 'monaco.samplers.display_samples', 'display_samples', (['moka_markov_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(moka_markov_sampler, iterations=20, runs=nruns)\n', (5603, 5651), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((5799, 5935), 'monaco.euclidean.BallProposal', 'BallProposal', (['space'], {'scale': '[0.001, 0.003, 0.01, 0.03, 0.1, 0.3]', 'exploration': 'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3], exploration\n =exploration, exploration_proposal=exploration_proposal)\n', (5811, 5935), False, 'from monaco.euclidean import BallProposal\n'), ((6070, 6126), 'monaco.samplers.display_samples', 'display_samples', (['kids_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(kids_sampler, iterations=20, runs=nruns)\n', (6085, 6126), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((6296, 6432), 'monaco.euclidean.BallProposal', 'BallProposal', (['space'], {'scale': '[0.001, 0.003, 0.01, 0.03, 0.1, 0.3]', 'exploration': 'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3], exploration\n =exploration, exploration_proposal=exploration_proposal)\n', (6308, 6432), False, 'from monaco.euclidean import BallProposal\n'), ((6578, 6634), 'monaco.samplers.display_samples', 'display_samples', (['kids_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(kids_sampler, iterations=20, runs=nruns)\n', (6593, 6634), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((6844, 6980), 'monaco.euclidean.BallProposal', 'BallProposal', (['space'], {'scale': '[0.001, 0.003, 0.01, 0.03, 0.1, 0.3]', 'exploration': 
'exploration', 'exploration_proposal': 'exploration_proposal'}), '(space, scale=[0.001, 0.003, 0.01, 0.03, 0.1, 0.3], exploration\n =exploration, exploration_proposal=exploration_proposal)\n', (6856, 6980), False, 'from monaco.euclidean import BallProposal\n'), ((7670, 7727), 'monaco.samplers.display_samples', 'display_samples', (['npais_sampler'], {'iterations': '(20)', 'runs': 'nruns'}), '(npais_sampler, iterations=20, runs=nruns)\n', (7685, 7727), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((8083, 8109), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (8093, 8109), True, 'from matplotlib import pyplot as plt\n'), ((8120, 8182), 'itertools.cycle', 'itertools.cycle', (["('o', 'X', 'P', 'D', '^', '<', 'v', '>', '*')"], {}), "(('o', 'X', 'P', 'D', '^', '<', 'v', '>', '*'))\n", (8135, 8182), False, 'import itertools\n'), ((8318, 8342), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (8328, 8342), True, 'from matplotlib import pyplot as plt\n'), ((8343, 8389), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ED ( sample, true distribution )"""'], {}), "('ED ( sample, true distribution )')\n", (8353, 8389), True, 'from matplotlib import pyplot as plt\n'), ((8390, 8413), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0.0001)'}), '(bottom=0.0001)\n', (8398, 8413), True, 'from matplotlib import pyplot as plt\n'), ((8412, 8429), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8422, 8429), True, 'from matplotlib import pyplot as plt\n'), ((8431, 8449), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8447, 8449), True, 'from matplotlib import pyplot as plt\n'), ((8452, 8462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8460, 8462), True, 'from matplotlib import pyplot as plt\n'), ((1213, 1244), 'monaco.euclidean.GaussianMixture', 'GaussianMixture', (['space', 'm', 's', 'w'], 
{}), '(space, m, s, w)\n', (1228, 1244), False, 'from monaco.euclidean import GaussianMixture, UnitPotential\n'), ((7916, 8029), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': "info[key]['iteration']", 'y': "info[key]['error']", 'label': 'key', 'marker': 'marker', 'markersize': '(6)', 'ci': '"""sd"""'}), "(x=info[key]['iteration'], y=info[key]['error'], label=key,\n marker=marker, markersize=6, ci='sd')\n", (7928, 8029), True, 'import seaborn as sns\n'), ((1561, 1592), 'monaco.euclidean.GaussianMixture', 'GaussianMixture', (['space', 'm', 's', 'w'], {}), '(space, m, s, w)\n', (1576, 1592), False, 'from monaco.euclidean import GaussianMixture, UnitPotential\n'), ((3386, 3449), 'monaco.samplers.ParallelMetropolisHastings', 'ParallelMetropolisHastings', (['space', 'start', 'proposal'], {'annealing': '(5)'}), '(space, start, proposal, annealing=5)\n', (3412, 3449), False, 'from monaco.samplers import ParallelMetropolisHastings, display_samples\n'), ((3687, 3727), 'monaco.samplers.CMC', 'CMC', (['space', 'start', 'proposal'], {'annealing': '(5)'}), '(space, start, proposal, annealing=5)\n', (3690, 3727), False, 'from monaco.samplers import CMC\n'), ((4146, 4199), 'monaco.samplers.Ada_CMC', 'Ada_CMC', (['space', 'start', 'gaussian_proposal'], {'annealing': '(5)'}), '(space, start, gaussian_proposal, annealing=5)\n', (4153, 4199), False, 'from monaco.samplers import Ada_CMC\n'), ((4571, 4619), 'monaco.samplers.Ada_CMC', 'Ada_CMC', (['space', 'start', 'gmm_proposal'], {'annealing': '(5)'}), '(space, start, gmm_proposal, annealing=5)\n', (4578, 4619), False, 'from monaco.samplers import Ada_CMC\n'), ((5038, 5083), 'monaco.samplers.MOKA_CMC', 'MOKA_CMC', (['space', 'start', 'proposal'], {'annealing': '(5)'}), '(space, start, proposal, annealing=5)\n', (5046, 5083), False, 'from monaco.samplers import MOKA_CMC\n'), ((5495, 5547), 'monaco.samplers.MOKA_Markov_CMC', 'MOKA_Markov_CMC', (['space', 'start', 'proposal'], {'annealing': '(5)'}), '(space, start, proposal, 
annealing=5)\n', (5510, 5547), False, 'from monaco.samplers import MOKA_Markov_CMC\n'), ((5970, 6030), 'monaco.samplers.KIDS_CMC', 'KIDS_CMC', (['space', 'start', 'proposal'], {'annealing': '(5)', 'iterations': '(30)'}), '(space, start, proposal, annealing=5, iterations=30)\n', (5978, 6030), False, 'from monaco.samplers import KIDS_CMC\n'), ((6468, 6533), 'monaco.samplers.MOKA_KIDS_CMC', 'MOKA_KIDS_CMC', (['space', 'start', 'proposal'], {'annealing': '(5)', 'iterations': '(30)'}), '(space, start, proposal, annealing=5, iterations=30)\n', (6481, 6533), False, 'from monaco.samplers import MOKA_KIDS_CMC\n'), ((7581, 7635), 'monaco.samplers.NPAIS', 'NPAIS', (['space', 'start', 'proposal'], {'annealing': '(5)', 'q0': 'q0', 'N': 'N'}), '(space, start, proposal, annealing=5, q0=q0, N=N)\n', (7586, 7635), False, 'from monaco.samplers import NPAIS\n'), ((968, 984), 'torch.rand', 'torch.rand', (['M', 'D'], {}), '(M, D)\n', (978, 984), False, 'import torch\n'), ((1013, 1026), 'torch.rand', 'torch.rand', (['M'], {}), '(M)\n', (1023, 1026), False, 'import torch\n'), ((1060, 1073), 'torch.rand', 'torch.rand', (['M'], {}), '(M)\n', (1070, 1073), False, 'import torch\n'), ((1893, 1931), 'monaco.euclidean.UnitPotential', 'UnitPotential', (['space', 'ackley_potential'], {}), '(space, ackley_potential)\n', (1906, 1931), False, 'from monaco.euclidean import GaussianMixture, UnitPotential\n'), ((7438, 7460), 'numpy.log', 'np.log', (['(self.w_0 / 0.1)'], {}), '(self.w_0 / 0.1)\n', (7444, 7460), True, 'import numpy as np\n'), ((7500, 7524), 'numpy.log', 'np.log', (['(self.w_1 / 0.001)'], {}), '(self.w_1 / 0.001)\n', (7506, 7524), True, 'import numpy as np\n'), ((1357, 1410), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.15, 0.005, 0.002, 0.002, 0.005]'], {}), '([0.15, 0.005, 0.002, 0.002, 0.005])\n', (1374, 1410), False, 'import torch\n'), ((1431, 1487), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.1, 2 / 12, 1 / 12, 1 / 12, 2 / 12]'], {}), '([0.1, 2 / 12, 1 / 12, 1 / 12, 2 / 
12])\n', (1448, 1487), False, 'import torch\n'), ((2384, 2400), 'torch.rand', 'torch.rand', (['N', 'D'], {}), '(N, D)\n', (2394, 2400), False, 'import torch\n'), ((2443, 2464), 'torch.rand', 'torch.rand', (['Nlucky', 'D'], {}), '(Nlucky, D)\n', (2453, 2464), False, 'import torch\n'), ((1283, 1327), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.5, 0.1, 0.2, 0.8, 0.9]'], {}), '([0.5, 0.1, 0.2, 0.8, 0.9])\n', (1300, 1327), False, 'import torch\n'), ((7198, 7214), 'torch.rand', 'torch.rand', (['n', 'D'], {}), '(n, D)\n', (7208, 7214), False, 'import torch\n'), ((7263, 7284), 'torch.rand', 'torch.rand', (['nlucky', 'D'], {}), '(nlucky, D)\n', (7273, 7284), False, 'import torch\n'), ((1847, 1856), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1853, 1856), True, 'import numpy as np\n')] |
from os import error
import numpy as np
import numbers
import warnings
from scipy import signal
import astropy.modeling.models
from stingray.io import write, read
from stingray import utils
from stingray import Lightcurve
from stingray import AveragedPowerspectrum
__all__ = ['Simulator']
class Simulator(object):
    """
    Methods to simulate and visualize light curves.

    Light curves are generated with the Timmer & Koenig (1995) recipe:
    Fourier amplitudes are drawn from the requested power spectrum,
    inverted to the time domain, then rescaled to the requested mean and
    fractional rms.  To limit red-noise leakage, the simulation can be
    carried out on a ``red_noise`` times longer span and cut to size.

    Parameters
    ----------
    dt : int, default 1
        time resolution of simulated light curve
    N : int, default 1024
        bins count of simulated light curve
    mean : float, default 0
        mean value of the simulated light curve
    rms : float, default 1
        fractional rms of the simulated light curve,
        actual rms is calculated by mean*rms
    err : float, default 0
        the errorbars on the final light curve
    red_noise : int, default 1
        multiple of real length of light curve, by
        which to simulate, to avoid red noise leakage
    random_state : int, default None
        seed value for random processes
    poisson : bool, default False
        return Poisson-distributed light curves.
    """

    def __init__(self, dt, N, mean, rms, err=0., red_noise=1,
                 random_state=None, tstart=0.0, poisson=False):
        self.dt = dt

        if not isinstance(N, (int, np.integer)):
            raise ValueError("N must be integer!")
        self.N = N

        if mean == 0:
            warnings.warn("Careful! A mean of zero is unphysical!" +
                          "This may have unintended consequences!")
        self.mean = mean
        self.nphot = self.mean * self.N
        self.rms = rms
        self.red_noise = red_noise
        self.tstart = tstart
        self.time = dt*np.arange(N) + self.tstart

        self.nphot_factor = 1000_000
        self.err = err
        self.poisson = poisson

        # Initialize a tuple of energy ranges with corresponding light curves
        self.channels = []

        self.random_state = utils.get_random_state(random_state)

        assert rms <= 1, 'Fractional rms must be less than 1.'
        assert dt > 0, 'Time resolution must be greater than 0'

    def simulate(self, *args):
        """
        Simulate light curve generation using power spectrum or
        impulse response.

        Examples
        --------
        * x = simulate(beta):
            For generating a light curve using power law spectrum.

            Parameters:
                * beta : float
                    Defines the shape of spectrum

        * x = simulate(s):
            For generating a light curve from user-provided spectrum.
            **Note**: In this case, the `red_noise` parameter is provided.
            You can generate a longer light curve by providing a higher
            frequency resolution on the input power spectrum.

            Parameters:
                * s : array-like
                    power spectrum

        * x = simulate(model):
            For generating a light curve from pre-defined model

            Parameters:
                * model : astropy.modeling.Model
                    the pre-defined model

        * x = simulate('model', params):
            For generating a light curve from pre-defined model

            Parameters:
                * model : string
                    the pre-defined model
                * params : list iterable or dict
                    the parameters for the pre-defined model

        * x = simulate(s, h):
            For generating a light curve using impulse response.

            Parameters:
                * s : array-like
                    Underlying variability signal
                * h : array-like
                    Impulse response

        * x = simulate(s, h, 'same'):
            For generating a light curve of same length as input signal,
            using impulse response.

            Parameters:
                * s : array-like
                    Underlying variability signal
                * h : array-like
                    Impulse response
                * mode : str
                    mode can be 'same', 'filtered, or 'full'.
                    'same' indicates that the length of output light
                    curve is same as that of input signal.
                    'filtered' means that length of output light curve
                    is len(s) - lag_delay
                    'full' indicates that the length of output light
                    curve is len(s) + len(h) -1

        Parameters
        ----------
        args
            See examples below.

        Returns
        -------
        lightCurve : `LightCurve` object
        """
        # Dispatch on argument count and type; see the docstring above
        # for the accepted call signatures.
        if isinstance(args[0], (numbers.Integral, float)) and len(args) == 1:
            return self._simulate_power_law(args[0])

        elif isinstance(args[0], astropy.modeling.Model) and len(args) == 1:
            return self._simulate_model(args[0])

        elif utils.is_string(args[0]) and len(args) == 2:
            return self._simulate_model_string(args[0], args[1])

        elif len(args) == 1:
            return self._simulate_power_spectrum(args[0])

        elif len(args) == 2:
            return self._simulate_impulse_response(args[0], args[1])

        elif len(args) == 3:
            return self._simulate_impulse_response(args[0], args[1], args[2])

        else:
            raise ValueError("Length of arguments must be 1, 2 or 3.")

    def simulate_channel(self, channel, *args):
        """
        Simulate a lightcurve and add it to corresponding energy
        channel.

        Parameters
        ----------
        channel : str
            range of energy channel (e.g., 3.5-4.5)
        *args
            see description of simulate() for details

        Returns
        -------
        lightCurve : `LightCurve` object
        """
        # Check that channel name does not already exist.
        if channel not in [lc[0] for lc in self.channels]:
            self.channels.append((channel, self.simulate(*args)))
        else:
            raise KeyError('A channel with this name already exists.')

    def get_channel(self, channel):
        """
        Get lightcurve belonging to the energy channel.
        """
        return [lc[1] for lc in self.channels if lc[0] == channel][0]

    def get_channels(self, channels):
        """
        Get multiple light curves belonging to the energy channels.
        """
        return [lc[1] for lc in self.channels if lc[0] in channels]

    def get_all_channels(self):
        """
        Get lightcurves belonging to all channels.
        """
        return [lc[1] for lc in self.channels]

    def delete_channel(self, channel):
        """
        Delete an energy channel.
        """
        channel = [lc for lc in self.channels if lc[0] == channel]

        if len(channel) == 0:
            raise KeyError('This channel does not exist or has already been '
                           'deleted.')
        else:
            index = self.channels.index(channel[0])
            del self.channels[index]

    def delete_channels(self, channels):
        """
        Delete multiple energy channels.
        """
        n = len(channels)
        channels = [lc for lc in self.channels if lc[0] in channels]

        if len(channels) != n:
            raise KeyError('One of more of the channels do not exist or have '
                           'already been deleted.')
        else:
            indices = [self.channels.index(channel) for channel in channels]
            # Delete from the highest index down so earlier deletions do
            # not shift the later ones.
            for i in sorted(indices, reverse=True):
                del self.channels[i]

    def count_channels(self):
        """
        Return total number of energy channels.
        """
        return len(self.channels)

    def simple_ir(self, start=0, width=1000, intensity=1):
        """
        Construct a simple impulse response using start time,
        width and scaling intensity.
        To create a delta impulse response, set width to 1.

        Parameters
        ----------
        start : int
            start time of impulse response
        width : int
            width of impulse response
        intensity : float
            scaling parameter to set the intensity of delayed emission
            corresponding to direct emission.

        Returns
        -------
        h : numpy.ndarray
            Constructed impulse response
        """
        # Fill in 0 entries until the start time
        h_zeros = np.zeros(int(start/self.dt))

        # Define constant impulse response
        h_ones = np.ones(int(width/self.dt)) * intensity

        return np.append(h_zeros, h_ones)

    def relativistic_ir(self, t1=3, t2=4, t3=10, p1=1, p2=1.4, rise=0.6,
                        decay=0.1):
        """
        Construct a realistic impulse response considering the relativistic
        effects.

        Parameters
        ----------
        t1 : int
            primary peak time
        t2 : int
            secondary peak time
        t3 : int
            end time
        p1 : float
            value of primary peak
        p2 : float
            value of secondary peak
        rise : float
            slope of rising exponential from primary peak to secondary peak
        decay : float
            slope of decaying exponential from secondary peak to end time

        Returns
        -------
        h : numpy.ndarray
            Constructed impulse response
        """
        dt = self.dt

        assert t2 > t1, 'Secondary peak must be after primary peak.'
        assert t3 > t2, 'End time must be after secondary peak.'
        assert p2 > p1, 'Secondary peak must be greater than primary peak.'

        # Append zeros before start time
        h_primary = np.append(np.zeros(int(t1/dt)), p1)

        # Create a rising exponential of user-provided slope
        x = np.linspace(t1/dt, t2/dt, int((t2-t1)/dt))
        h_rise = np.exp(rise*x)

        # Evaluate a factor for scaling exponential
        factor = np.max(h_rise)/(p2-p1)
        h_secondary = (h_rise/factor) + p1

        # Create a decaying exponential until the end time.
        x = np.linspace(t2/dt, t3/dt, int((t3-t2)/dt))
        # Anchor the decay at the secondary peak time ``t2``, so the decay
        # starts at its maximum for any ``t2``. (A previous version
        # hard-coded ``4`` — the default value of ``t2`` — which shifted
        # the decay whenever a different peak time was requested.)
        h_decay = (np.exp((-decay)*(x - t2/dt)))

        # Add the three responses
        h = np.append(h_primary, h_secondary)
        h = np.append(h, h_decay)

        return h

    def _find_inverse(self, real, imaginary):
        """
        Forms complex numbers corresponding to real and imaginary
        parts and finds inverse series.

        Parameters
        ----------
        real : numpy.ndarray
            Co-effients corresponding to real parts of complex numbers
        imaginary : numpy.ndarray
            Co-efficients correspondong to imaginary parts of complex
            numbers

        Returns
        -------
        ifft : numpy.ndarray
            Real inverse fourier transform of complex numbers
        """
        # Form complex numbers corresponding to each frequency
        f = [complex(r, i) for r, i in zip(real, imaginary)]

        # Prepend the zero-frequency (DC) term, which carries the total
        # counts of the red_noise-times-extended light curve.
        f = np.hstack([self.mean * self.N * self.red_noise, f])

        # Obtain time series
        return np.fft.irfft(f, n=self.N * self.red_noise)

    def _timmerkoenig(self, pds_shape):
        """Straight application of T&K method to a PDS shape.

        Parameters
        ----------
        pds_shape : numpy.ndarray
            Target power spectral density at the positive Fourier
            frequencies of the (extended) light curve.

        Returns
        -------
        lc : :class:`stingray.lightcurve.Lightcurve` object
        """
        pds_size = pds_shape.size

        # T&K: draw independent Gaussian real/imaginary parts whose
        # variance follows the requested spectral power.
        real = np.random.normal(size=pds_size) * np.sqrt(0.5 * pds_shape)
        imaginary = np.random.normal(size=pds_size) * np.sqrt(0.5 * pds_shape)
        # The component at the highest frequency must be real for a real
        # signal (assumes an even number of time bins — TODO confirm).
        imaginary[-1] = 0

        counts = self._find_inverse(real, imaginary)
        self.std = counts.std()

        rescaled_counts = self._extract_and_scale(counts)
        # NOTE(review): zero error bars are attached here; the ``err``
        # value stored on the simulator is not used by this path.
        err = np.zeros_like(rescaled_counts)

        if self.poisson:
            # Poisson rates must be non-negative; clip before drawing.
            bad = rescaled_counts < 0
            if np.any(bad):
                warnings.warn("Some bins of the light curve have counts < 0. Setting to 0")
            rescaled_counts[bad] = 0

            lc = Lightcurve(self.time, np.random.poisson(rescaled_counts),
                            err_dist='poisson', dt=self.dt, skip_checks=True)
            lc.smooth_counts = rescaled_counts
        else:
            lc = Lightcurve(self.time, rescaled_counts,
                            err=err,
                            err_dist='gauss', dt=self.dt, skip_checks=True)

        return lc

    def _simulate_power_law(self, B):
        """
        Generate LightCurve from a power law spectrum.

        Parameters
        ----------
        B : int
            Defines the shape of power law spectrum.

        Returns
        -------
        lightCurve : array-like
        """
        # Define frequencies at which to compute PSD
        # (only positive frequencies; the DC term is added later)
        w = np.fft.rfftfreq(self.red_noise*self.N, d=self.dt)[1:]

        pds_shape = np.power((1/w), B)

        return self._timmerkoenig(pds_shape)

    def _simulate_power_spectrum(self, s):
        """
        Generate a light curve from user-provided spectrum.

        Parameters
        ----------
        s : array-like
            power spectrum

        Returns
        -------
        lightCurve : `LightCurve` object
        """
        # Cast spectrum as numpy array, zero-padded so that the extended
        # (red_noise times longer) series has power only where specified.
        pds_shape = np.zeros(s.size * self.red_noise)
        pds_shape[:s.size] = s

        return self._timmerkoenig(pds_shape)

    def _simulate_model(self, model):
        """
        For generating a light curve from a pre-defined model

        Parameters
        ----------
        model : astropy.modeling.Model derived function
            the pre-defined model
            (library-based, available in astropy.modeling.models or
            custom-defined)

        Returns
        -------
        lightCurve : :class:`stingray.lightcurve.LightCurve` object
        """
        # Frequencies at which the PSD is to be computed
        # (only positive frequencies, since the signal is real)
        nbins = self.red_noise * self.N
        simfreq = np.fft.rfftfreq(nbins, d=self.dt)[1:]

        # Compute PSD from model
        simpsd = model(simfreq)

        return self._timmerkoenig(simpsd)

    def _simulate_model_string(self, model_str, params):
        """
        For generating a light curve from a pre-defined model

        Parameters
        ----------
        model_str : string
            name of the pre-defined model
        params : list or dictionary
            parameters of the pre-defined model

        Returns
        -------
        lightCurve : :class:`stingray.lightcurve.LightCurve` object
        """
        from . import models

        # Frequencies at which the PSD is to be computed
        # (only positive frequencies, since the signal is real)
        nbins = self.red_noise*self.N
        simfreq = np.fft.rfftfreq(nbins, d=self.dt)[1:]

        if model_str not in dir(models):
            raise ValueError('Model is not defined!')

        # The model name is validated against ``dir(models)`` above,
        # which keeps the eval below from executing arbitrary input.
        if isinstance(params, dict):
            model = eval('models.' + model_str + '(**params)')
            # Compute PSD from model
            simpsd = model(simfreq)
        elif isinstance(params, list):
            simpsd = eval('models.' + model_str + '(simfreq, params)')
        else:
            raise ValueError('Params should be list or dictionary!')

        return self._timmerkoenig(simpsd)

    def _simulate_impulse_response(self, s, h, mode='same'):
        """
        Generate LightCurve from impulse response. To get
        accurate results, binning intervals (dt) of variability
        signal 's' and impulse response 'h' must be equal.

        Parameters
        ----------
        s : array-like
            Underlying variability signal
        h : array-like
            Impulse response
        mode : str
            mode can be 'same', 'filtered, or 'full'.
            'same' indicates that the length of output light
            curve is same as that of input signal.
            'filtered' means that length of output light curve
            is len(s) - lag_delay
            'full' indicates that the length of output light
            curve is len(s) + len(h) -1

        Returns
        -------
        lightCurve : :class:`stingray.lightcurve.LightCurve` object
        """
        lc = signal.fftconvolve(s, h)

        # For mode 'full' the convolution is kept untrimmed
        # (fftconvolve's own default output length, len(s) + len(h) - 1).
        if mode == 'same':
            lc = lc[:-(len(h) - 1)]
        elif mode == 'filtered':
            lc = lc[(len(h) - 1):-(len(h) - 1)]

        # Bin-center time stamps for the convolved series.
        time = self.dt * np.arange(0.5, len(lc)) + self.tstart
        err = np.zeros_like(time)
        return Lightcurve(time, lc, err_dist='gauss', dt=self.dt, err=err,
                          skip_checks=True)

    def _extract_and_scale(self, long_lc):
        """
        i) Make a random cut and extract a light curve of required
        length.

        ii) Rescale light curve i) with zero mean and unit standard
        deviation, and ii) user provided mean and rms (fractional
        rms * mean)

        Parameters
        ----------
        long_lc : numpy.ndarray
            Simulated lightcurve of length 'N' times 'red_noise'

        Returns
        -------
        lc : numpy.ndarray
            Normalized and extracted lightcurve of length 'N'
        """
        if self.red_noise == 1:
            lc = long_lc
        else:
            # Make random cut and extract light curve of length 'N'
            extract = \
                self.random_state.randint(self.N-1,
                                          self.red_noise*self.N - self.N+1)
            lc = np.take(long_lc, range(extract, extract + self.N))

        mean_lc = np.mean(lc)
        # self.std was stored from the full-length series in
        # _timmerkoenig, so every extracted segment is scaled consistently.
        if self.mean == 0:
            return (lc-mean_lc)/self.std * self.rms
        else:
            return (lc-mean_lc)/self.std * self.mean * self.rms + self.mean

    def powerspectrum(self, lc, seg_size=None):
        """
        Make a powerspectrum of the simulated light curve.

        Parameters
        ----------
        lc : lightcurve.Lightcurve object OR
            iterable of lightcurve.Lightcurve objects
            The light curve data to be Fourier-transformed.
        seg_size : float, optional
            Segment length for averaging; defaults to the whole
            light curve (a single segment).

        Returns
        -------
        power : numpy.ndarray
            The array of normalized squared absolute values of Fourier
            amplitudes
        """
        if seg_size is None:
            seg_size = lc.tseg

        return AveragedPowerspectrum(lc, seg_size).power

    @staticmethod
    def read(filename, format_='pickle'):
        """
        Imports Simulator object.

        Parameters
        ----------
        filename : str
            Name of the Simulator object to be read.
        format_ : str
            Available option is 'pickle.'

        Returns
        -------
        object : `Simulator` object
        """
        if format_ == 'pickle':
            data = read(filename, 'pickle')
            return data
        else:
            raise KeyError("Format not supported.")

    def write(self, filename, format_='pickle'):
        """
        Exports Simulator object.

        Parameters
        ----------
        filename : str
            Name of the Simulator object to be created.
        format_ : str
            Available options are 'pickle' and 'hdf5'.
        """
        if format_ == 'pickle':
            write(self, filename)
        else:
            raise KeyError("Format not supported.")
| [
"numpy.sqrt",
"numpy.hstack",
"numpy.fft.irfft",
"stingray.io.write",
"stingray.Lightcurve",
"stingray.utils.is_string",
"numpy.arange",
"numpy.mean",
"numpy.random.poisson",
"scipy.signal.fftconvolve",
"numpy.max",
"numpy.exp",
"warnings.warn",
"numpy.random.normal",
"numpy.any",
"sti... | [((2021, 2057), 'stingray.utils.get_random_state', 'utils.get_random_state', (['random_state'], {}), '(random_state)\n', (2043, 2057), False, 'from stingray import utils\n'), ((8631, 8657), 'numpy.append', 'np.append', (['h_zeros', 'h_ones'], {}), '(h_zeros, h_ones)\n', (8640, 8657), True, 'import numpy as np\n'), ((9924, 9940), 'numpy.exp', 'np.exp', (['(rise * x)'], {}), '(rise * x)\n', (9930, 9940), True, 'import numpy as np\n'), ((10209, 10238), 'numpy.exp', 'np.exp', (['(-decay * (x - 4 / dt))'], {}), '(-decay * (x - 4 / dt))\n', (10215, 10238), True, 'import numpy as np\n'), ((10283, 10316), 'numpy.append', 'np.append', (['h_primary', 'h_secondary'], {}), '(h_primary, h_secondary)\n', (10292, 10316), True, 'import numpy as np\n'), ((10329, 10350), 'numpy.append', 'np.append', (['h', 'h_decay'], {}), '(h, h_decay)\n', (10338, 10350), True, 'import numpy as np\n'), ((11071, 11122), 'numpy.hstack', 'np.hstack', (['[self.mean * self.N * self.red_noise, f]'], {}), '([self.mean * self.N * self.red_noise, f])\n', (11080, 11122), True, 'import numpy as np\n'), ((11168, 11210), 'numpy.fft.irfft', 'np.fft.irfft', (['f'], {'n': '(self.N * self.red_noise)'}), '(f, n=self.N * self.red_noise)\n', (11180, 11210), True, 'import numpy as np\n'), ((11701, 11731), 'numpy.zeros_like', 'np.zeros_like', (['rescaled_counts'], {}), '(rescaled_counts)\n', (11714, 11731), True, 'import numpy as np\n'), ((12790, 12808), 'numpy.power', 'np.power', (['(1 / w)', 'B'], {}), '(1 / w, B)\n', (12798, 12808), True, 'import numpy as np\n'), ((13205, 13238), 'numpy.zeros', 'np.zeros', (['(s.size * self.red_noise)'], {}), '(s.size * self.red_noise)\n', (13213, 13238), True, 'import numpy as np\n'), ((16200, 16224), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['s', 'h'], {}), '(s, h)\n', (16218, 16224), False, 'from scipy import signal\n'), ((16449, 16468), 'numpy.zeros_like', 'np.zeros_like', (['time'], {}), '(time)\n', (16462, 16468), True, 'import numpy as np\n'), ((16484, 
16561), 'stingray.Lightcurve', 'Lightcurve', (['time', 'lc'], {'err_dist': '"""gauss"""', 'dt': 'self.dt', 'err': 'err', 'skip_checks': '(True)'}), "(time, lc, err_dist='gauss', dt=self.dt, err=err, skip_checks=True)\n", (16494, 16561), False, 'from stingray import Lightcurve\n'), ((17530, 17541), 'numpy.mean', 'np.mean', (['lc'], {}), '(lc)\n', (17537, 17541), True, 'import numpy as np\n'), ((1468, 1570), 'warnings.warn', 'warnings.warn', (["('Careful! A mean of zero is unphysical!' +\n 'This may have unintended consequences!')"], {}), "('Careful! A mean of zero is unphysical!' +\n 'This may have unintended consequences!')\n", (1481, 1570), False, 'import warnings\n'), ((10009, 10023), 'numpy.max', 'np.max', (['h_rise'], {}), '(h_rise)\n', (10015, 10023), True, 'import numpy as np\n'), ((11377, 11408), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'pds_size'}), '(size=pds_size)\n', (11393, 11408), True, 'import numpy as np\n'), ((11411, 11435), 'numpy.sqrt', 'np.sqrt', (['(0.5 * pds_shape)'], {}), '(0.5 * pds_shape)\n', (11418, 11435), True, 'import numpy as np\n'), ((11456, 11487), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'pds_size'}), '(size=pds_size)\n', (11472, 11487), True, 'import numpy as np\n'), ((11490, 11514), 'numpy.sqrt', 'np.sqrt', (['(0.5 * pds_shape)'], {}), '(0.5 * pds_shape)\n', (11497, 11514), True, 'import numpy as np\n'), ((11811, 11822), 'numpy.any', 'np.any', (['bad'], {}), '(bad)\n', (11817, 11822), True, 'import numpy as np\n'), ((12188, 12288), 'stingray.Lightcurve', 'Lightcurve', (['self.time', 'rescaled_counts'], {'err': 'err', 'err_dist': '"""gauss"""', 'dt': 'self.dt', 'skip_checks': '(True)'}), "(self.time, rescaled_counts, err=err, err_dist='gauss', dt=self.\n dt, skip_checks=True)\n", (12198, 12288), False, 'from stingray import Lightcurve\n'), ((12715, 12766), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['(self.red_noise * self.N)'], {'d': 'self.dt'}), '(self.red_noise * self.N, d=self.dt)\n', (12730, 
12766), True, 'import numpy as np\n'), ((13946, 13979), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['nbins'], {'d': 'self.dt'}), '(nbins, d=self.dt)\n', (13961, 13979), True, 'import numpy as np\n'), ((14736, 14769), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['nbins'], {'d': 'self.dt'}), '(nbins, d=self.dt)\n', (14751, 14769), True, 'import numpy as np\n'), ((18276, 18311), 'stingray.AveragedPowerspectrum', 'AveragedPowerspectrum', (['lc', 'seg_size'], {}), '(lc, seg_size)\n', (18297, 18311), False, 'from stingray import AveragedPowerspectrum\n'), ((18737, 18761), 'stingray.io.read', 'read', (['filename', '"""pickle"""'], {}), "(filename, 'pickle')\n", (18741, 18761), False, 'from stingray.io import write, read\n'), ((19200, 19221), 'stingray.io.write', 'write', (['self', 'filename'], {}), '(self, filename)\n', (19205, 19221), False, 'from stingray.io import write, read\n'), ((1768, 1780), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1777, 1780), True, 'import numpy as np\n'), ((11840, 11915), 'warnings.warn', 'warnings.warn', (['"""Some bins of the light curve have counts < 0. Setting to 0"""'], {}), "('Some bins of the light curve have counts < 0. Setting to 0')\n", (11853, 11915), False, 'import warnings\n'), ((11996, 12030), 'numpy.random.poisson', 'np.random.poisson', (['rescaled_counts'], {}), '(rescaled_counts)\n', (12013, 12030), True, 'import numpy as np\n'), ((4951, 4975), 'stingray.utils.is_string', 'utils.is_string', (['args[0]'], {}), '(args[0])\n', (4966, 4975), False, 'from stingray import utils\n')] |
from features.numpy_sift import SIFTDescriptor
import numpy as np
import features.feature_utils
from features.DetectorDescriptorTemplate import DetectorAndDescriptor
class np_sift(DetectorAndDescriptor):
    """Patch-based SIFT descriptor backed by a pure-numpy implementation.

    This extractor only operates on pre-cropped patches; the whole-image
    detection/extraction methods are intentionally no-ops.
    """

    def __init__(self, peak_thresh=10.0):
        """
        Parameters
        ----------
        peak_thresh : float
            Peak threshold; stored on the instance (not read by the
            numpy SIFT descriptor itself in this class).
        """
        super(
            np_sift,
            self).__init__(
            name='np_sift',
            is_detector=True,
            is_descriptor=True,
            is_both=True,
            patch_input=True)
        self.peak_thresh = peak_thresh
        # Lazily created SIFTDescriptor, rebuilt when the patch size changes.
        self.descriptor = None

    def detect_feature(self, image):
        """Not supported: this extractor works on patches only."""
        pass

    def extract_descriptor(self, image, feature):
        """Not supported: this extractor works on patches only."""
        pass

    def extract_all(self, image):
        """Not supported: this extractor works on patches only."""
        pass

    def extract_descriptor_from_patch(self, patches):
        """Compute a 128-dimensional SIFT descriptor for every patch.

        Parameters
        ----------
        patches : numpy.ndarray
            Patch stack indexed as (num_patches, height, width, channels)
            -- presumably RGB; converted to gray before describing.

        Returns
        -------
        numpy.ndarray
            Array of shape (num_patches, 128), one descriptor per patch.
        """
        patch_num = patches.shape[0]
        # (Removed a no-op `patches.shape[1]` expression statement here.)
        w = patches.shape[2]
        # Re-create the descriptor only when the patch size changes.
        if self.descriptor is None or self.descriptor.patchSize != w:
            self.descriptor = SIFTDescriptor(w)
        descriptors = np.zeros((patch_num, 128))
        for i in range(patch_num):
            patch = features.feature_utils.all_to_gray(patches[i, :, :, :])
            patch = patch[:, :, 0]
            descriptors[i, :] = self.descriptor.describe(patch).flatten()
        return descriptors
| [
"numpy.zeros",
"features.numpy_sift.SIFTDescriptor"
] | [((977, 1003), 'numpy.zeros', 'np.zeros', (['(patch_num, 128)'], {}), '((patch_num, 128))\n', (985, 1003), True, 'import numpy as np\n'), ((937, 954), 'features.numpy_sift.SIFTDescriptor', 'SIFTDescriptor', (['w'], {}), '(w)\n', (951, 954), False, 'from features.numpy_sift import SIFTDescriptor\n')] |
"""
Tractor module for SMOT
"""
# pylint: disable=arguments-differ
from abc import ABC, abstractmethod
import numpy as np
import mxnet as mx
from .utils import timeit
from .general_detector import GeneralDetector
class BaseAnchorBasedTracktor(ABC):
    """
    Abstract interface for anchor-based tracktors.

    Concrete subclasses run an anchor-based detector on each frame and
    re-score the anchors belonging to currently tracked objects.
    """
    @abstractmethod
    def anchors(self):
        """
        Return the list of anchors used in this detector.

        Returns
        -------
        The anchor boxes of the underlying detector.
        """
        raise NotImplementedError
    @abstractmethod
    def prepare_for_frame(self, frame):
        """
        Run anything that needs to happen before the motion prediction.

        It can prepare the detector or even run the backbone feature
        extraction.  It can also provide data to motion prediction.

        Parameters
        ----------
        frame: the frame data, the same as in the detect_and_track method

        Returns
        -------
        motion_predict_data: optional data provided to motion prediction;
            if no data is provided, return None
        """
        raise NotImplementedError
    @abstractmethod
    def detect_and_track(self, frame, tracking_anchor_indices, tracking_anchor_weights, tracking_classes):
        """
        Perform detection and tracking on the new frame.

        Parameters
        ----------
        frame: HxWx3 RGB image
        tracking_anchor_indices: NxM ndarray
        tracking_anchor_weights: NxM ndarray
        tracking_classes: Nx1 ndarray of the class ids of the tracked objects

        Returns
        -------
        detection_bounding_boxes: all detection results, in
            (x0, y0, x1, y1, confidence, cls) format
        detection_source: source anchor box indices for each detection
        tracking_boxes: all tracking results, in
            (x0, y0, x1, y1, confidence) format
        extra_info: extra information from the tracktor,
            e.g. landmarks; a dict
        """
        raise NotImplementedError
    @abstractmethod
    def clean_up(self):
        """
        Clean up any per-video state after processing one video.

        Returns
        -------
        None
        """
        raise NotImplementedError
class GluonSSDMultiClassTracktor(BaseAnchorBasedTracktor):
    """
    Tracktor built on a Gluon SSD multi-class object detector.
    """
    def __init__(self, gpu_id=0, detector_thresh=0.5, model_name="",
                 use_pretrained=False, param_path="", data_shape=512):
        """
        Parameters
        ----------
        gpu_id : int
            id of the GPU the detector and tensors are placed on
        detector_thresh : float
            score threshold below which detections are discarded
        model_name : str
            detector model name passed through to GeneralDetector
        use_pretrained : bool
            whether GeneralDetector should load pretrained weights
        param_path : str
            path to model parameters passed through to GeneralDetector
        data_shape : int
            detector input resolution
        """
        self.detector = GeneralDetector(gpu_id,
                                        aspect_ratio=1.,
                                        data_shape=data_shape,
                                        model_name=model_name,
                                        use_pretrained=use_pretrained,
                                        param_path=param_path
                                        )
        self._anchor_tensor = None
        self._detector_thresh = detector_thresh
        self._ctx = mx.gpu(gpu_id)
        # Placeholder tensor fed to the detector when there is nothing to track.
        self._dummy_ti = mx.nd.array([[0]], ctx=self._ctx)
    def anchors(self):
        """Return the detector's anchor tensor; raises if not yet initialized."""
        if self.detector.anchor_tensor is None:
            raise ValueError("anchor not initialized yet")
        return self.detector.anchor_tensor
    def clean_up(self):
        """No per-video state to release."""
        pass
    def prepare_for_frame(self, frame):
        """No pre-frame work needed; provides no motion-prediction data."""
        return None
    @timeit
    def detect_and_track(self, frame, tracking_anchor_indices, tracking_anchor_weights, tracking_object_classes):
        """
        Run detection on `frame` and re-score tracked objects' anchors.
        See BaseAnchorBasedTracktor.detect_and_track for the full contract.
        """
        with_tracking = len(tracking_anchor_indices) > 0
        if with_tracking:
            # Move the tracking state onto the detector's GPU context.
            tracking_indices = mx.nd.array(tracking_anchor_indices, ctx=self._ctx)
            tracking_weights = mx.nd.array(tracking_anchor_weights, ctx=self._ctx)
            tracking_classes = mx.nd.array(tracking_object_classes.reshape((-1, 1)), ctx=self._ctx)
        else:
            # No active tracks: feed dummy tensors so the detector still
            # runs with a fixed input signature.
            tracking_classes = tracking_indices = tracking_weights = self._dummy_ti
        ids, scores, bboxes, voted_tracking_bboxes, detection_anchor_indices = \
            self.detector.run_detection(frame, tracking_indices, tracking_weights, tracking_classes)
        # NOTE(review): the prefix-slice below assumes run_detection returns
        # detections sorted by descending score -- confirm in GeneralDetector.
        valid_det_num = (scores > self._detector_thresh).sum().astype(int).asnumpy()[0]
        if valid_det_num > 0:
            valid_scores = scores[:valid_det_num]
            valid_bboxes = bboxes[:valid_det_num, :]
            valid_classes = ids[:valid_det_num, :]
            # Pack as (x0, y0, x1, y1, confidence, cls) rows.
            detection_output = mx.nd.concat(valid_bboxes, valid_scores, valid_classes, dim=1).asnumpy()
            anchor_indices_output = detection_anchor_indices[:valid_det_num, :]
        else:
            # no detection
            detection_output = np.array([])
            anchor_indices_output = np.array([])
        tracking_response = voted_tracking_bboxes.asnumpy() \
            if with_tracking else np.array([])
        return detection_output, anchor_indices_output, tracking_response, \
            {}
| [
"mxnet.nd.array",
"numpy.array",
"mxnet.gpu",
"mxnet.nd.concat"
] | [((2904, 2918), 'mxnet.gpu', 'mx.gpu', (['gpu_id'], {}), '(gpu_id)\n', (2910, 2918), True, 'import mxnet as mx\n'), ((2945, 2978), 'mxnet.nd.array', 'mx.nd.array', (['[[0]]'], {'ctx': 'self._ctx'}), '([[0]], ctx=self._ctx)\n', (2956, 2978), True, 'import mxnet as mx\n'), ((3495, 3546), 'mxnet.nd.array', 'mx.nd.array', (['tracking_anchor_indices'], {'ctx': 'self._ctx'}), '(tracking_anchor_indices, ctx=self._ctx)\n', (3506, 3546), True, 'import mxnet as mx\n'), ((3578, 3629), 'mxnet.nd.array', 'mx.nd.array', (['tracking_anchor_weights'], {'ctx': 'self._ctx'}), '(tracking_anchor_weights, ctx=self._ctx)\n', (3589, 3629), True, 'import mxnet as mx\n'), ((4541, 4553), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4549, 4553), True, 'import numpy as np\n'), ((4590, 4602), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4598, 4602), True, 'import numpy as np\n'), ((4700, 4712), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4708, 4712), True, 'import numpy as np\n'), ((4316, 4378), 'mxnet.nd.concat', 'mx.nd.concat', (['valid_bboxes', 'valid_scores', 'valid_classes'], {'dim': '(1)'}), '(valid_bboxes, valid_scores, valid_classes, dim=1)\n', (4328, 4378), True, 'import mxnet as mx\n')] |
"""
This script is a simple MNIST training script which uses Tensorflow's Estimator interface.
It is designed to be used with SageMaker Debugger in an official SageMaker Framework container (i.e. AWS Deep Learning Container).
You will notice that this script looks exactly like a normal TensorFlow training script.
The hook needed by SageMaker Debugger to save tensors during training will be automatically added in those environments.
The hook will load configuration from json configuration that SageMaker will put in the training container from the configuration provided using the SageMaker python SDK when creating a job.
For more information, please refer to https://github.com/awslabs/sagemaker-debugger/blob/master/docs/sagemaker.md
"""
# Standard Library
import argparse
import logging
import random
# Third Party
import numpy as np
import tensorflow as tf
# Emit INFO-level records from the root logger so training progress
# shows up in the job logs.
logging.getLogger().setLevel(logging.INFO)

# Command-line hyperparameters (populated by the SageMaker training job).
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=0.001)
# NOTE(review): argparse `type=bool` is truthy for ANY non-empty string
# (even "False"); consider action="store_true" -- left unchanged here.
parser.add_argument("--random_seed", type=bool, default=False)
parser.add_argument("--num_epochs", type=int, default=5, help="Number of epochs to train for")
parser.add_argument(
    "--num_steps",
    type=int,
    help="Number of steps to train for. If this is passed, it overrides num_epochs",
)
parser.add_argument(
    "--num_eval_steps",
    type=int,
    help="Number of steps to evaluate for. If this"
    "is passed, it doesnt evaluate over the full eval set",
)
parser.add_argument("--model_dir", type=str, default="/tmp/mnist_model")
args = parser.parse_args()

# these random seeds are only intended for test purpose.
# for now, 2,2,12 could promise no assert failure when running tests.
# if you wish to change the number, notice that certain steps' tensor value may be capable of variation
if args.random_seed:
    tf.set_random_seed(2)
    np.random.seed(2)
    random.seed(12)
def cnn_model_fn(features, labels, mode):
    """Estimator model_fn for a small MNIST CNN.

    Architecture: two conv + max-pool stages, a 1024-unit dense layer with
    dropout, and a 10-way logits layer.  Returns an EstimatorSpec suited
    to the PREDICT, TRAIN or EVAL mode.
    """
    # Input layer: reshape flat MNIST pixels to NHWC (28x28, 1 channel).
    net = tf.reshape(features["x"], [-1, 28, 28, 1])

    # Convolutional layer #1 + pooling layer #1.
    net = tf.layers.conv2d(
        inputs=net, filters=32, kernel_size=[5, 5], padding="same",
        activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)

    # Convolutional layer #2 + pooling layer #2.
    net = tf.layers.conv2d(
        inputs=net, filters=64, kernel_size=[5, 5], padding="same",
        activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=2)

    # Dense layer on the flattened feature maps; dropout only in training.
    net = tf.reshape(net, [-1, 7 * 7 * 64])
    net = tf.layers.dense(inputs=net, units=1024, activation=tf.nn.relu)
    net = tf.layers.dropout(
        inputs=net, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # Logits layer: one unit per digit class.
    logits = tf.layers.dense(inputs=net, units=10)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by
        # the `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss (for both TRAIN and EVAL modes).
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.lr)
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
# Load training and eval data
((train_data, train_labels), (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data()
train_data = train_data / np.float32(255)
train_labels = train_labels.astype(np.int32) # not required
eval_data = eval_data / np.float32(255)
eval_labels = eval_labels.astype(np.int32) # not required
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data}, y=train_labels, batch_size=128, num_epochs=args.num_epochs, shuffle=True
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data}, y=eval_labels, num_epochs=1, shuffle=False
)
mnist_classifier.train(input_fn=train_input_fn, steps=args.num_steps)
mnist_classifier.evaluate(input_fn=eval_input_fn, steps=args.num_eval_steps)
| [
"logging.getLogger",
"tensorflow.estimator.inputs.numpy_input_fn",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.nn.softmax",
"tensorflow.set_random_seed",
"argparse.ArgumentParser",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.estimator.Estimator",
"tensorflow.metrics.accuracy",
"... | [((922, 947), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (945, 947), False, 'import argparse\n'), ((4188, 4223), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (4221, 4223), True, 'import tensorflow as tf\n'), ((4448, 4519), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'cnn_model_fn', 'model_dir': 'args.model_dir'}), '(model_fn=cnn_model_fn, model_dir=args.model_dir)\n', (4470, 4519), True, 'import tensorflow as tf\n'), ((4538, 4671), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': '(128)', 'num_epochs': 'args.num_epochs', 'shuffle': '(True)'}), "(x={'x': train_data}, y=train_labels,\n batch_size=128, num_epochs=args.num_epochs, shuffle=True)\n", (4572, 4671), True, 'import tensorflow as tf\n'), ((4691, 4793), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=eval_labels,\n num_epochs=1, shuffle=False)\n", (4725, 4793), True, 'import tensorflow as tf\n'), ((1832, 1853), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(2)'], {}), '(2)\n', (1850, 1853), True, 'import tensorflow as tf\n'), ((1858, 1875), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1872, 1875), True, 'import numpy as np\n'), ((1880, 1895), 'random.seed', 'random.seed', (['(12)'], {}), '(12)\n', (1891, 1895), False, 'import random\n'), ((2010, 2052), 'tensorflow.reshape', 'tf.reshape', (["features['x']", '[-1, 28, 28, 1]'], {}), "(features['x'], [-1, 28, 28, 1])\n", (2020, 2052), True, 'import tensorflow as tf\n'), ((2095, 2206), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input_layer', 'filters': '(32)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), 
"(inputs=input_layer, filters=32, kernel_size=[5, 5],\n padding='same', activation=tf.nn.relu)\n", (2111, 2206), True, 'import tensorflow as tf\n'), ((2253, 2319), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv1, pool_size=[2, 2], strides=2)\n', (2276, 2319), True, 'import tensorflow as tf\n'), ((2383, 2489), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': '(64)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=pool1, filters=64, kernel_size=[5, 5], padding=\n 'same', activation=tf.nn.relu)\n", (2399, 2489), True, 'import tensorflow as tf\n'), ((2511, 2577), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv2, pool_size=[2, 2], strides=2)\n', (2534, 2577), True, 'import tensorflow as tf\n'), ((2614, 2649), 'tensorflow.reshape', 'tf.reshape', (['pool2', '[-1, 7 * 7 * 64]'], {}), '(pool2, [-1, 7 * 7 * 64])\n', (2624, 2649), True, 'import tensorflow as tf\n'), ((2662, 2731), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pool2_flat', 'units': '(1024)', 'activation': 'tf.nn.relu'}), '(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n', (2677, 2731), True, 'import tensorflow as tf\n'), ((2746, 2838), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'dense', 'rate': '(0.4)', 'training': '(mode == tf.estimator.ModeKeys.TRAIN)'}), '(inputs=dense, rate=0.4, training=mode == tf.estimator.\n ModeKeys.TRAIN)\n', (2763, 2838), True, 'import tensorflow as tf\n'), ((2881, 2922), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'dropout', 'units': '(10)'}), '(inputs=dropout, units=10)\n', (2896, 2922), True, 'import tensorflow as tf\n'), ((3427, 3495), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 
'logits': 'logits'}), '(labels=labels, logits=logits)\n', (3465, 3495), True, 'import tensorflow as tf\n'), ((4017, 4103), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=loss, eval_metric_ops=\n eval_metric_ops)\n', (4043, 4103), True, 'import tensorflow as tf\n'), ((4251, 4266), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (4261, 4266), True, 'import numpy as np\n'), ((4353, 4368), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (4363, 4368), True, 'import numpy as np\n'), ((869, 888), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (886, 888), False, 'import logging\n'), ((3022, 3053), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (3031, 3053), True, 'import tensorflow as tf\n'), ((3185, 3229), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""softmax_tensor"""'}), "(logits, name='softmax_tensor')\n", (3198, 3229), True, 'import tensorflow as tf\n'), ((3299, 3361), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (3325, 3361), True, 'import tensorflow as tf\n'), ((3610, 3666), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'args.lr'}), '(learning_rate=args.lr)\n', (3643, 3666), True, 'import tensorflow as tf\n'), ((3771, 3838), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, loss=loss, train_op=train_op)\n', (3797, 3838), True, 'import tensorflow as tf\n'), ((3929, 3999), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'labels', 'predictions': "predictions['classes']"}), "(labels=labels, predictions=predictions['classes'])\n", (3948, 3999), True, 
'import tensorflow as tf\n'), ((3728, 3754), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (3752, 3754), True, 'import tensorflow as tf\n')] |
from collections import defaultdict
import numpy as np
from dateutil import parser
from pandas import DataFrame
from peewee import *
from playhouse.db_url import connect
from config import app_config as cfg
# Connect to the database URL defined in the app_config
# (peewee's connect() infers the backend from the URL scheme).
db = connect(cfg.database['url'])
def create_database():
    """(Re-)create the schema: drop the User/Tweet tables, then create them."""
    db.connect()
    # The trailing True is the "safe" flag: ignore missing/existing tables.
    db.drop_tables([User, Tweet], True)
    db.create_tables([User, Tweet], True)
class BaseModel(Model):
    """Base peewee model binding all subclasses to the shared database."""
    class Meta:
        database = db
class User(BaseModel):
    """A Twitter account, labelled as bot or human."""
    screen_name = CharField()
    is_bot = BooleanField()
    followers = IntegerField()
    following = IntegerField()

    def reputation(self):
        """Return followers / (followers + following), or 0 with no followers."""
        if self.followers == 0:
            return 0
        else:
            return self.followers / float(self.followers + self.following)

    @classmethod
    def get_sample(cls, is_bot=False):
        """Return a query over all users with the given bot/human label."""
        return cls.select().where(cls.is_bot == is_bot)

    @classmethod
    def followers_friends_per_users(cls, users):
        """Build a DataFrame of follower/following stats for *users*, plus
        the empirical CDF (CDFx/CDFy columns) of account reputation."""
        data = [{
            "followers": user.followers,
            "following": user.following,
            "accountreputation": user.reputation()
        } for user in users]
        df = DataFrame(data,
                       columns=["followers", "following", "accountreputation",
                                "CDFx", "CDFy"],
                       index=range(len(users)))
        df_size = len(df.index)
        df["CDFx"] = np.sort(df["accountreputation"])
        df["CDFy"] = np.arange(df_size) / float(df_size)
        return df

    @staticmethod
    def entropy(X):
        """Shannon entropy (in bits) of the value distribution in *X*.

        Fixed: this was declared ``@classmethod`` without a ``cls``
        parameter, so ``User.entropy(data)`` raised a TypeError (the class
        itself was bound to ``X``); it is now a proper static method.
        """
        probs = [np.mean(X == c) for c in set(X)]
        # Materialize the list: np.sum over a bare generator is deprecated.
        return np.sum([-p * np.log2(p) for p in probs])
class Tweet(BaseModel):
    """A single tweet, linked to its author via a foreign key."""
    user = ForeignKeyField(User, related_name='tweets')
    text = CharField()
    date = CharField()
    source = CharField()
    mentions = CharField()

    @classmethod
    def get_sample(cls, is_bot=False, min_tweets=200):
        """Return tweets of bot/human users having at least *min_tweets* tweets."""
        selected_users = Tweet.select(Tweet.user) \
                              .group_by(Tweet.user) \
                              .having(fn.Count(Tweet.user) >= min_tweets)
        tweets = (Tweet.select(Tweet).join(User)
                       .where(
                           User.is_bot == is_bot,
                           User.id << selected_users
                       ))
        return tweets

    @classmethod
    def avg_mentions_per_user(cls, tweets):
        """Return {user_id: average number of @-mentions per tweet}."""
        mentions_per_user = defaultdict(list)
        for tweet in tweets:
            count = 0
            if len(tweet.mentions) > 0:
                count = len(tweet.mentions.split(","))
            mentions_per_user[tweet.user_id].append(count)
        # Fixed: .items() instead of the Python-2-only .iteritems().
        return {user: np.mean(mentions)
                for (user, mentions) in mentions_per_user.items()}

    @classmethod
    def vocabulary_size(cls, tweets):
        """Return {user_id: number of distinct space-separated words used}."""
        words_per_user = defaultdict(set)
        for tweet in tweets:
            for word in tweet.text.split(" "):
                words_per_user[tweet.user_id].add(word)
        # Fixed: .items() instead of the Python-2-only .iteritems().
        return {name: len(words) for (name, words) in words_per_user.items()}

    @classmethod
    def tweet_density(cls, tweets):
        """Tweets-per-day counts per user (capped at 6), with mean and median."""
        rows = []
        for tweet in tweets:
            date = parser.parse(tweet.date)
            # NOTE(review): this key lacks zero padding, so e.g. 2021-1-12
            # and 2021-11-2 collide; kept as-is for backward compatibility.
            rows.append({"user_id": tweet.user_id,
                         "date": str(date.year) + str(date.month) + str(date.day)})
        # Building from a row list avoids pandas chained-assignment writes.
        tweets_df = DataFrame(rows, columns=["user_id", "date"])
        grouped = tweets_df.groupby(['user_id', 'date']).size().reset_index()
        count_list_by_user = grouped[0].apply(lambda x: x if (x < 6) else 6).tolist()
        mean_count = np.mean(count_list_by_user)
        median_count = np.median(count_list_by_user)
        return count_list_by_user, mean_count, median_count

    @classmethod
    def tweet_weekday(cls, tweets):
        """Proportion (and std) of tweets per weekday.

        Returns a DataFrame with columns weekday/prop/std, reindexed by the
        hard-coded order [4, 3, 0, 2, 5, 6, 1].
        """
        rows = [{"user_id": tweet.user_id,
                 "weekday": str(tweet.date.split(' ')[0])}
                for tweet in tweets]
        tweets_df = DataFrame(rows, columns=["user_id", "weekday"])
        grouped = tweets_df.groupby(['user_id', 'weekday']).size().reset_index()
        list_days = set(grouped["weekday"])
        stats_weekdays = DataFrame(columns=["weekday", "mean", "std"],
                                   index=range(len(list_days)))
        stats_weekdays["weekday"] = list_days
        stats_weekdays["mean"] = list(map(
            lambda day: np.mean(grouped[0][grouped["weekday"] == day]), list_days))
        stats_weekdays["std"] = list(map(
            lambda day: np.std(grouped[0][grouped["weekday"] == day]), list_days))
        prop_weekdays = DataFrame(columns=["weekday", "prop", "std"],
                                  index=range(len(list_days)))
        prop_weekdays["weekday"] = list_days
        prop_weekdays['prop'] = stats_weekdays['mean'] / sum(stats_weekdays['mean'])
        prop_weekdays['std'] = stats_weekdays['std'] / sum(stats_weekdays['mean'])
        # NOTE(review): this fixed reindex assumes a particular set iteration
        # order for list_days, which Python does not guarantee -- verify.
        sorted_weekdays = prop_weekdays.reindex([4, 3, 0, 2, 5, 6, 1])
        return sorted_weekdays

    @classmethod
    def top_sources(cls, tweets):
        """Return tweet counts per source client, most frequent first."""
        sources = [{"source": tweet.source} for tweet in tweets]
        return DataFrame(sources).stack().value_counts()
| [
"numpy.mean",
"dateutil.parser.parse",
"numpy.median",
"playhouse.db_url.connect",
"numpy.sort",
"collections.defaultdict",
"numpy.std",
"pandas.DataFrame",
"numpy.log2"
] | [((270, 298), 'playhouse.db_url.connect', 'connect', (["cfg.database['url']"], {}), "(cfg.database['url'])\n", (277, 298), False, 'from playhouse.db_url import connect\n'), ((1349, 1381), 'numpy.sort', 'np.sort', (["df['accountreputation']"], {}), "(df['accountreputation'])\n", (1356, 1381), True, 'import numpy as np\n'), ((2270, 2294), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (2281, 2294), False, 'from collections import defaultdict\n'), ((3501, 3528), 'numpy.mean', 'np.mean', (['count_list_by_user'], {}), '(count_list_by_user)\n', (3508, 3528), True, 'import numpy as np\n'), ((3552, 3581), 'numpy.median', 'np.median', (['count_list_by_user'], {}), '(count_list_by_user)\n', (3561, 3581), True, 'import numpy as np\n'), ((1519, 1534), 'numpy.mean', 'np.mean', (['(X == c)'], {}), '(X == c)\n', (1526, 1534), True, 'import numpy as np\n'), ((2530, 2547), 'numpy.mean', 'np.mean', (['mentions'], {}), '(mentions)\n', (2537, 2547), True, 'import numpy as np\n'), ((3156, 3180), 'dateutil.parser.parse', 'parser.parse', (['tweet.date'], {}), '(tweet.date)\n', (3168, 3180), False, 'from dateutil import parser\n'), ((1580, 1590), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (1587, 1590), True, 'import numpy as np\n'), ((4276, 4322), 'numpy.mean', 'np.mean', (["grouped[0][grouped['weekday'] == day]"], {}), "(grouped[0][grouped['weekday'] == day])\n", (4283, 4322), True, 'import numpy as np\n'), ((4389, 4434), 'numpy.std', 'np.std', (["grouped[0][grouped['weekday'] == day]"], {}), "(grouped[0][grouped['weekday'] == day])\n", (4395, 4434), True, 'import numpy as np\n'), ((4989, 5007), 'pandas.DataFrame', 'DataFrame', (['sources'], {}), '(sources)\n', (4998, 5007), False, 'from pandas import DataFrame\n')] |
"""
GameModels.py
Reinforcement Learning Agent object.
Imported into app.py
author: @justjoshtings
created: 3/16/2022
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models, backend
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Convolution2D
from tensorflow.keras.optimizers import Adam
import numpy as np
from copy import deepcopy
from rl.agents import DQNAgent
from rl.memory import SequentialMemory
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy
from rl.callbacks import ModelIntervalCheckpoint
from rl.core import Processor
from ImageProcessor import AtariProcessor
import gym
import ale_py
# Random Seed
# Fixed seed so that training / evaluation runs are reproducible.
random_seed = 42
# Set random seed in tensorflow
tf.random.set_seed(random_seed)
# Set random seed in numpy
np.random.seed(random_seed)
class DQNAgentService:
    '''
    Object to wrap a DQN keras-rl model object with a gym Atari environment.

    Builds the convolutional Q-network, wires it into a keras-rl ``DQNAgent``
    and offers helpers to load weights and play episodes (optionally as a
    generator yielding every game step).
    '''
    def __init__(self, model_height, model_width, env_name, window_length, model_name, model_channels=0):
        '''
        Params:
            self: instance of object
            model_height (int) : height of game state images to input into DQN model, 105
            model_width (int) : width of game state images to input into DQN model, 105
            env_name (str) : env name of Atari game to load, 'ALE/MsPacman-v5'
            window_length (int) : number of image game state stacks per input, 4
            model_name (str) : name of model
            model_channels (int) : 3 if want to use RGB, 0 if grayscale
        '''
        self.model_height = model_height
        self.model_width = model_width
        self.env_name = env_name
        self.window_length = window_length
        self.model_name = model_name
        self.model_channels = model_channels

        # Probe the environment once to discover frame shape and action count.
        self.env = gym.make(self.env_name)
        self.env_height, self.env_width, self.env_channels = self.env.observation_space.shape
        self.actions = self.env.action_space.n

    def build_model(self):
        '''
        Build the convolutional Q-network.

        Params:
            self: instance of object
        Returns:
            self.model: keras model instance
        '''
        backend.clear_session()

        # ``window_length`` stacked frames; the trailing channel axis is only
        # present when RGB input is requested (model_channels > 0).
        if self.model_channels > 0:
            self.input_shape = (self.window_length, self.model_height, self.model_width, self.model_channels)
        else:
            self.input_shape = (self.window_length, self.model_height, self.model_width)

        inputs = layers.Input(shape=self.input_shape)
        conv1 = layers.Conv2D(32, (8, 8), strides=(4, 4), activation='relu', padding='same', name='conv1')(inputs)
        conv2 = layers.Conv2D(64, (4, 4), strides=(2, 2), activation='relu', padding='same', name='conv2')(conv1)
        conv3 = layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='conv3')(conv2)
        flatten = layers.Flatten()(conv3)
        dense1 = layers.Dense(512, activation='relu')(flatten)
        dense2 = layers.Dense(256, activation='relu')(dense1)
        # Linear output head: one Q-value per discrete action.
        final_layer = layers.Dense(self.actions, activation='linear')(dense2)

        self.model = models.Model(inputs=inputs, outputs=final_layer, name=self.model_name)
        return self.model

    def build_agent(self, policy_value_max, policy_value_min, policy_value_test, policy_nb_steps, enable_double_dqn, enable_dueling_network, dueling_type, nb_steps_warmup, replay_memory=None):
        '''
        Build the keras-rl DQN agent around the model from ``build_model``.

        Params:
            self: instance of object
            policy_value_max (float) : between 0 and 1 representing max value of epsilon in epsilon-greedy linear annealing
            policy_value_min (float) : between 0 and 1 representing min value of epsilon in epsilon-greedy linear annealing
            policy_value_test (float) : between 0 and 1 representing value of epsilon during testing
            policy_nb_steps (int) : number of steps from start of training to decrease epsilon before setting epsilon to policy_value_min
            enable_double_dqn (Boolean) : enable double dqn
            enable_dueling_network (Boolean) : enable dueling dqn
            dueling_type (str) : 'avg'
            nb_steps_warmup (int) : number of steps to warmup before training
            replay_memory (SequentialMemory): if not None, replay memory to load into DQNAgent() for continuation of training. Otherwise, create new SequentialMemory.
        Returns:
            dqn: dqn model
        '''
        # Policy hyperparameters
        self.policy_value_max = policy_value_max
        self.policy_value_min = policy_value_min
        self.policy_value_test = policy_value_test
        self.policy_nb_steps = policy_nb_steps

        # Agent hyperparameters
        self.enable_double_dqn = enable_double_dqn
        self.enable_dueling_network = enable_dueling_network
        self.dueling_type = dueling_type
        self.nb_steps_warmup = nb_steps_warmup

        # BUG FIX: these hyperparameters were previously stored but hard-coded
        # literals were passed to LinearAnnealedPolicy/DQNAgent below, so the
        # arguments had no effect.  They are now actually used.
        # For 1 million total steps, a policy nb_steps around 600k is a good slope.
        self.policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps',
                                          value_max=self.policy_value_max,
                                          value_min=self.policy_value_min,
                                          value_test=self.policy_value_test,
                                          nb_steps=self.policy_nb_steps)

        # Resizes/normalizes raw frames to (model_height, model_width).
        self.processor = AtariProcessor((self.model_height, self.model_width))

        # If we want to continue training from previously saved replay memory.
        if replay_memory:
            self.memory = replay_memory
        else:
            self.memory = SequentialMemory(limit=10000, window_length=self.window_length)

        # nb_steps_warmup reduces instability of the first few steps, see
        # https://datascience.stackexchange.com/questions/46056
        self.dqn = DQNAgent(model=self.model, memory=self.memory, policy=self.policy,
                            enable_double_dqn=self.enable_double_dqn,
                            enable_dueling_network=self.enable_dueling_network,
                            dueling_type=self.dueling_type,
                            processor=self.processor,
                            nb_actions=self.actions,
                            nb_steps_warmup=self.nb_steps_warmup)
        return self.dqn

    def load_weights(self, model_path):
        '''
        Compile the agent and load previously saved weights.

        Params:
            self: instance of object
            model_path (str) : path of saved model weights
        '''
        self.dqn.compile(Adam(learning_rate=1e-4))
        try:
            self.dqn.load_weights(model_path)
        except OSError:
            # Missing weights file is reported but not fatal: the agent can
            # still run with its freshly initialised weights.
            print("Weights file {} not found".format(model_path))

    def get_model(self):
        '''
        Return the wrapped keras-rl DQN agent.
        '''
        return self.dqn

    def play(self, n_episodes, render_mode='human'):
        '''
        Play the game with the trained model, one env step at a time.

        Params:
            self: instance of object
            n_episodes (int) : number of episodes to play
            render_mode (str) : mode to render gameplay in ['human', 'rgb', None]
        '''
        self.env = gym.make(self.env_name, render_mode=render_mode)  # render_mode = ('human','rgb',None)
        self.dqn.training = False
        action_repetition = 1

        for i in range(n_episodes):
            episode_reward = 0.
            observation = deepcopy(self.env.reset())
            if self.dqn.processor is not None:
                observation = self.dqn.processor.process_observation(observation)
            assert observation is not None
            action = 0
            done = False

            # Run the episode until we're done.
            while not done:
                action = self.dqn.forward(observation)
                if self.dqn.processor is not None:
                    action = self.dqn.processor.process_action(action)
                reward = 0.
                for _ in range(action_repetition):
                    observation, r, d, info = self.env.step(action)
                    observation = deepcopy(observation)
                    if self.dqn.processor is not None:
                        observation, r, d, info = self.dqn.processor.process_step(
                            observation, r, d, info)
                    reward += r
                    if d:
                        done = True
                        break
                self.dqn.backward(reward, terminal=done)
                episode_reward += reward
                self.dqn.step += 1

    def play_gen(self, n_episodes=1, render_mode=None):
        '''
        Play the game with the trained model iteratively, yielding
        (observation, observation_deprocessed, action, done) after each game
        step.  Acts as a generator.

        Params:
            self: instance of object
            n_episodes (int) : number of episodes to play
            render_mode (str) : mode to render gameplay in ['human', 'rgb', None]
        '''
        self.env = gym.make(self.env_name, render_mode=render_mode)  # render_mode = ('human','rgb',None)
        self.dqn.training = False
        action_repetition = 1

        for i in range(n_episodes):
            episode_reward = 0.
            observation = deepcopy(self.env.reset())
            observation_deprocessed = deepcopy(observation)
            if self.dqn.processor is not None:
                observation = self.dqn.processor.process_observation(observation)
            assert observation is not None
            action = 0
            done = False

            # Setup sequence: while Ms. Pac-Man's controller state displays 0
            # (respawn animation) only the no-op action (0) is issued.
            prior_lives = 100000
            frame_count = 0
            frames_between_lives = 30
            setup_sequence = True

            # Run the episode until we're done.
            while not done:
                yield (observation, observation_deprocessed, action, done)
                action = self.dqn.forward(observation)
                if self.dqn.processor is not None:
                    action = self.dqn.processor.process_action(action)
                reward = 0.
                for _ in range(action_repetition):
                    observation, r, d, info = self.env.step(action)
                    observation = deepcopy(observation)
                    observation_deprocessed = deepcopy(observation)
                    if self.dqn.processor is not None:
                        observation, r, d, info = self.dqn.processor.process_step(
                            observation, r, d, info)

                    # Restart the setup sequence whenever a life is lost or a
                    # fresh episode begins.
                    lives = info['lives']
                    ep_frame_number = info['episode_frame_number']
                    if lives < prior_lives or ep_frame_number <= 1 * self.window_length:
                        setup_sequence = True
                        initial_frame_number = deepcopy(ep_frame_number)

                    if setup_sequence == True:
                        action = 0
                        frame_count += 1
                        # Hold no-op for ``frames_between_lives`` frame stacks.
                        if (frame_count * self.window_length) + initial_frame_number >= (frames_between_lives * self.window_length) + initial_frame_number:
                            setup_sequence = False
                            frame_count = 0

                    if lives == 0:
                        action = 0

                    prior_lives = deepcopy(lives)
                    reward += r
                    if d:
                        action = 0
                        done = True
                        break
                self.dqn.backward(reward, terminal=done)
                episode_reward += reward
                self.dqn.step += 1
if __name__ == "__main__":
    # Smoke test: rebuild the agent, load saved weights and play two episodes.
    print('Executing test play', __name__)

    def load_model(model_path, env_name):
        '''
        Build a DQNAgentService, construct its agent and load saved weights.

        Params:
            model_path (str) : path to model weights file
            env_name (str) : name of game environment
        Returns:
            (window_length, input_shape, ms_pacman_model) tuple
        '''
        # Model parameters (must match those used when the weights were trained).
        window_length = 4
        input_shape = (105, 105)
        ms_pacman_model = DQNAgentService(model_height=input_shape[0], model_width=input_shape[1], env_name=env_name, window_length=window_length, model_name='Final_Model', model_channels=0)
        ms_pacman_model.build_model()
        ms_pacman_model.build_agent(policy_value_max=1., policy_value_min=.1, policy_value_test=.1, policy_nb_steps=8000,
                                    enable_double_dqn=False, enable_dueling_network=True, dueling_type='avg', nb_steps_warmup=2500)
        ms_pacman_model.load_weights(model_path)
        return window_length, input_shape, ms_pacman_model

    path = './models/Dueling_DQN_Round2_weights_final_steps15000.h5f'
    window_length, input_shape, ms_pacman_model = load_model(path, 'ALE/MsPacman-v5')
    ms_pacman_model.play(2)
    # Generator variant, kept for reference (play_gen yields 4 values):
    # for ob, action, done in ms_pacman_model.play_gen():
    # 	print(ob,action,done)
    # 	if done:
    # 		break
else:
    print('Importing', __name__)
| [
"tensorflow.keras.layers.Input",
"copy.deepcopy",
"tensorflow.random.set_seed",
"tensorflow.keras.layers.Conv2D",
"rl.memory.SequentialMemory",
"tensorflow.keras.optimizers.Adam",
"ImageProcessor.AtariProcessor",
"tensorflow.keras.layers.Dense",
"rl.agents.DQNAgent",
"numpy.random.seed",
"rl.pol... | [((777, 808), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['random_seed'], {}), '(random_seed)\n', (795, 808), True, 'import tensorflow as tf\n'), ((836, 863), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (850, 863), True, 'import numpy as np\n'), ((1874, 1897), 'gym.make', 'gym.make', (['self.env_name'], {}), '(self.env_name)\n', (1882, 1897), False, 'import gym\n'), ((2246, 2269), 'tensorflow.keras.backend.clear_session', 'backend.clear_session', ([], {}), '()\n', (2267, 2269), False, 'from tensorflow.keras import layers, models, backend\n'), ((2534, 2570), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self.input_shape'}), '(shape=self.input_shape)\n', (2546, 2570), False, 'from tensorflow.keras import layers, models, backend\n'), ((3168, 3238), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'inputs', 'outputs': 'final_layer', 'name': 'self.model_name'}), '(inputs=inputs, outputs=final_layer, name=self.model_name)\n', (3180, 3238), False, 'from tensorflow.keras import layers, models, backend\n'), ((5225, 5278), 'ImageProcessor.AtariProcessor', 'AtariProcessor', (['(self.model_height, self.model_width)'], {}), '((self.model_height, self.model_width))\n', (5239, 5278), False, 'from ImageProcessor import AtariProcessor\n'), ((5527, 5753), 'rl.agents.DQNAgent', 'DQNAgent', ([], {'model': 'self.model', 'memory': 'self.memory', 'policy': 'self.policy', 'enable_double_dqn': '(False)', 'enable_dueling_network': '(True)', 'dueling_type': '"""avg"""', 'processor': 'self.processor', 'nb_actions': 'self.actions', 'nb_steps_warmup': '(2500)'}), "(model=self.model, memory=self.memory, policy=self.policy,\n enable_double_dqn=False, enable_dueling_network=True, dueling_type=\n 'avg', processor=self.processor, nb_actions=self.actions,\n nb_steps_warmup=2500)\n", (5535, 5753), False, 'from rl.agents import DQNAgent\n'), ((6908, 6956), 'gym.make', 'gym.make', (['self.env_name'], 
{'render_mode': 'render_mode'}), '(self.env_name, render_mode=render_mode)\n', (6916, 6956), False, 'import gym\n'), ((8762, 8810), 'gym.make', 'gym.make', (['self.env_name'], {'render_mode': 'render_mode'}), '(self.env_name, render_mode=render_mode)\n', (8770, 8810), False, 'import gym\n'), ((2590, 2684), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(8, 8)'], {'strides': '(4, 4)', 'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""conv1"""'}), "(32, (8, 8), strides=(4, 4), activation='relu', padding='same',\n name='conv1')\n", (2603, 2684), False, 'from tensorflow.keras import layers, models, backend\n'), ((2704, 2798), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""conv2"""'}), "(64, (4, 4), strides=(2, 2), activation='relu', padding='same',\n name='conv2')\n", (2717, 2798), False, 'from tensorflow.keras import layers, models, backend\n'), ((2817, 2891), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""conv3"""'}), "(64, (3, 3), activation='relu', padding='same', name='conv3')\n", (2830, 2891), False, 'from tensorflow.keras import layers, models, backend\n'), ((2918, 2934), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2932, 2934), False, 'from tensorflow.keras import layers, models, backend\n'), ((2960, 2996), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (2972, 2996), False, 'from tensorflow.keras import layers, models, backend\n'), ((3023, 3059), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (3035, 3059), False, 'from tensorflow.keras import layers, models, backend\n'), ((3090, 3137), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.actions'], 
{'activation': '"""linear"""'}), "(self.actions, activation='linear')\n", (3102, 3137), False, 'from tensorflow.keras import layers, models, backend\n'), ((5017, 5035), 'rl.policy.EpsGreedyQPolicy', 'EpsGreedyQPolicy', ([], {}), '()\n', (5033, 5035), False, 'from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy\n'), ((5431, 5494), 'rl.memory.SequentialMemory', 'SequentialMemory', ([], {'limit': '(10000)', 'window_length': 'self.window_length'}), '(limit=10000, window_length=self.window_length)\n', (5447, 5494), False, 'from rl.memory import SequentialMemory\n'), ((6308, 6334), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (6312, 6334), False, 'from tensorflow.keras.optimizers import Adam\n'), ((9072, 9093), 'copy.deepcopy', 'deepcopy', (['observation'], {}), '(observation)\n', (9080, 9093), False, 'from copy import deepcopy\n'), ((7835, 7856), 'copy.deepcopy', 'deepcopy', (['observation'], {}), '(observation)\n', (7843, 7856), False, 'from copy import deepcopy\n'), ((10035, 10056), 'copy.deepcopy', 'deepcopy', (['observation'], {}), '(observation)\n', (10043, 10056), False, 'from copy import deepcopy\n'), ((10103, 10124), 'copy.deepcopy', 'deepcopy', (['observation'], {}), '(observation)\n', (10111, 10124), False, 'from copy import deepcopy\n'), ((11193, 11208), 'copy.deepcopy', 'deepcopy', (['lives'], {}), '(lives)\n', (11201, 11208), False, 'from copy import deepcopy\n'), ((10709, 10734), 'copy.deepcopy', 'deepcopy', (['ep_frame_number'], {}), '(ep_frame_number)\n', (10717, 10734), False, 'from copy import deepcopy\n')] |
import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
# Colorama: automatically reset terminal colours after every print.
init(autoreset=True)
class VideoRecord(object):
    """Read-only view over one parsed annotation row: (frame dir, count, label)."""

    def __init__(self, row):
        # Keep the raw row; fields are decoded lazily by the properties below.
        self._data = row

    def _int_field(self, index):
        # Single decoding point for the numeric columns.
        return int(self._data[index])

    @property
    def path(self):
        """Directory that holds the extracted frames of this clip."""
        return self._data[0]

    @property
    def num_frames(self):
        """Total number of frames available for the clip."""
        return self._int_field(1)

    @property
    def label(self):
        """Integer class id of the clip."""
        return self._int_field(2)
class TSNDataSet(data.Dataset):
    """Segment-based video dataset (TSN style) over precomputed frame features.

    Each sample divides a video into ``num_segments`` segments, picks one frame
    index per segment (random during training, central otherwise) and stacks
    the stored feature tensors for those frames.

    Params:
        root_path (str) : root directory of the dataset (kept for reference)
        list_file (str) : annotation file, one "path num_frames label" per line
        num_dataload (int) : length to pad/repeat the video list to (used to
            align source/target dataset sizes)
        num_segments (int) : number of temporal segments per video
        new_length (int) : frames consumed per segment (Diff modalities add 1)
        modality (str) : 'RGB', 'Flow', 'RGBDiff', 'RGBDiff2' or 'RGBDiffplus'
        image_tmpl (str) : filename template of the stored feature tensors
        transform : optional transform (currently unused in ``get``)
        random_shift (bool) : random segment sampling when not in test mode
        test_mode (bool) : use deterministic central-frame sampling
    """

    def __init__(self, root_path, list_file, num_dataload,
                 num_segments=3, new_length=1, modality='RGB',
                 image_tmpl='img_{:05d}.t7', transform=None,
                 force_grayscale=False, random_shift=True, test_mode=False):

        self.root_path = root_path
        self.list_file = list_file
        self.num_segments = num_segments
        self.new_length = new_length
        self.modality = modality
        self.image_tmpl = image_tmpl
        self.transform = transform
        self.random_shift = random_shift
        self.test_mode = test_mode
        self.num_dataload = num_dataload

        if self.modality in ('RGBDiff', 'RGBDiff2', 'RGBDiffplus'):
            self.new_length += 1  # Diff needs one more image to calculate diff

        self._parse_list()  # read all the video files

    def _load_feature(self, directory, idx):
        """Load the stored feature tensor(s) for 1-based frame ``idx``.

        Returns a list with one tensor (appearance modalities) or two tensors
        (x/y components for 'Flow').
        """
        if self.modality in ('RGB', 'RGBDiff', 'RGBDiff2', 'RGBDiffplus'):
            feat_path = os.path.join(directory, self.image_tmpl.format(idx))
            try:
                feat = [torch.load(feat_path)]
            except Exception:
                # BUG FIX: the original bare ``except`` printed the path and
                # then returned the unbound name ``feat`` (UnboundLocalError),
                # masking the real failure.  Report the path and re-raise.
                print(Back.RED + feat_path)
                raise
            return feat
        elif self.modality == 'Flow':
            x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
            y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
            return [x_feat, y_feat]

    def _parse_list(self):
        """Read the annotation file and pad the list up to ``num_dataload``."""
        # BUG FIX: close the file handle (the original left ``open`` leaking).
        with open(self.list_file) as f:
            self.video_list = [VideoRecord(x.strip().split(' ')) for x in f]
        # Repeat the list if shorter than num_dataload (especially for target data).
        n_repeat = self.num_dataload // len(self.video_list)
        n_left = self.num_dataload % len(self.video_list)
        self.video_list = self.video_list * n_repeat + self.video_list[:n_left]

    def _sample_indices(self, record):
        """Randomly sample one frame index per segment (training).

        :param record: VideoRecord
        :return: 1-based frame indices, one per segment
        """
        average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
        if average_duration > 0:
            # One uniformly random offset inside each equally sized segment.
            offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
        elif record.num_frames > self.num_segments:
            offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
        else:
            # Too few frames: fall back to the first frame for every segment.
            offsets = np.zeros((self.num_segments,))
        return offsets + 1

    def _get_val_indices(self, record):
        """Pick the central frame of each segment (validation)."""
        num_min = self.num_segments + self.new_length - 1
        num_select = record.num_frames - self.new_length + 1
        if record.num_frames >= num_min:
            tick = float(num_select) / float(self.num_segments)
            offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
        else:
            offsets = np.zeros((self.num_segments,))
        return offsets + 1

    def _get_test_indices(self, record):
        """Pick the central frame of each segment; pad short clips (test)."""
        num_min = self.num_segments + self.new_length - 1
        num_select = record.num_frames - self.new_length + 1
        if record.num_frames >= num_min:
            tick = float(num_select) / float(self.num_segments)
            offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])  # pick the central frame in each segment
        else:  # the video clip is too short --> duplicate the last frame
            id_select = np.array([x for x in range(num_select)])
            # Expand to the length of self.num_segments with the last element.
            id_expand = np.ones(self.num_segments - num_select, dtype=int) * id_select[id_select[0] - 1]
            offsets = np.append(id_select, id_expand)
        return offsets + 1

    def __getitem__(self, index):
        record = self.video_list[index]

        if not self.test_mode:
            segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
        else:
            segment_indices = self._get_test_indices(record)

        return self.get(record, segment_indices)

    def get(self, record, indices):
        """Stack the features of the sampled frames into one tensor."""
        frames = list()
        for seg_ind in indices:
            p = int(seg_ind)
            for i in range(self.new_length):
                seg_feats = self._load_feature(record.path, p)
                frames.extend(seg_feats)
                if p < record.num_frames:
                    p += 1

        # process_data = self.transform(frames)
        process_data = torch.stack(frames)

        return process_data, record._data

    def __len__(self):
        return len(self.video_list)
| [
"numpy.ones",
"torch.load",
"torch.stack",
"numpy.append",
"numpy.random.randint",
"numpy.zeros",
"colorama.init"
] | [((190, 210), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (194, 210), False, 'from colorama import init\n'), ((5166, 5185), 'torch.stack', 'torch.stack', (['frames'], {}), '(frames)\n', (5177, 5185), False, 'import torch\n'), ((3540, 3570), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3548, 3570), True, 'import numpy as np\n'), ((4344, 4375), 'numpy.append', 'np.append', (['id_select', 'id_expand'], {}), '(id_select, id_expand)\n', (4353, 4375), True, 'import numpy as np\n'), ((2836, 2885), 'numpy.random.randint', 'randint', (['average_duration'], {'size': 'self.num_segments'}), '(average_duration, size=self.num_segments)\n', (2843, 2885), False, 'from numpy.random import randint\n'), ((3078, 3108), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3086, 3108), True, 'import numpy as np\n'), ((4248, 4298), 'numpy.ones', 'np.ones', (['(self.num_segments - num_select)'], {'dtype': 'int'}), '(self.num_segments - num_select, dtype=int)\n', (4255, 4298), True, 'import numpy as np\n'), ((1700, 1721), 'torch.load', 'torch.load', (['feat_path'], {}), '(feat_path)\n', (1710, 1721), False, 'import torch\n'), ((2968, 3040), 'numpy.random.randint', 'randint', (['(record.num_frames - self.new_length + 1)'], {'size': 'self.num_segments'}), '(record.num_frames - self.new_length + 1, size=self.num_segments)\n', (2975, 3040), False, 'from numpy.random import randint\n')] |
# coding: utf-8
# # Preamble
# In[3]:
# Jupyter magics: inline figures at retina resolution (only valid inside IPython).
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")

import seaborn as sns
sns.set(style="white")

# Allows for interactive shell - outputs all non variable statements
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import numpy as np
np.set_printoptions(precision=4, linewidth=100)

from matplotlib import pyplot as plt

from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np  # NOTE(review): duplicate of the import above; harmless.

# Download/instantiate pretrained ImageNet VGG16 with its classifier head.
model = VGG16(weights='imagenet', include_top=True)


# In[2]:


import os
import shutil
from glob import glob

np.random.seed(10)

# Directory layout for the Kaggle cats-vs-dogs dataset.
current_dir = os.getcwd()
DATASET_DIR=os.path.join(current_dir, 'dataset')
CROSSVALID_DIR=os.path.join(DATASET_DIR, 'cross_valid')
TRAIN_DIR = os.path.join(DATASET_DIR, 'train')
TEST_DIR = os.path.join(DATASET_DIR, 'test')
CROSSVALID_DIR = os.path.join(DATASET_DIR, 'cross_valid')  # NOTE(review): assigned twice with the same value.
SAMPLE_DIR = os.path.join(DATASET_DIR, 'sample')
WEIGHTS_DIR = os.path.join(current_dir, 'weights')
# # Use Keras Vgg16 to get the predictions
# * Download the dataset in the current directory.
# ```
# kg download -c 'dogs-vs-cats-redux-kernels-edition'
# ```
# * Inspect the data
# * Prepare a single image
# * Feed it into pretrained vgg16
# ## Inspect the data
#
# Graph the image
# In[6]:
# Unzip a single file to test on the pretrained model
#!unzip -oj "test.zip" "test/1.jpg" -d "/tmp/cats_dogs"

# Load a single image at VGG16's expected 224x224 input size.
# img_path = '/tmp/cats_dogs/1.jpg'
img_path = 'dataset/train/cat/cat.1.jpg'
img = image.load_img(img_path, target_size=(224, 224))

# Plot the single image
f = plt.figure(figsize=(10, 5))
sp = f.add_subplot(1, 1, 1) ## (rows, cols, index)
sp.axis('On')
sp.set_title(img_path, fontsize=16)
plt.imshow(img)


# ## Predict using Keras Vgg16

# In[ ]:


# First pass: raw pixel values WITHOUT preprocess_input (kept for comparison
# with the properly preprocessed prediction below).
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)

preds = model.predict(x)
decode_predictions(preds)


# In[ ]:


# Second pass: with the ImageNet mean-subtraction/BGR preprocessing VGG16 expects.
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
decode_predictions(preds)
# # Kaggle Competition
# 1. Prepare dataset
# 1. Download the dataset
# 1. Unzip training and test dataset
# 1. Create the training, validation, sample batch dataset
# 1. Create the labels
# 1. Model preparation
# 1. Finetune the keras model
# 1. Pop the last layer, freeze all layers, add a softmax layer and update set of classes
# 1. Fit the keras model
# 1. Train the updated keras model
# 1. Save and load the model after couple of epochs
# 1. Perform predictions
# 1. Debug
# 1. View the confusion matrix
# 1. Visual Inspection
# 1. Inspect correct labels
# 1. Inspect incorrect labels
# 1. Inspect correct labels with high probability
# 1. Inspect incorrect label with high probability
# 1. Inspect correct labels with medium probability
# 1. Kaggle Submission
# 1. Prepare csv file
# 1. Submit
# ## Prepare dataset
# See `lesson1_catsdogs-prepare_dataset.ipynb` which will download and create the various labeled datasets.
# ## Perform predictions
# In[ ]:
# Restore the fine-tuned weights saved after the sample training run.
model.load_weights(os.path.join(WEIGHTS_DIR, 'intial_sample_run_2.h5'))

def get_data_as_np(path, batch_size=5):
    """Load every image under ``path`` into a single numpy array.

    Params:
        path (str): directory laid out for ``flow_from_directory``
        batch_size (int): images read per generator step
    Returns:
        numpy array with all images concatenated along the batch axis
    """
    batches = datagen.flow_from_directory(
        path,
        target_size=(224, 224),
        # BUG FIX: ``batch_size`` was accepted but ignored (hard-coded to 10);
        # honour the caller's value.
        batch_size=batch_size,
        class_mode=None,
        shuffle=False
    )
    return np.concatenate([batches.next() for i in range(len(batches))])

model.predict(get_data_as_np(crossvalid_dir, 5), batch_size=5)


# In[ ]:


# NOTE(review): notebook residue below -- ``self``/``test_batches``/``preds``
# are undefined at module level; these cells only ran inside the original
# notebook/class context.
test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
test_batches, self.model.predict_generator(test_batches, test_batches.nb_sample)

preds[1:4]
preds.shape
| [
"matplotlib.pyplot.imshow",
"keras.preprocessing.image.img_to_array",
"seaborn.set",
"keras.applications.vgg16.VGG16",
"os.path.join",
"keras.preprocessing.image.load_img",
"os.getcwd",
"matplotlib.pyplot.figure",
"keras.applications.vgg16.preprocess_input",
"numpy.random.seed",
"numpy.expand_di... | [((200, 222), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (207, 222), True, 'import seaborn as sns\n'), ((420, 467), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'linewidth': '(100)'}), '(precision=4, linewidth=100)\n', (439, 467), True, 'import numpy as np\n'), ((690, 733), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (695, 733), False, 'from keras.applications.vgg16 import VGG16\n'), ((793, 811), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (807, 811), True, 'import numpy as np\n'), ((827, 838), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (836, 838), False, 'import os\n'), ((851, 887), 'os.path.join', 'os.path.join', (['current_dir', '"""dataset"""'], {}), "(current_dir, 'dataset')\n", (863, 887), False, 'import os\n'), ((903, 943), 'os.path.join', 'os.path.join', (['DATASET_DIR', '"""cross_valid"""'], {}), "(DATASET_DIR, 'cross_valid')\n", (915, 943), False, 'import os\n'), ((956, 990), 'os.path.join', 'os.path.join', (['DATASET_DIR', '"""train"""'], {}), "(DATASET_DIR, 'train')\n", (968, 990), False, 'import os\n'), ((1002, 1035), 'os.path.join', 'os.path.join', (['DATASET_DIR', '"""test"""'], {}), "(DATASET_DIR, 'test')\n", (1014, 1035), False, 'import os\n'), ((1053, 1093), 'os.path.join', 'os.path.join', (['DATASET_DIR', '"""cross_valid"""'], {}), "(DATASET_DIR, 'cross_valid')\n", (1065, 1093), False, 'import os\n'), ((1107, 1142), 'os.path.join', 'os.path.join', (['DATASET_DIR', '"""sample"""'], {}), "(DATASET_DIR, 'sample')\n", (1119, 1142), False, 'import os\n'), ((1158, 1194), 'os.path.join', 'os.path.join', (['current_dir', '"""weights"""'], {}), "(current_dir, 'weights')\n", (1170, 1194), False, 'import os\n'), ((1707, 1755), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, 
target_size=(224, 224))\n', (1721, 1755), False, 'from keras.preprocessing import image\n'), ((1785, 1812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1795, 1812), True, 'from matplotlib import pyplot as plt\n'), ((1914, 1929), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1924, 1929), True, 'from matplotlib import pyplot as plt\n'), ((1979, 2002), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1997, 2002), False, 'from keras.preprocessing import image\n'), ((2007, 2032), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2021, 2032), True, 'import numpy as np\n'), ((2058, 2083), 'keras.applications.vgg16.decode_predictions', 'decode_predictions', (['preds'], {}), '(preds)\n', (2076, 2083), False, 'from keras.applications.vgg16 import preprocess_input, decode_predictions\n'), ((2101, 2124), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2119, 2124), False, 'from keras.preprocessing import image\n'), ((2129, 2154), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2143, 2154), True, 'import numpy as np\n'), ((2159, 2178), 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (2175, 2178), False, 'from keras.applications.vgg16 import preprocess_input, decode_predictions\n'), ((2204, 2229), 'keras.applications.vgg16.decode_predictions', 'decode_predictions', (['preds'], {}), '(preds)\n', (2222, 2229), False, 'from keras.applications.vgg16 import preprocess_input, decode_predictions\n'), ((3314, 3365), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""intial_sample_run_2.h5"""'], {}), "(WEIGHTS_DIR, 'intial_sample_run_2.h5')\n", (3326, 3365), False, 'import os\n')] |
"""
This module implements the G0W0 approximation on top of `pyscf.tdscf.rhf_slow` TDHF implementation. Unlike `gw.py`, all
integrals are stored in memory. Several variants of GW are available:
* `pyscf.gw_slow`: the molecular implementation;
* `pyscf.pbc.gw.gw_slow`: single-kpoint PBC (periodic boundary condition) implementation;
* `pyscf.pbc.gw.kgw_slow_supercell`: a supercell approach to PBC implementation with multiple k-points. Runs the
molecular code for a model with several k-points for the cost of discarding momentum conservation and using dense
instead of sparse matrixes;
* (this module) `pyscf.pbc.gw.kgw_slow`: a PBC implementation with multiple k-points;
"""
from pyscf.gw import gw_slow
from pyscf.pbc.gw import kgw_slow_supercell
from pyscf.lib import einsum, direct_sum
from pyscf.pbc.tdscf.krhf_slow import get_block_k_ix
import numpy
# Convention for these modules:
# * IMDS contains routines for intermediates
# * kernel finds GW roots
# * GW provides a container
class IMDS(kgw_slow_supercell.IMDS):
    def __init__(self, td, eri=None):
        """
        GW intermediates (k-version).

        Args:
            td: a container with TD solution;
            eri: a container with electron repulsion integrals;
        """
        gw_slow.AbstractIMDS.__init__(self, td, eri=eri)

        # Number of k-points, inferred from the mean-field orbital energies.
        self.nk = len(self.td._scf.mo_energy)

        # MF: per-k-point occupied (o) and virtual (v) orbital energies.
        self.nocc = self.eri.nocc
        self.o = tuple(e[:nocc] for e, nocc in zip(self.eri.mo_energy, self.eri.nocc))
        self.v = tuple(e[nocc:] for e, nocc in zip(self.eri.mo_energy, self.eri.nocc))

        # TD: amplitudes, excitation energies and transition density matrices.
        self.td_xy = self.td.xy
        self.td_e = self.td.e

        self.tdm = self.construct_tdm()

    def eri_ov(self, item):
        """Return the requested ERI block; ``item`` bundles the block label
        with four k-point indexes.  The 1/nk factor normalizes to the
        per-k-point convention used by the supercell implementation."""
        item, k1, k2, k3, k4 = item
        return self.eri.eri_ov(item, (k1, k2, k3, k4)) / self.nk

    __getitem__ = eri_ov

    def construct_tdm(self):
        """Build the transition density matrices for all momentum transfers."""
        # Indexes of td_x:
        # - k_transfer
        # - k
        # - o: k
        # - v: fw[k]
        # Indexes of td_y:
        # - k_transfer
        # - k
        # - o: k
        # - v: bw[k]
        # Original code:
        # tdm_oo = einsum('vxia,ipaq->vxpq', td_xy, self["oovo"])
        # tdm_ov = einsum('vxia,ipaq->vxpq', td_xy, self["oovv"])
        # tdm_vv = einsum('vxia,ipaq->vxpq', td_xy, self["ovvv"])

        # ERI k:
        # ki, kp, ka=?, kq
        # Now fw[kp] = kq, bw[kq] = kp -> bw[ki] = ka, fw[ka] = ki

        # for x amplitudes, the transfer is bw[0] such that ov -> k, bw[k]
        # for y amplitudes, the transfer is also fw[0] such that ov -> k, bw[k]

        result = []
        for k_transfer in range(self.nk):
            xy_k = self.td_xy[k_transfer]
            # fw/bw map each k-point to its momentum-conserving partner for
            # this transfer (forward and backward directions).
            fw, bw, _, _ = get_block_k_ix(self.eri, k_transfer)
            result.append([[], []])

            # X and Y amplitudes use swapped forward/backward index maps.
            for xy_kx, ix_fw, ix_bw, storage in (
                    (xy_k[:, 0], fw, bw, result[k_transfer][0]),  # X
                    (xy_k[:, 1], bw, fw, result[k_transfer][1]),  # Y
            ):
                for kp in range(self.nk):
                    tdm_oo = tdm_ov = tdm_vv = 0
                    tdm_vo = 0
                    for ki in range(self.nk):
                        # Factor 2: spin summation for the closed-shell case.
                        x = xy_kx[:, ki] * 2
                        tdm_oo = tdm_oo + einsum('via,ipaq->vpq', x, self["oovo", ki, kp, ix_fw[ki], ix_bw[kp]])
                        tdm_ov = tdm_ov + einsum('via,ipaq->vpq', x, self["oovv", ki, kp, ix_fw[ki], ix_bw[kp]])
                        tdm_vv = tdm_vv + einsum('via,ipaq->vpq', x, self["ovvv", ki, kp, ix_fw[ki], ix_bw[kp]])
                        tdm_vo = tdm_vo + einsum('via,ipaq->vpq', x, self["ovvo", ki, kp, ix_fw[ki], ix_bw[kp]])

                    # Assemble the full (occ+vir) x (occ+vir) matrix from the
                    # four blocks.
                    tdm = numpy.concatenate(
                        (
                            numpy.concatenate((tdm_oo, tdm_ov), axis=2),
                            numpy.concatenate((tdm_vo, tdm_vv), axis=2)
                        ),
                        axis=1,
                    )

                    storage.append(tdm)

        # The output is the following:
        # for each kp, kq pair two 3-tensors are given
        # The last two indexes in each tensor correspond to kp, kq
        # Given fw, bw, _, _ = get_block_k_ix(self.eri, (kp, kq)),
        # The first index of the two tensors will correspond to tdhf.e[bw[0]], tdhf.e[fw[0]] correspondingly
        return numpy.array(result)

    def get_sigma_element(self, omega, p, eta, vir_sgn=1):
        """Self-energy matrix element at frequency ``omega`` for orbital ``p``
        (a (k-point, orbital) pair); ``eta`` is the broadening."""
        k, p = p
        # Molecular implementation
        # ------------------------
        # tdm = self.tdm.sum(axis=1)
        # evi = direct_sum('v-i->vi', self.td_e, self.o)
        # eva = direct_sum('v+a->va', self.td_e, self.v)
        # sigma = numpy.sum(tdm[:, :self.nocc, p] ** 2 / (omega + evi - 1j * eta))
        # sigma += numpy.sum(tdm[:, self.nocc:, p] ** 2 / (omega - eva + vir_sgn * 1j * eta))
        # return sigma
        sigma = 0
        for k_transfer, tdm_k in enumerate(self.tdm):
            fw, bw, _, _ = get_block_k_ix(self.eri, k_transfer)
            # When forward and backward maps coincide, X and Y contributions
            # can be merged into a single term; otherwise treat them separately.
            same = fw == bw
            different = numpy.logical_not(same)
            terms = []
            if same.sum() > 0:
                terms.append((tdm_k[0] + tdm_k[1], fw[same], fw[same]))
            if different.sum() > 0:
                terms.append((tdm_k[0], bw[different], fw[different]))
                terms.append((tdm_k[1], fw[different], bw[different]))
            for tdm_kx, ix_fw, ix_bw in terms:
                k1, k2 = ix_bw[k], k
                # Denominators: excitation energy +/- orbital energy, with a
                # small imaginary shift for the occupied/virtual poles.
                evi = direct_sum('v-i->vi', self.td_e[k_transfer], self.o[k1])
                eva = direct_sum('v+a->va', self.td_e[k_transfer], self.v[k1])
                sigma += numpy.sum(tdm_kx[k1][:, :self.nocc[k1], p] ** 2 / (omega + evi - 1j * eta))
                sigma += numpy.sum(tdm_kx[k1][:, self.nocc[k1]:, p] ** 2 / (omega - eva + vir_sgn * 1j * eta))
        return sigma
kernel = gw_slow.kernel
class GW(gw_slow.GW):
base_imds = IMDS
| [
"pyscf.gw.gw_slow.AbstractIMDS.__init__",
"pyscf.pbc.tdscf.krhf_slow.get_block_k_ix",
"numpy.logical_not",
"numpy.array",
"numpy.sum",
"numpy.concatenate",
"pyscf.lib.einsum",
"pyscf.lib.direct_sum"
] | [((1290, 1338), 'pyscf.gw.gw_slow.AbstractIMDS.__init__', 'gw_slow.AbstractIMDS.__init__', (['self', 'td'], {'eri': 'eri'}), '(self, td, eri=eri)\n', (1319, 1338), False, 'from pyscf.gw import gw_slow\n'), ((4375, 4394), 'numpy.array', 'numpy.array', (['result'], {}), '(result)\n', (4386, 4394), False, 'import numpy\n'), ((2743, 2779), 'pyscf.pbc.tdscf.krhf_slow.get_block_k_ix', 'get_block_k_ix', (['self.eri', 'k_transfer'], {}), '(self.eri, k_transfer)\n', (2757, 2779), False, 'from pyscf.pbc.tdscf.krhf_slow import get_block_k_ix\n'), ((4994, 5030), 'pyscf.pbc.tdscf.krhf_slow.get_block_k_ix', 'get_block_k_ix', (['self.eri', 'k_transfer'], {}), '(self.eri, k_transfer)\n', (5008, 5030), False, 'from pyscf.pbc.tdscf.krhf_slow import get_block_k_ix\n'), ((5084, 5107), 'numpy.logical_not', 'numpy.logical_not', (['same'], {}), '(same)\n', (5101, 5107), False, 'import numpy\n'), ((5522, 5578), 'pyscf.lib.direct_sum', 'direct_sum', (['"""v-i->vi"""', 'self.td_e[k_transfer]', 'self.o[k1]'], {}), "('v-i->vi', self.td_e[k_transfer], self.o[k1])\n", (5532, 5578), False, 'from pyscf.lib import einsum, direct_sum\n'), ((5601, 5657), 'pyscf.lib.direct_sum', 'direct_sum', (['"""v+a->va"""', 'self.td_e[k_transfer]', 'self.v[k1]'], {}), "('v+a->va', self.td_e[k_transfer], self.v[k1])\n", (5611, 5657), False, 'from pyscf.lib import einsum, direct_sum\n'), ((5683, 5760), 'numpy.sum', 'numpy.sum', (['(tdm_kx[k1][:, :self.nocc[k1], p] ** 2 / (omega + evi - 1.0j * eta))'], {}), '(tdm_kx[k1][:, :self.nocc[k1], p] ** 2 / (omega + evi - 1.0j * eta))\n', (5692, 5760), False, 'import numpy\n'), ((5784, 5876), 'numpy.sum', 'numpy.sum', (['(tdm_kx[k1][:, self.nocc[k1]:, p] ** 2 / (omega - eva + vir_sgn * 1.0j * eta))'], {}), '(tdm_kx[k1][:, self.nocc[k1]:, p] ** 2 / (omega - eva + vir_sgn * \n 1.0j * eta))\n', (5793, 5876), False, 'import numpy\n'), ((3274, 3344), 'pyscf.lib.einsum', 'einsum', (['"""via,ipaq->vpq"""', 'x', "self['oovo', ki, kp, ix_fw[ki], ix_bw[kp]]"], {}), 
"('via,ipaq->vpq', x, self['oovo', ki, kp, ix_fw[ki], ix_bw[kp]])\n", (3280, 3344), False, 'from pyscf.lib import einsum, direct_sum\n'), ((3387, 3457), 'pyscf.lib.einsum', 'einsum', (['"""via,ipaq->vpq"""', 'x', "self['oovv', ki, kp, ix_fw[ki], ix_bw[kp]]"], {}), "('via,ipaq->vpq', x, self['oovv', ki, kp, ix_fw[ki], ix_bw[kp]])\n", (3393, 3457), False, 'from pyscf.lib import einsum, direct_sum\n'), ((3500, 3570), 'pyscf.lib.einsum', 'einsum', (['"""via,ipaq->vpq"""', 'x', "self['ovvv', ki, kp, ix_fw[ki], ix_bw[kp]]"], {}), "('via,ipaq->vpq', x, self['ovvv', ki, kp, ix_fw[ki], ix_bw[kp]])\n", (3506, 3570), False, 'from pyscf.lib import einsum, direct_sum\n'), ((3613, 3683), 'pyscf.lib.einsum', 'einsum', (['"""via,ipaq->vpq"""', 'x', "self['ovvo', ki, kp, ix_fw[ki], ix_bw[kp]]"], {}), "('via,ipaq->vpq', x, self['ovvo', ki, kp, ix_fw[ki], ix_bw[kp]])\n", (3619, 3683), False, 'from pyscf.lib import einsum, direct_sum\n'), ((3784, 3827), 'numpy.concatenate', 'numpy.concatenate', (['(tdm_oo, tdm_ov)'], {'axis': '(2)'}), '((tdm_oo, tdm_ov), axis=2)\n', (3801, 3827), False, 'import numpy\n'), ((3857, 3900), 'numpy.concatenate', 'numpy.concatenate', (['(tdm_vo, tdm_vv)'], {'axis': '(2)'}), '((tdm_vo, tdm_vv), axis=2)\n', (3874, 3900), False, 'import numpy\n')] |
from imf.filter import matched_filter_core
from imf.types import FrequencySeries, TimeSeries
import numpy as np
def power_chisq(htilde, stilde, num_bins, times, psd=None,
method="regression", **kwargs):
bins = power_chisq_bins(htilde, num_bins, psd=psd, method=method, **kwargs)
snr, corr, norm = matched_filter_core(htilde, stilde, psd=psd,
times=times, method=method, **kwargs)
return power_chisq_from_precomputed(corr, snr, norm, bins, times,
method=method, **kwargs), len(bins)
def power_chisq_bins(htilde, num_bins, psd=None, method="regression", **kwargs):
sigma_vec = sigmasq_series(htilde, psd=psd)
return power_chisq_bins_from_sigmasq_series(sigma_vec, num_bins)
def sigmasq_series(htilde, psd=None):
autocorr = htilde.conj() * htilde
if psd is not None:
autocorr /= psd
return autocorr.cumsum()
def power_chisq_bins_from_sigmasq_series(sigma_vec, num_bins):
sigmasq = sigma_vec[len(sigma_vec ) -2]
edge_vec = np.arange(0, num_bins) * sigmasq / num_bins
bins = np.searchsorted(sigma_vec, edge_vec, side='right')
bins = np.append(bins, len(sigma_vec) - 1)
bins = np.unique(bins)
# if len(bins) != num_bins + 1:
# print("using {} bins instead of {}".format(len(bins), num_bins))
return bins
def power_chisq_from_precomputed(corr, snr, norm, bins, times,
method="regression", **kwargs):
qtilde = FrequencySeries(np.zeros(len(corr)),
frequency_grid=corr.frequency_object,
dtype=corr.dtype,
epoch=corr.epoch)
chisq = TimeSeries(np.zeros(len(snr)), times=snr.times,
dtype=snr.dtype, epoch=snr.epoch)
num_bins = len(bins) - 1
for j in range(num_bins):
k_min = int(bins[j])
k_max = int(bins[ j +1])
qtilde[k_min:k_max] = corr[k_min:k_max]
q = qtilde.to_timeseries(method=method, times=times, **kwargs)
qtilde.fill(0)
chisq += q.squared_norm()
chisq = (chisq * num_bins - snr.squared_norm()) * (norm ** 2)
chisq = TimeSeries(chisq, times=snr.times, epoch=snr.epoch)
return chisq
def weighted_snr(snr, chisq):
for i in range(len(chisq)):
if chisq[i] > 1:
snr[i] /= ((1 + chisq[i]**3) / 2.0)**(1.0 / 6)
return snr | [
"numpy.unique",
"imf.filter.matched_filter_core",
"numpy.searchsorted",
"imf.types.TimeSeries",
"numpy.arange"
] | [((325, 412), 'imf.filter.matched_filter_core', 'matched_filter_core', (['htilde', 'stilde'], {'psd': 'psd', 'times': 'times', 'method': 'method'}), '(htilde, stilde, psd=psd, times=times, method=method, **\n kwargs)\n', (344, 412), False, 'from imf.filter import matched_filter_core\n'), ((1132, 1182), 'numpy.searchsorted', 'np.searchsorted', (['sigma_vec', 'edge_vec'], {'side': '"""right"""'}), "(sigma_vec, edge_vec, side='right')\n", (1147, 1182), True, 'import numpy as np\n'), ((1241, 1256), 'numpy.unique', 'np.unique', (['bins'], {}), '(bins)\n', (1250, 1256), True, 'import numpy as np\n'), ((2227, 2278), 'imf.types.TimeSeries', 'TimeSeries', (['chisq'], {'times': 'snr.times', 'epoch': 'snr.epoch'}), '(chisq, times=snr.times, epoch=snr.epoch)\n', (2237, 2278), False, 'from imf.types import FrequencySeries, TimeSeries\n'), ((1077, 1099), 'numpy.arange', 'np.arange', (['(0)', 'num_bins'], {}), '(0, num_bins)\n', (1086, 1099), True, 'import numpy as np\n')] |
import numpy as np
from .utils import propensity_score
class IPMW:
"""
IPMW model
Example)
>>>import zepid as ze
>>>from zepid.causal.ipw import IPMW
>>>df = ze.load_sample_data(timevary=False)
>>>ipm = IPMW(df,missing='dead')
>>>ipm.fit(model='age0 + art + male')
"""
def __init__(self, df, missing_variable, stabilized=True):
"""Calculates the weight for inverse probability of missing weights using logistic regression.
Function automatically codes a missingness indicator (based on np.nan), so data can be directly
input.
df:
-pandas dataframe object containing all variables of interest
missing_variable:
-column name for missing data. numpy.nan values should indicate missing observations
stabilized:
-whether to return the stabilized or unstabilized IPMW. Default is to return stabilized weights
"""
if df.loc[df[missing_variable].isnull()][missing_variable].shape[0] == 0:
raise ValueError('IPMW requires that missing data is coded as np.nan')
self.df = df.copy()
self.missing = missing_variable
self.df.loc[self.df[self.missing].isnull(), 'observed_indicator'] = 0
self.df.loc[self.df[self.missing].notnull(), 'observed_indicator'] = 1
self.stabilized = stabilized
self.Weight = None
def fit(self, model, print_results=True):
"""
Provide the regression model to generate the inverse probability of missing weights. The fitted regression
model will be used to generate the IPW. The weights can be accessed via the IMPW.Weight attribute
model:
-statsmodels glm format for modeling data. Independent variables should be predictive of missingness of
variable of interest. Example) 'var1 + var2 + var3'
print_results:
-whether to print the model results. Default is True
"""
mod = propensity_score(self.df, 'observed_indicator ~ ' + model, print_results=print_results)
p = mod.predict(self.df)
if self.stabilized:
p_ = np.mean(self.df['observed_indicator'])
w = p_ / p
else:
w = p ** -1
self.df['ipmw'] = w
self.Weight = w
| [
"numpy.mean"
] | [((2155, 2193), 'numpy.mean', 'np.mean', (["self.df['observed_indicator']"], {}), "(self.df['observed_indicator'])\n", (2162, 2193), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: <NAME> 刘祥德
@license: (C) Copyright 2019-now, Node Supply Chain Manager Corporation Limited.
@contact: <EMAIL>
@software:
@file: sort.py
@time: 2019/12/21 13:41
@version 1.0
@desc:
"""
import os
import json
import cv2
from pathlib import Path
import numpy as np
def sort_landmarks_sparse_1(output_dir: Path):
for d in output_dir.iterdir():
landmarks_dir = d / 'landmarks'
json_dir = d / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'faces' not in res:
return None
if 'landmark' not in res['faces'][0]:
return None
landmarks = res['faces'][0]['landmark']
landmarks_list = []
landmarks = sort_dict(landmarks)
# print_result(printFuctionTitle("人脸关键点检测"), landmarks)
for k, landmark in landmarks.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_landmarks_sparse_2(output_dir: Path):
landmarks_dir = output_dir / 'landmarks'
json_dir = output_dir / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'faces' not in res:
return None
if 'landmark' not in res['faces'][0]:
return None
landmarks = res['faces'][0]['landmark']
landmarks_list = []
landmarks = sort_dict(landmarks)
# print_result(printFuctionTitle("人脸关键点检测"), landmarks)
for k, landmark in landmarks.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_landmarks_dense_1(output_dir: Path):
for d in output_dir.iterdir():
landmarks_dir = d / 'landmarks'
json_dir = d / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'face' not in res:
return None
if 'landmark' not in res['face']:
return None
landmarks = res['face']['landmark']
landmarks_list = []
# print_result(printFuctionTitle("人脸关键点检测"), landmarks)
for region, landmarks_dict in landmarks.items():
landmarks_dict = sort_dict(landmarks_dict)
for k, landmark in landmarks_dict.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_landmarks_dense_2(output_dir: Path):
landmarks_dir = output_dir / 'landmarks'
json_dir = output_dir / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'face' not in res:
return None
if 'landmark' not in res['face']:
return None
landmarks = res['face']['landmark']
# print_result(printFuctionTitle("人脸关键点检测"), landmarks)
landmarks_list = []
for region, landmarks_dict in landmarks.items():
landmarks_dict = sort_dict(landmarks_dict)
for k, landmark in landmarks_dict.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_dict(landmarks_dict):
landmarks_list = sorted(landmarks_dict.items(), key=lambda d: d[0])
new_dict = {}
for entry in landmarks_list:
new_dict[entry[0]] = entry[1]
return new_dict
def sortedDictValues(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
# landmarks_path = 'datasets/Articst-faces/landmarks'
# landmarks_path = 'datasets/WebCariTrain/landmarks/845'
landmarks_path = 'datasets/Articst-faces/landmarks'
# dataset_name = 'AF_dataset'
# output_name = 'AF-landmarks-83'
landmarks_path = Path(landmarks_path)
# sort_landmarks_dense_2(landmarks_path)
sort_landmarks_dense_1(landmarks_path)
# sort_landmarks_sparse_1(landmarks_path)
| [
"numpy.array",
"os.path.basename",
"pathlib.Path"
] | [((4780, 4800), 'pathlib.Path', 'Path', (['landmarks_path'], {}), '(landmarks_path)\n', (4784, 4800), False, 'from pathlib import Path\n'), ((1992, 2016), 'numpy.array', 'np.array', (['landmarks_list'], {}), '(landmarks_list)\n', (2000, 2016), True, 'import numpy as np\n'), ((4019, 4043), 'numpy.array', 'np.array', (['landmarks_list'], {}), '(landmarks_list)\n', (4027, 4043), True, 'import numpy as np\n'), ((1091, 1115), 'numpy.array', 'np.array', (['landmarks_list'], {}), '(landmarks_list)\n', (1099, 1115), True, 'import numpy as np\n'), ((3044, 3068), 'numpy.array', 'np.array', (['landmarks_list'], {}), '(landmarks_list)\n', (3052, 3068), True, 'import numpy as np\n'), ((2053, 2072), 'os.path.basename', 'os.path.basename', (['r'], {}), '(r)\n', (2069, 2072), False, 'import os\n'), ((4080, 4099), 'os.path.basename', 'os.path.basename', (['r'], {}), '(r)\n', (4096, 4099), False, 'import os\n'), ((1156, 1175), 'os.path.basename', 'os.path.basename', (['r'], {}), '(r)\n', (1172, 1175), False, 'import os\n'), ((3109, 3128), 'os.path.basename', 'os.path.basename', (['r'], {}), '(r)\n', (3125, 3128), False, 'import os\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2019 <NAME> and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
#
# Please acknowledge and cite the use of this software and its authors
# when results are used in publications or published elsewhere.
#
# You can use the following reference:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# and <NAME>, "Unified functional network and nonlinear time series analysis
# for complex systems science: The pyunicorn package"
"""
Provides classes for the analysis of dynamical systems and time series based
on recurrence plots, including measures of recurrence quantification
analysis (RQA) and recurrence network analysis.
"""
# array object and fast numerics
import numpy as np
from ..core import InteractingNetworks
from .recurrence_plot import RecurrencePlot
from .cross_recurrence_plot import CrossRecurrencePlot
#
# Class definitions
#
class InterSystemRecurrenceNetwork(InteractingNetworks):
"""
Generating and quantitatively analyzing inter-system recurrence networks.
For a inter-system recurrence network, time series x and y do not need to
have the same length! Formally, nodes are identified with state vectors in
the common phase space of both time series. Hence, the time series need to
have the same number of dimensions and identical physical units.
Undirected links are added to describe recurrences within x and y as well
as cross-recurrences between x and y. Self-loops are excluded in this
undirected network representation.
More information on the theory and applications of inter system recurrence
networks can be found in [Feldhoff2012]_.
**Examples:**
- Create an instance of InterSystemRecurrenceNetwork with fixed
recurrence thresholds and without embedding::
InterSystemRecurrenceNetwork(x, y, threshold=(0.1, 0.2, 0.1))
- Create an instance of InterSystemRecurrenceNetwork at a fixed
recurrence rate and using time delay embedding::
InterSystemRecurrenceNetwork(
x, y, dim=3, tau=(2, 1), recurrence_rate=(0.05, 0.05, 0.02))
"""
#
# Internal methods
#
def __init__(self, x, y, metric="supremum",
normalize=False, silence_level=0, **kwds):
"""
Initialize an instance of InterSystemRecurrenceNetwork (ISRN).
.. note::
For an inter system recurrence network, time series x and y need to
have the same number of dimensions!
Creates an embedding of the given time series x and y, calculates a
inter system recurrence matrix from the embedding and then creates
an InteractingNetwork object from this matrix, interpreting the inter
system recurrence matrix as the adjacency matrix of an undirected
complex network.
Either recurrence thresholds ``threshold`` or
recurrence rates ``recurrence_rate`` have to be given as keyword
arguments.
Embedding is only supported for scalar time series. If embedding
dimension ``dim`` and delay ``tau`` are **both** given as keyword
arguments, embedding is applied. Multidimensional time series are
processed as is by default.
:type x: 2D Numpy array (time, dimension)
:arg x: The time series x to be analyzed, can be scalar or
multi-dimensional.
:type y: 2D Numpy array (time, dimension)
:arg y: The time series y to be analyzed, can be scalar or
multi-dimensional.
:type metric: tuple of string
:arg metric: The metric for measuring distances in phase space
("manhattan", "euclidean", "supremum").
:arg bool normalize: Decide whether to normalize the time series to
zero mean and unit standard deviation.
:arg int silence_level: The inverse level of verbosity of the object.
:arg kwds: Additional options.
:type threshold: tuple of number (three numbers)
:keyword threshold: The recurrence threshold keyword for generating
the recurrence plot using fixed thresholds. Give
for each time series and the cross recurrence plot
separately.
:type recurrence_rate: tuple of number (three numbers)
:keyword recurrence_rate: The recurrence rate keyword for generating
the recurrence plot using a fixed recurrence
rate. Give separately for each time series.
:keyword int dim: The embedding dimension. Must be the same for both
time series.
:type tau: tuple of int
:keyword tau: The embedding delay. Give separately for each time
series.
"""
# Store time series
self.x = x.copy().astype("float32")
"""The time series x."""
self.y = y.copy().astype("float32")
"""The time series y."""
# Reshape time series
self.x.shape = (self.x.shape[0], -1)
self.y.shape = (self.y.shape[0], -1)
# Get embedding dimension and delay from **kwds
dim = kwds.get("dim")
tau = kwds.get("tau")
# Check for consistency
if self.x.shape[1] == self.y.shape[1]:
# Set silence_level
self.silence_level = silence_level
"""The inverse level of verbosity of the object."""
# Get number of nodes in subnetwork x
self.N_x = self.x.shape[0]
"""Number of nodes in subnetwork x."""
# Get number of nodes in subnetwork y
self.N_y = self.y.shape[0]
"""Number of nodes in subnetwork y."""
# Get total number of nodes of ISRN
self.N = self.N_x + self.N_y
"""Total number of nodes of ISRN."""
# Store type of metric
self.metric = metric
"""The metric used for measuring distances in phase space."""
# Normalize time series
if normalize:
RecurrencePlot.normalize_time_series(self.x)
RecurrencePlot.normalize_time_series(self.y)
# Embed time series if required
self.dim = dim
if dim is not None and tau is not None and self.x.shape[1] == 1:
self.x_embedded = \
RecurrencePlot.embed_time_series(self.x, dim, tau[0])
"""The embedded time series x."""
self.y_embedded = \
RecurrencePlot.embed_time_series(self.y, dim, tau[1])
"""The embedded time series y."""
else:
self.x_embedded = self.x
self.y_embedded = self.y
# Get threshold or recurrence rate from **kwds, construct
# ISRN accordingly
threshold = kwds.get("threshold")
recurrence_rate = kwds.get("recurrence_rate")
self.threshold = threshold
if threshold is not None:
# Calculate the ISRN using the radius of neighborhood
# threshold
ISRM = self.set_fixed_threshold(threshold)
elif recurrence_rate is not None:
# Calculate the ISRN using a fixed recurrence rate
ISRM = self.set_fixed_recurrence_rate(recurrence_rate)
else:
raise NameError("Please give either threshold or \
recurrence_rate to construct the joint \
recurrence plot!")
InteractingNetworks.__init__(self, adjacency=ISRM, directed=False,
silence_level=self.silence_level)
# No treatment of missing values yet!
self.missing_values = False
else:
raise ValueError("Both time series x and y need to have the same \
dimension!")
def __str__(self):
"""
Returns a string representation.
"""
return ('InterSystemRecurrenceNetwork: time series shapes %s, %s.\n'
'Embedding dimension %i\nThreshold %s, %s metric.\n%s') % (
self.x.shape, self.y.shape, self.dim if self.dim else 0,
self.threshold, self.metric,
InteractingNetworks.__str__(self))
#
# Service methods
#
def clear_cache(self):
"""
Clean up memory by deleting information that can be recalculated from
basic data.
Extends the clean up methods of the parent classes.
"""
# Call clean up of RecurrencePlot objects
self.rp_x.clear_cache()
self.rp_y.clear_cache()
# Call clean up of CrossRecurrencePlot object
self.crp_xy.clear_cache()
# Call clean up of InteractingNetworks
InteractingNetworks.clear_cache(self)
#
# Methods to handle inter system recurrence networks
#
def inter_system_recurrence_matrix(self):
"""
Return the current inter system recurrence matrix :math:`ISRM`.
:rtype: 2D square Numpy array
:return: the current inter system recurrence matrix :math:`ISRM`.
"""
# Shortcuts
N = self.N
N_x = self.N_x
N_y = self.N_y
# Init
ISRM = np.zeros((N, N))
# Combine to inter system recurrence matrix
ISRM[:N_x, :N_x] = self.rp_x.recurrence_matrix()
ISRM[:N_x, N_x:N] = self.crp_xy.recurrence_matrix()
ISRM[N_x:N, :N_x] = self.crp_xy.recurrence_matrix().transpose()
ISRM[N_x:N, N_x:N] = self.rp_y.recurrence_matrix()
return ISRM
def set_fixed_threshold(self, threshold):
"""
Create a inter system recurrence network at fixed thresholds.
:type threshold: tuple of number (three numbers)
:arg threshold: The three threshold parameters. Give for each
time series and the cross recurrence plot separately.
"""
# Compute recurrence matrices of x and y
self.rp_x = RecurrencePlot(time_series=self.x_embedded,
threshold=threshold[0],
metric=self.metric,
silence_level=self.silence_level)
self.rp_y = RecurrencePlot(time_series=self.y_embedded,
threshold=threshold[1],
metric=self.metric,
silence_level=self.silence_level)
# Compute cross-recurrence matrix of x and y
self.crp_xy = CrossRecurrencePlot(x=self.x_embedded, y=self.y_embedded,
threshold=threshold[2],
metric=self.metric,
silence_level=self.silence_level)
# Get combined ISRM
ISRM = self.inter_system_recurrence_matrix()
# Set diagonal of ISRM to zero to avoid self-loops
ISRM.flat[::self.N + 1] = 0
return ISRM
def set_fixed_recurrence_rate(self, density):
"""
Create a inter system recurrence network at fixed link densities (
recurrence rates).
:type density: tuple of number (three numbers)
:arg density: The three recurrence rate parameters. Give for each
time series and the cross recurrence plot separately.
"""
# Compute recurrence matrices of x and y
self.rp_x = RecurrencePlot(time_series=self.x_embedded,
recurrence_rate=density[0],
metric=self.metric,
silence_level=self.silence_level)
self.rp_y = RecurrencePlot(time_series=self.y_embedded,
recurrence_rate=density[1],
metric=self.metric,
silence_level=self.silence_level)
# Compute cross-recurrence matrix of x and y
self.crp_xy = CrossRecurrencePlot(x=self.x_embedded, y=self.y_embedded,
recurrence_rate=density[2],
metric=self.metric,
silence_level=self.silence_level)
# Get combined ISRM
ISRM = self.inter_system_recurrence_matrix()
# Set diagonal of ISRM to zero to avoid self-loops
ISRM.flat[::self.N + 1] = 0
return ISRM
#
# Methods to quantify inter system recurrence networks
#
def internal_recurrence_rates(self):
"""
Return internal recurrence rates of subnetworks x and y.
:rtype: tuple of number (float)
:return: the internal recurrence rates of subnetworks x and y.
"""
return (self.rp_x.recurrence_rate(),
self.rp_y.recurrence_rate())
def cross_recurrence_rate(self):
"""
Return cross recurrence rate between subnetworks x and y.
:rtype: number (float)
:return: the cross recurrence rate between subnetworks x and y.
"""
return self.crp_xy.cross_recurrence_rate()
def cross_global_clustering_xy(self):
"""
Return cross global clustering of x with respect to y.
See [Feldhoff2012]_ for definition, further explanation and
applications.
:rtype: number (float)
:return: the cross global clustering of x with respect to y.
"""
return self.cross_global_clustering(np.arange(self.N_x),
np.arange(self.N_x, self.N))
def cross_global_clustering_yx(self):
"""
Return cross global clustering of y with respect to x.
See [Feldhoff2012]_ for definition, further explanation and
applications.
:rtype: number (float)
:return: the cross global clustering of y with respect to x.
"""
return self.cross_global_clustering(np.arange(self.N_x, self.N),
np.arange(self.N_x))
def cross_transitivity_xy(self):
"""
Return cross transitivity of x with respect to y.
See [Feldhoff2012]_ for definition, further explanation and
applications.
:rtype: number (float)
:return: the cross transitivity of x with respect to y.
"""
return self.cross_transitivity(np.arange(self.N_x),
np.arange(self.N_x, self.N))
def cross_transitivity_yx(self):
"""
Return cross transitivity of y with respect to x.
See [Feldhoff2012]_ for definition, further explanation and
applications.
:rtype: number (float)
:return: the cross transitivity of y with respect to x.
"""
return self.cross_transitivity(np.arange(self.N_x, self.N),
np.arange(self.N_x))
| [
"numpy.zeros",
"numpy.arange"
] | [((9619, 9635), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (9627, 9635), True, 'import numpy as np\n'), ((13924, 13943), 'numpy.arange', 'np.arange', (['self.N_x'], {}), '(self.N_x)\n', (13933, 13943), True, 'import numpy as np\n'), ((13989, 14016), 'numpy.arange', 'np.arange', (['self.N_x', 'self.N'], {}), '(self.N_x, self.N)\n', (13998, 14016), True, 'import numpy as np\n'), ((14384, 14411), 'numpy.arange', 'np.arange', (['self.N_x', 'self.N'], {}), '(self.N_x, self.N)\n', (14393, 14411), True, 'import numpy as np\n'), ((14457, 14476), 'numpy.arange', 'np.arange', (['self.N_x'], {}), '(self.N_x)\n', (14466, 14476), True, 'import numpy as np\n'), ((14824, 14843), 'numpy.arange', 'np.arange', (['self.N_x'], {}), '(self.N_x)\n', (14833, 14843), True, 'import numpy as np\n'), ((14884, 14911), 'numpy.arange', 'np.arange', (['self.N_x', 'self.N'], {}), '(self.N_x, self.N)\n', (14893, 14911), True, 'import numpy as np\n'), ((15259, 15286), 'numpy.arange', 'np.arange', (['self.N_x', 'self.N'], {}), '(self.N_x, self.N)\n', (15268, 15286), True, 'import numpy as np\n'), ((15327, 15346), 'numpy.arange', 'np.arange', (['self.N_x'], {}), '(self.N_x)\n', (15336, 15346), True, 'import numpy as np\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import wget
import time
import warnings
import json
import collections
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.utils.data import Subset
from utils import get_dataset, get_model, get_optimizer, get_scheduler
from utils import LossTracker,run_cmd
from torch.utils.data import DataLoader
from utils import get_pacing_function,balance_order
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--data-dir', default='dataset',
help='path to dataset')
parser.add_argument('--order-dir', default='cifar10-cscores-orig-order.npz',
help='path to train val idx')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
help='model architecture: (default: resnet18)')
parser.add_argument('--dataset', default='cifar10', type=str,
help='dataset')
parser.add_argument('--printfreq', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--workers', default=4, type=int,
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int,
help='number of total epochs to run')
parser.add_argument('-b', '--batchsize', default=128, type=int,
help='mini-batch size (default: 256), this is the total')
parser.add_argument('--optimizer', default="sgd", type=str,
help='optimizer')
parser.add_argument('--scheduler', default="cosine", type=str,
help='lr scheduler')
parser.add_argument('--lr', default=0.1, type=float,
help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', default=5e-4, type=float,
help='weight decay (default: 1e-4)')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--half', default=False, action='store_true',
help='training with half precision')
# curriculum params
parser.add_argument("--pacing-f", default="linear", type=str, help="which pacing function to take")
parser.add_argument('--pacing-a', default=1., type=float,
help='weight decay (default: 1e-4)')
parser.add_argument('--pacing-b', default=1., type=float,
help='weight decay (default: 1e-4)')
parser.add_argument("--ordering", default="curr", type=str, help="which test case to use. supports: standard, curriculum, anti and random")
parser.add_argument('--rand-fraction', default=0., type=float,
help='label curruption (default:0)')
args = parser.parse_args()
def main():
    """End-to-end training driver.

    Builds train/val datasets, derives a per-sample ordering (from
    precomputed C-scores or a saved ordering file), class-balances it, and
    then trains either with a plain epoch loop (``--ordering standard``) or
    with a curriculum loop whose visible data grows via a pacing function.
    Reads all configuration from the module-level ``args``.
    """
    set_seed(args.seed)
    # create training and validation datasets and intiate the dataloaders
    tr_set = get_dataset(args.dataset, args.data_dir, 'train',rand_fraction=args.rand_fraction)
    if args.dataset == "cifar100N":
        # noisy-label variant: validate (and keep a clean copy) on cifar100
        val_set = get_dataset("cifar100", args.data_dir, 'val')
        tr_set_clean = get_dataset("cifar100", args.data_dir, 'train')
    else:
        val_set = get_dataset(args.dataset, args.data_dir, 'val')
    train_loader = torch.utils.data.DataLoader(tr_set, batch_size=args.batchsize,\
                shuffle=True, num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batchsize*2,
                shuffle=False, num_workers=args.workers, pin_memory=True)
    # per-sample losses (reduction="none"); kept for example-scoring use
    criterion_ind = nn.CrossEntropyLoss(reduction="none").cuda()
    # initiate a recorder for saving and loading stats and checkpoints
    if 'cscores-orig-order.npz' in args.order_dir:
        # order by precomputed C-scores; download the score file if missing
        temp_path = os.path.join("orders",args.dataset+'-cscores-orig-order.npz')
        if not os.path.isfile(temp_path):
            print ('Downloading the data cifar10-cscores-orig-order.npz and cifar100-cscores-orig-order.npz to folder orders')
            if 'cifar100' == args.dataset:
                url = 'https://pluskid.github.io/structural-regularity/cscores/cifar100-cscores-orig-order.npz'
            if 'cifar10' == args.dataset:
                url = 'https://pluskid.github.io/structural-regularity/cscores/cifar10-cscores-orig-order.npz'
            wget.download(url, './orders')
        temp_x = np.load(temp_path)['scores']
        ordering = collections.defaultdict(list)
        # map sample index -> [score]; then sort indices by descending score
        list(map(lambda a, b: ordering[a].append(b), np.arange(len(temp_x)),temp_x))
        order = [k for k, v in sorted(ordering.items(), key=lambda item: -1*item[1][0])]
    else:
        print ('Please check if the files %s in your folder -- orders. See ./orders/README.md for instructions on how to create the folder' %(args.order_dir))
        # saved ordering: a torch-serialized dict whose keys are sample indices
        order = [x for x in list(torch.load(os.path.join("orders",args.order_dir)).keys())]
    # interleave classes so every prefix of `order` is class-balanced
    order = balance_order(order, tr_set, num_classes=len(tr_set.classes))
    print ("check BALANCING",len(order),len(tr_set.classes))
    #decide CL, Anti-CL, or random-CL
    if args.ordering == "random":
        np.random.shuffle(order)
    elif args.ordering == "anti_curr":
        order = [x for x in reversed(order)]
    #check the statistics
    bs = args.batchsize
    N = len(order)
    # total optimizer steps; the scheduler below is stepped per iteration
    myiterations = (N//bs+1)*args.epochs
    #initial training
    model = get_model(args.arch, tr_set.nchannels, tr_set.imsize, len(tr_set.classes), args.half)
    optimizer = get_optimizer(args.optimizer, model.parameters(), args.lr, args.momentum, args.wd)
    scheduler = get_scheduler(args.scheduler, optimizer, num_epochs=myiterations)
    start_epoch = 0
    total_iter = 0
    history = {"train_loss": [], "train_acc": [], "val_loss": [], "val_acc": [], "iter": [0,] }
    start_time = time.time()
    trainsets = Subset(tr_set, order)
    train_loader = torch.utils.data.DataLoader(trainsets, batch_size=args.batchsize,
                shuffle=True, num_workers=args.workers, pin_memory=True)
    criterion = nn.CrossEntropyLoss().cuda()
    if args.ordering == "standard":
        # plain epoch-based training over the (re-ordered) full dataset
        iterations = 0
        for epoch in range(args.epochs):
            tr_loss, tr_acc1, iterations = train(train_loader, model, criterion, optimizer,scheduler, epoch,iterations)
            val_loss, val_acc1 = validate(val_loader, model, criterion)
            print ("%s epoch %s iterations w/ LEARNING RATE %s"%(epoch, iterations,optimizer.param_groups[0]["lr"]))
            history["val_loss"].append(val_loss)
            history["val_acc"].append(val_acc1)
            history["train_loss"].append(tr_loss)
            history["train_acc"].append(tr_acc1)
            history["iter"].append(iterations)
    else:
        # curriculum training: the pacing function grows the data prefix
        all_sum = N/(myiterations*(myiterations+1)/2)
        iter_per_epoch = N//bs
        pre_iterations = 0
        startIter = 0
        pacing_function = get_pacing_function(myiterations, N, args)
        startIter_next = pacing_function(0) # <=======================================
        print ('0 iter data between %s and %s w/ Pacing %s'%(startIter,startIter_next,args.pacing_f,))
        # always expose at least 256 samples so the loader is non-trivial
        trainsets = Subset(tr_set, list(order[startIter:max(startIter_next,256)]))
        train_loader = torch.utils.data.DataLoader(trainsets, batch_size=args.batchsize,
                shuffle=True, num_workers=args.workers, pin_memory=True)
        dataiter = iter(train_loader)
        step = 0
        while step < myiterations:
            tracker = LossTracker(len(train_loader), f'iteration : [{step}]', args.printfreq)
            for images, target in train_loader:
                step += 1
                images, target = cuda_transfer(images, target)
                output = model(images)
                loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                scheduler.step()
                tracker.update(loss, output, target)
                tracker.display(step-pre_iterations)
            #If we hit the end of the dynamic epoch build a new data loader
            pre_iterations = step
            if startIter_next <= N:
                # grow the visible data prefix according to the pacing function
                startIter_next = pacing_function(step)# <=======================================
                print ("%s iter data between %s and %s w/ Pacing %s and LEARNING RATE %s "%(step,startIter,startIter_next,args.pacing_f, optimizer.param_groups[0]["lr"]))
                train_loader = torch.utils.data.DataLoader(Subset(tr_set, list(order[startIter:max(startIter_next,256)])),\
                                batch_size=args.batchsize,\
                                shuffle=True, num_workers=args.workers, pin_memory=True)
            # start your record
            if step > 50:
                tr_loss, tr_acc1 = tracker.losses.avg, tracker.top1.avg
                val_loss, val_acc1 = validate(val_loader, model, criterion)
                # record
                history["val_loss"].append(val_loss)
                history["val_acc"].append(val_acc1)
                history["train_loss"].append(tr_loss)
                history["train_acc"].append(tr_acc1)
                history['iter'].append(step)
                torch.save(history,"stat.pt")
            # reinitialization<=================
            model.train()
def train(train_loader, model, criterion, optimizer,scheduler, epoch, iterations):
    """Run one full pass over ``train_loader``, stepping the optimizer and
    the (per-iteration) scheduler after every batch.

    Returns the epoch's average loss, average top-1 accuracy, and the
    updated running iteration count.
    """
    model.train()  # enable dropout / batch-norm updates
    progress = LossTracker(len(train_loader), f'Epoch: [{epoch}]', args.printfreq)
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        iterations += 1
        inputs, labels = cuda_transfer(inputs, labels)
        logits = model(inputs)
        batch_loss = criterion(logits, labels)
        # standard backprop step
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        progress.update(batch_loss, logits, labels)
        progress.display(batch_idx)
        # scheduler is iteration-based, so step it once per batch
        scheduler.step()
    return progress.losses.avg, progress.top1.avg, iterations
def validate(val_loader, model, criterion):
    """Evaluate ``model`` on ``val_loader`` without gradient tracking.

    Returns the average loss and average top-1 accuracy over the loader.
    """
    model.eval()  # disable dropout / freeze batch-norm statistics
    with torch.no_grad():
        meter = LossTracker(len(val_loader), f'val', args.printfreq)
        for step, (batch_in, batch_tgt) in enumerate(val_loader):
            batch_in, batch_tgt = cuda_transfer(batch_in, batch_tgt)
            preds = model(batch_in)
            err = criterion(preds, batch_tgt)
            meter.update(err, preds, batch_tgt)
            meter.display(step)
    return meter.losses.avg, meter.top1.avg
def set_seed(seed=None):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible training.

    Parameters
    ----------
    seed : int or None
        Seed value; when None, nothing is seeded and runs stay stochastic.

    Note: the original body ignored its ``seed`` parameter and read the
    global ``args.seed`` instead; it now uses the argument it is given.
    It also seeds NumPy, since the training script shuffles the sample
    order with ``np.random.shuffle``.
    """
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        # deterministic cuDNN kernels trade speed for reproducibility
        torch.backends.cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
def cuda_transfer(images, target):
    """Move a batch (inputs and labels) onto the GPU.

    Inputs are additionally cast to half precision when ``args.half`` is
    set. Transfers are non-blocking so they can overlap with compute when
    the loader uses pinned memory.
    """
    on_device = (images.cuda(non_blocking=True), target.cuda(non_blocking=True))
    images, target = on_device
    if args.half:
        images = images.half()
    return images, target
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"wget.download",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"utils.get_pacing_function",
"warnings.warn",
"utils.get_scheduler",
"os.path.isfile",
"torch.save",
"time.time",
"torch.manual_seed",
"os.path.join",
"utils.get_dataset",
"random.seed",
"torch.utils.data.Subset",
"... | [((1059, 1114), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Training"""'}), "(description='PyTorch Training')\n", (1082, 1114), False, 'import argparse\n'), ((3608, 3696), 'utils.get_dataset', 'get_dataset', (['args.dataset', 'args.data_dir', '"""train"""'], {'rand_fraction': 'args.rand_fraction'}), "(args.dataset, args.data_dir, 'train', rand_fraction=args.\n rand_fraction)\n", (3619, 3696), False, 'from utils import get_dataset, get_model, get_optimizer, get_scheduler\n'), ((3965, 4088), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['tr_set'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(tr_set, batch_size=args.batchsize, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n', (3992, 4088), False, 'import torch\n'), ((4135, 4265), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_set'], {'batch_size': '(args.batchsize * 2)', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(val_set, batch_size=args.batchsize * 2, shuffle\n =False, num_workers=args.workers, pin_memory=True)\n', (4162, 4265), False, 'import torch\n'), ((6307, 6372), 'utils.get_scheduler', 'get_scheduler', (['args.scheduler', 'optimizer'], {'num_epochs': 'myiterations'}), '(args.scheduler, optimizer, num_epochs=myiterations)\n', (6320, 6372), False, 'from utils import get_dataset, get_model, get_optimizer, get_scheduler\n'), ((6526, 6537), 'time.time', 'time.time', ([], {}), '()\n', (6535, 6537), False, 'import time\n'), ((6554, 6575), 'torch.utils.data.Subset', 'Subset', (['tr_set', 'order'], {}), '(tr_set, order)\n', (6560, 6575), False, 'from torch.utils.data import Subset\n'), ((6595, 6722), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainsets'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(trainsets, 
batch_size=args.batchsize, shuffle=\n True, num_workers=args.workers, pin_memory=True)\n', (6622, 6722), False, 'import torch\n'), ((3745, 3790), 'utils.get_dataset', 'get_dataset', (['"""cifar100"""', 'args.data_dir', '"""val"""'], {}), "('cifar100', args.data_dir, 'val')\n", (3756, 3790), False, 'from utils import get_dataset, get_model, get_optimizer, get_scheduler\n'), ((3814, 3861), 'utils.get_dataset', 'get_dataset', (['"""cifar100"""', 'args.data_dir', '"""train"""'], {}), "('cifar100', args.data_dir, 'train')\n", (3825, 3861), False, 'from utils import get_dataset, get_model, get_optimizer, get_scheduler\n'), ((3890, 3937), 'utils.get_dataset', 'get_dataset', (['args.dataset', 'args.data_dir', '"""val"""'], {}), "(args.dataset, args.data_dir, 'val')\n", (3901, 3937), False, 'from utils import get_dataset, get_model, get_optimizer, get_scheduler\n'), ((4490, 4554), 'os.path.join', 'os.path.join', (['"""orders"""', "(args.dataset + '-cscores-orig-order.npz')"], {}), "('orders', args.dataset + '-cscores-orig-order.npz')\n", (4502, 4554), False, 'import os\n'), ((5137, 5166), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5160, 5166), False, 'import collections\n'), ((5839, 5863), 'numpy.random.shuffle', 'np.random.shuffle', (['order'], {}), '(order)\n', (5856, 5863), True, 'import numpy as np\n'), ((7640, 7682), 'utils.get_pacing_function', 'get_pacing_function', (['myiterations', 'N', 'args'], {}), '(myiterations, N, args)\n', (7659, 7682), False, 'from utils import get_pacing_function, balance_order\n'), ((7992, 8119), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainsets'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(trainsets, batch_size=args.batchsize, shuffle=\n True, num_workers=args.workers, pin_memory=True)\n', (8019, 8119), False, 'import torch\n'), ((10953, 10968), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10966, 
10968), False, 'import torch\n'), ((11385, 11407), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (11396, 11407), False, 'import random\n'), ((11416, 11444), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (11433, 11444), False, 'import torch\n'), ((11503, 11728), 'warnings.warn', 'warnings.warn', (['"""You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints."""'], {}), "(\n 'You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.'\n )\n", (11516, 11728), False, 'import warnings\n'), ((4302, 4339), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4321, 4339), True, 'import torch.nn as nn\n'), ((4567, 4592), 'os.path.isfile', 'os.path.isfile', (['temp_path'], {}), '(temp_path)\n', (4581, 4592), False, 'import os\n'), ((5041, 5071), 'wget.download', 'wget.download', (['url', '"""./orders"""'], {}), "(url, './orders')\n", (5054, 5071), False, 'import wget\n'), ((5089, 5107), 'numpy.load', 'np.load', (['temp_path'], {}), '(temp_path)\n', (5096, 5107), True, 'import numpy as np\n'), ((6765, 6786), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6784, 6786), True, 'import torch.nn as nn\n'), ((10113, 10143), 'torch.save', 'torch.save', (['history', '"""stat.pt"""'], {}), "(history, 'stat.pt')\n", (10123, 10143), False, 'import torch\n'), ((5554, 5592), 'os.path.join', 'os.path.join', (['"""orders"""', 'args.order_dir'], {}), "('orders', args.order_dir)\n", (5566, 5592), False, 'import os\n')] |
"""
@brief test log(time=2s)
"""
import unittest
from logging import getLogger
import numpy
from onnx import helper, TensorProto
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxAdd)
from mlprodict.onnxrt import OnnxInference
from mlprodict.tools.asv_options_helper import (
get_ir_version_from_onnx, get_opset_number_from_onnx)
class TestOnnxrtRuntimeEmpty(ExtTestCase):
    """Tests for the 'empty' runtime of :class:`OnnxInference`.

    The first two tests previously duplicated the model-construction code;
    it is now shared through :meth:`_identity_add_model`.
    """

    def setUp(self):
        # silence skl2onnx logging noise during the tests
        logger = getLogger('skl2onnx')
        logger.disabled = True

    @staticmethod
    def _identity_add_model():
        """Build an ONNX model computing ``Y = X + I2`` (2x2 identity)."""
        idi = numpy.identity(2, dtype=numpy.float32)
        onx = OnnxAdd('X', idi, output_names=['Y'],
                      op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
        model_def.ir_version = get_ir_version_from_onnx()
        return model_def

    @ignore_warnings(DeprecationWarning)
    def test_onnxt_runtime_empty(self):
        """The empty runtime loads a simple model without running it."""
        oinf = OnnxInference(self._identity_add_model(), runtime='empty')
        self.assertNotEmpty(oinf)

    @ignore_warnings(DeprecationWarning)
    def test_onnxt_runtime_empty_dot(self):
        """The dot export of a model loaded with the empty runtime works."""
        oinf = OnnxInference(self._identity_add_model(), runtime='empty')
        self.assertNotEmpty(oinf)
        dot = oinf.to_dot()
        self.assertIn("-> Y;", dot)

    @ignore_warnings(DeprecationWarning)
    def test_onnxt_runtime_empty_unknown(self):
        """The empty runtime must accept graphs with unknown operators."""
        X = helper.make_tensor_value_info(
            'X', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        Y = helper.make_tensor_value_info(
            'Y', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        Z = helper.make_tensor_value_info(
            'Z', TensorProto.FLOAT, [None, 2])  # pylint: disable=E1101
        node_def = helper.make_node('Add', ['X', 'Y'], ['Zt'], name='Zt')
        # 'AddUnknown' is deliberately not a registered ONNX operator
        node_def2 = helper.make_node(
            'AddUnknown', ['X', 'Zt'], ['Z'], name='Z')
        graph_def = helper.make_graph(
            [node_def, node_def2], 'test-model', [X, Y], [Z])
        model_def = helper.make_model(
            graph_def, producer_name='mlprodict', ir_version=6, producer_version='0.1')
        oinf = OnnxInference(model_def, runtime='empty')
        self.assertNotEmpty(oinf)
        dot = oinf.to_dot()
        self.assertIn('AddUnknown', dot)
        self.assertNotIn('x{', dot)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.identity",
"logging.getLogger",
"onnx.helper.make_graph",
"onnx.helper.make_node",
"onnx.helper.make_tensor_value_info",
"mlprodict.tools.asv_options_helper.get_ir_version_from_onnx",
"onnx.helper.make_model",
"mlprodict.onnxrt.OnnxInference",
"unittest.main",
"pyquickhelper.pycode.ignore_w... | [((567, 602), 'pyquickhelper.pycode.ignore_warnings', 'ignore_warnings', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (582, 602), False, 'from pyquickhelper.pycode import ExtTestCase, ignore_warnings\n'), ((1032, 1067), 'pyquickhelper.pycode.ignore_warnings', 'ignore_warnings', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (1047, 1067), False, 'from pyquickhelper.pycode import ExtTestCase, ignore_warnings\n'), ((1565, 1600), 'pyquickhelper.pycode.ignore_warnings', 'ignore_warnings', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (1580, 1600), False, 'from pyquickhelper.pycode import ExtTestCase, ignore_warnings\n'), ((2619, 2634), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2632, 2634), False, 'import unittest\n'), ((508, 529), 'logging.getLogger', 'getLogger', (['"""skl2onnx"""'], {}), "('skl2onnx')\n", (517, 529), False, 'from logging import getLogger\n'), ((657, 695), 'numpy.identity', 'numpy.identity', (['(2)'], {'dtype': 'numpy.float32'}), '(2, dtype=numpy.float32)\n', (671, 695), False, 'import numpy\n'), ((908, 934), 'mlprodict.tools.asv_options_helper.get_ir_version_from_onnx', 'get_ir_version_from_onnx', ([], {}), '()\n', (932, 934), False, 'from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx, get_opset_number_from_onnx\n'), ((950, 991), 'mlprodict.onnxrt.OnnxInference', 'OnnxInference', (['model_def'], {'runtime': '"""empty"""'}), "(model_def, runtime='empty')\n", (963, 991), False, 'from mlprodict.onnxrt import OnnxInference\n'), ((1126, 1164), 'numpy.identity', 'numpy.identity', (['(2)'], {'dtype': 'numpy.float32'}), '(2, dtype=numpy.float32)\n', (1140, 1164), False, 'import numpy\n'), ((1377, 1403), 'mlprodict.tools.asv_options_helper.get_ir_version_from_onnx', 'get_ir_version_from_onnx', ([], {}), '()\n', (1401, 1403), False, 'from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx, get_opset_number_from_onnx\n'), ((1419, 1460), 
'mlprodict.onnxrt.OnnxInference', 'OnnxInference', (['model_def'], {'runtime': '"""empty"""'}), "(model_def, runtime='empty')\n", (1432, 1460), False, 'from mlprodict.onnxrt import OnnxInference\n'), ((1661, 1725), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""X"""', 'TensorProto.FLOAT', '[None, 2]'], {}), "('X', TensorProto.FLOAT, [None, 2])\n", (1690, 1725), False, 'from onnx import helper, TensorProto\n'), ((1776, 1840), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""Y"""', 'TensorProto.FLOAT', '[None, 2]'], {}), "('Y', TensorProto.FLOAT, [None, 2])\n", (1805, 1840), False, 'from onnx import helper, TensorProto\n'), ((1891, 1955), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""Z"""', 'TensorProto.FLOAT', '[None, 2]'], {}), "('Z', TensorProto.FLOAT, [None, 2])\n", (1920, 1955), False, 'from onnx import helper, TensorProto\n'), ((2013, 2067), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['X', 'Y']", "['Zt']"], {'name': '"""Zt"""'}), "('Add', ['X', 'Y'], ['Zt'], name='Zt')\n", (2029, 2067), False, 'from onnx import helper, TensorProto\n'), ((2088, 2148), 'onnx.helper.make_node', 'helper.make_node', (['"""AddUnknown"""', "['X', 'Zt']", "['Z']"], {'name': '"""Z"""'}), "('AddUnknown', ['X', 'Zt'], ['Z'], name='Z')\n", (2104, 2148), False, 'from onnx import helper, TensorProto\n'), ((2182, 2249), 'onnx.helper.make_graph', 'helper.make_graph', (['[node_def, node_def2]', '"""test-model"""', '[X, Y]', '[Z]'], {}), "([node_def, node_def2], 'test-model', [X, Y], [Z])\n", (2199, 2249), False, 'from onnx import helper, TensorProto\n'), ((2283, 2380), 'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""mlprodict"""', 'ir_version': '(6)', 'producer_version': '"""0.1"""'}), "(graph_def, producer_name='mlprodict', ir_version=6,\n producer_version='0.1')\n", (2300, 2380), False, 'from onnx import helper, TensorProto\n'), ((2405, 2446), 
'mlprodict.onnxrt.OnnxInference', 'OnnxInference', (['model_def'], {'runtime': '"""empty"""'}), "(model_def, runtime='empty')\n", (2418, 2446), False, 'from mlprodict.onnxrt import OnnxInference\n'), ((781, 809), 'mlprodict.tools.asv_options_helper.get_opset_number_from_onnx', 'get_opset_number_from_onnx', ([], {}), '()\n', (807, 809), False, 'from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx, get_opset_number_from_onnx\n'), ((1250, 1278), 'mlprodict.tools.asv_options_helper.get_opset_number_from_onnx', 'get_opset_number_from_onnx', ([], {}), '()\n', (1276, 1278), False, 'from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx, get_opset_number_from_onnx\n')] |
from unittest import TestCase
import numpy as np
from scipy.special import logsumexp
from src.d04_modeling.poisson_hmm import PoissonHMM
from scipy.stats import poisson, multivariate_normal
class TestHmmEm(TestCase):
    """EM-algorithm tests for :class:`PoissonHMM`.

    A two-state Poisson HMM with log-linear rates is simulated once in
    ``setUpClass``; the forward/backward recursions are then checked
    against naive reference implementations, and the E/M steps against
    hand-built expectations.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Draw random model parameters and simulate one observation sequence.
        print("setUp")
        cls.num_states = 2
        cls.model = PoissonHMM(num_states=cls.num_states)
        cls.p = 3
        cls.pi = np.ones(cls.num_states)
        np.random.seed(1992)
        # one rate-coefficient vector per latent state
        mu1 = np.random.normal(loc=0.0, scale=1., size=cls.p)
        mu2 = np.random.normal(loc=0.0, scale=1., size=cls.p)
        cls.mu = np.array([mu1, mu2])
        cls.transition_matrix = np.random.dirichlet([1.] * cls.num_states, size=cls.num_states)
        # generate data
        cls.num_periods = 1000
        cls.generate_data(cls.num_periods)
        cls.initial_dist = np.ones(cls.num_states) / cls.num_states

    @classmethod
    def generate_data(cls, num_periods):
        """Simulate covariates, latent Markov states and Poisson counts."""
        # first covariate column is an intercept; the rest are Gaussian
        x_data = np.ones((num_periods, cls.p))
        x_data[:, 1:] = multivariate_normal.rvs(mean=np.zeros(cls.p-1), cov=np.eye(cls.p-1), size=num_periods)
        y_data = np.zeros((num_periods,))
        latent_states = np.zeros(num_periods)
        # rate follows a log-linear model in the covariates
        rate = np.exp(np.dot(x_data[0, :], cls.mu[0]))
        y_data[0] = poisson.rvs(mu=rate)
        for t in range(1, num_periods):
            # sample the next latent state from the transition row
            p = cls.transition_matrix[int(latent_states[t - 1]), :]
            z = np.random.choice(np.arange(cls.num_states).astype(int), p=p)
            rate = np.exp(np.dot(x_data[t, :], cls.mu[z]))
            y_data[t] = poisson.rvs(mu=rate)
            latent_states[t] = z
        cls.event_data = dict(x=[x_data], y=[y_data])
        cls.latent_states = latent_states

    def test_compute_log_likelihoods(self):
        """Per-state log-likelihoods match direct Poisson log-pmf sums."""
        log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
                                                             y_data=self.event_data['y'][0],
                                                             mu=self.mu, num_periods=self.num_periods)
        rate0 = np.exp(np.dot(self.event_data['x'][0], self.mu[0]))
        log_p0 = poisson.logpmf(k=self.event_data['y'][0], mu=rate0)
        rate1 = np.exp(np.dot(self.event_data['x'][0], self.mu[1]))
        log_p1 = poisson.logpmf(k=self.event_data['y'][0], mu=rate1)
        self.assertAlmostEqual(log_likelihoods[:, 0].sum(), log_p0.sum())
        self.assertAlmostEqual(log_likelihoods[:, 1].sum(), log_p1.sum())

    def test_forward_pass(self):
        """Forward recursion matches a naive normalized-alpha computation."""
        log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
                                                             y_data=self.event_data['y'][0],
                                                             mu=self.mu, num_periods=self.num_periods)
        log_alphas = self.model.forward_pass(initial_dist=self.initial_dist, transition_matrix=self.transition_matrix,
                                             log_likelihoods=log_likelihoods)
        num_periods, num_states = log_likelihoods.shape
        expected = np.zeros((num_periods, num_states))
        expected[0, :] = np.log(self.initial_dist)
        for t in range(1, num_periods):
            # log alpha_t(j) = logsumexp_i( log T[i, j] + log alpha_{t-1}(i) + log lik_{t-1}(i) )
            factor = expected[t-1, :] + log_likelihoods[t-1, :]
            for next_state in range(num_states):
                log_alphas_next = logsumexp(np.log(self.transition_matrix[:, next_state].flatten()) + factor)
                expected[t, next_state] = log_alphas_next
            # normalize for numerical stability (as the model does)
            normalizing_factor = expected[t-1, :] + log_likelihoods[t-1, :]
            expected[t, :] = expected[t, :] - logsumexp(normalizing_factor, axis=0)[np.newaxis]
        self.assertEqual(0, (log_alphas - expected).sum())

    def test_backward_pass(self):
        """Backward recursion matches a naive beta computation."""
        log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
                                                             y_data=self.event_data['y'][0],
                                                             mu=self.mu, num_periods=self.num_periods)
        log_betas = self.model.backward_pass(transition_matrix=self.transition_matrix, log_likelihoods=log_likelihoods)
        num_periods, num_states = log_likelihoods.shape
        expected = np.zeros((num_periods, num_states))
        for t in range(1, num_periods):
            # log beta_{T-1-t}(i) = logsumexp_j( log T[i, j] + log beta_{T-t}(j) + log lik_{T-t}(j) )
            factor = expected[num_periods-t, :] + log_likelihoods[num_periods-t, :]
            for prev_state in range(num_states):
                log_betas_prev = logsumexp(np.log(self.transition_matrix[prev_state, :].flatten()) + factor)
                expected[num_periods-1-t, prev_state] = log_betas_prev
        self.assertEqual(0, (log_betas - expected).sum())

    def test_compute_marginal_ll(self):
        """Marginal log-likelihood equals logsumexp over the final alphas."""
        log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
                                                             y_data=self.event_data['y'][0],
                                                             mu=self.mu, num_periods=self.num_periods)
        log_alphas = self.model.forward_pass(initial_dist=self.initial_dist, transition_matrix=self.transition_matrix,
                                             log_likelihoods=log_likelihoods)
        marginal_ll = self.model.compute_marginal_ll(log_alphas, log_likelihoods)
        expected = logsumexp(log_likelihoods + log_alphas, axis=1)
        self.assertEqual(marginal_ll, expected.sum())

    def test_e_step(self):
        """E-step expectations sum to the right totals (probability mass)."""
        parameters = {'mu': self.mu,
                      'initial_dist': self.initial_dist,
                      'transition_matrix': self.transition_matrix}
        expectations, marginal_ll, transition_expectations = self.model.e_step(self.event_data, parameters)
        # expectations, transition_expectations = self.view_latent_states()
        # state posteriors sum to 1 per period; transitions to 1 per step
        self.assertAlmostEqual(first=expectations[0].sum(), second=self.num_periods, places=6)
        self.assertAlmostEqual(first=transition_expectations[0].sum(axis=0).sum(axis=0)[0], second=1, places=6)
        self.assertAlmostEqual(first=transition_expectations[0].sum(),
                               second=(self.num_periods-1), places=6)
        self.assertTrue(isinstance(marginal_ll, np.float64))

    def test_m_step(self):
        """M-step returns parameter arrays of the expected shapes."""
        parameters = {'mu': self.mu,
                      'initial_dist': self.initial_dist,
                      'transition_matrix': self.transition_matrix}
        # expectations, marginal_ll, transition_expectations = self.model.e_step(self.event_data, parameters)
        # use the true simulated latent states as "oracle" expectations
        expectations, transition_expectations = self.view_latent_states()
        parameters = self.model.m_step(self.event_data, expectations, transition_expectations)
        self.assertEqual(parameters['mu'].shape, (self.num_states, self.p))
        self.assertEqual(parameters['transition_matrix'].shape, (self.num_states, self.num_states))

    def view_latent_states(self):
        """Build one-hot expectations from the true simulated latent path."""
        expectations = np.zeros((self.num_periods, self.num_states))
        transition_expectations = np.zeros((self.num_states, self.num_states, self.num_periods - 1))
        for k in range(self.num_states):
            mask = self.latent_states == k
            expectations[mask, k] = 1.
        expectations = [expectations]
        for i in range(self.num_states):
            for j in range(self.num_states):
                # mark periods where the chain moved from state i to state j
                mask = (self.latent_states[1:] == j) & (self.latent_states[:-1] == i)
                transition_expectations[i, j, mask] = 1.
        transition_expectations = [transition_expectations]
        return expectations, transition_expectations

    def test_fit_hmm(self):
        """EM fitting monotonically increases the marginal log-likelihood."""
        lls, parameters = self.model.fit(self.event_data)
        lls = np.diff(lls)
        self.assertTrue((lls > 0).all())
| [
"numpy.random.normal",
"numpy.eye",
"numpy.ones",
"scipy.stats.poisson.rvs",
"numpy.log",
"numpy.diff",
"numpy.array",
"numpy.random.dirichlet",
"numpy.zeros",
"numpy.dot",
"numpy.random.seed",
"src.d04_modeling.poisson_hmm.PoissonHMM",
"scipy.stats.poisson.logpmf",
"scipy.special.logsumex... | [((340, 377), 'src.d04_modeling.poisson_hmm.PoissonHMM', 'PoissonHMM', ([], {'num_states': 'cls.num_states'}), '(num_states=cls.num_states)\n', (350, 377), False, 'from src.d04_modeling.poisson_hmm import PoissonHMM\n'), ((414, 437), 'numpy.ones', 'np.ones', (['cls.num_states'], {}), '(cls.num_states)\n', (421, 437), True, 'import numpy as np\n'), ((446, 466), 'numpy.random.seed', 'np.random.seed', (['(1992)'], {}), '(1992)\n', (460, 466), True, 'import numpy as np\n'), ((481, 529), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': 'cls.p'}), '(loc=0.0, scale=1.0, size=cls.p)\n', (497, 529), True, 'import numpy as np\n'), ((543, 591), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': 'cls.p'}), '(loc=0.0, scale=1.0, size=cls.p)\n', (559, 591), True, 'import numpy as np\n'), ((608, 628), 'numpy.array', 'np.array', (['[mu1, mu2]'], {}), '([mu1, mu2])\n', (616, 628), True, 'import numpy as np\n'), ((661, 725), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([1.0] * cls.num_states)'], {'size': 'cls.num_states'}), '([1.0] * cls.num_states, size=cls.num_states)\n', (680, 725), True, 'import numpy as np\n'), ((968, 997), 'numpy.ones', 'np.ones', (['(num_periods, cls.p)'], {}), '((num_periods, cls.p))\n', (975, 997), True, 'import numpy as np\n'), ((1126, 1150), 'numpy.zeros', 'np.zeros', (['(num_periods,)'], {}), '((num_periods,))\n', (1134, 1150), True, 'import numpy as np\n'), ((1175, 1196), 'numpy.zeros', 'np.zeros', (['num_periods'], {}), '(num_periods)\n', (1183, 1196), True, 'import numpy as np\n'), ((1272, 1292), 'scipy.stats.poisson.rvs', 'poisson.rvs', ([], {'mu': 'rate'}), '(mu=rate)\n', (1283, 1292), False, 'from scipy.stats import poisson, multivariate_normal\n'), ((2130, 2181), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', ([], {'k': "self.event_data['y'][0]", 'mu': 'rate0'}), "(k=self.event_data['y'][0], mu=rate0)\n", (2144, 2181), False, 
'from scipy.stats import poisson, multivariate_normal\n'), ((2267, 2318), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', ([], {'k': "self.event_data['y'][0]", 'mu': 'rate1'}), "(k=self.event_data['y'][0], mu=rate1)\n", (2281, 2318), False, 'from scipy.stats import poisson, multivariate_normal\n'), ((3064, 3099), 'numpy.zeros', 'np.zeros', (['(num_periods, num_states)'], {}), '((num_periods, num_states))\n', (3072, 3099), True, 'import numpy as np\n'), ((3125, 3150), 'numpy.log', 'np.log', (['self.initial_dist'], {}), '(self.initial_dist)\n', (3131, 3150), True, 'import numpy as np\n'), ((4224, 4259), 'numpy.zeros', 'np.zeros', (['(num_periods, num_states)'], {}), '((num_periods, num_states))\n', (4232, 4259), True, 'import numpy as np\n'), ((5301, 5348), 'scipy.special.logsumexp', 'logsumexp', (['(log_likelihoods + log_alphas)'], {'axis': '(1)'}), '(log_likelihoods + log_alphas, axis=1)\n', (5310, 5348), False, 'from scipy.special import logsumexp\n'), ((6889, 6934), 'numpy.zeros', 'np.zeros', (['(self.num_periods, self.num_states)'], {}), '((self.num_periods, self.num_states))\n', (6897, 6934), True, 'import numpy as np\n'), ((6969, 7035), 'numpy.zeros', 'np.zeros', (['(self.num_states, self.num_states, self.num_periods - 1)'], {}), '((self.num_states, self.num_states, self.num_periods - 1))\n', (6977, 7035), True, 'import numpy as np\n'), ((7641, 7653), 'numpy.diff', 'np.diff', (['lls'], {}), '(lls)\n', (7648, 7653), True, 'import numpy as np\n'), ((851, 874), 'numpy.ones', 'np.ones', (['cls.num_states'], {}), '(cls.num_states)\n', (858, 874), True, 'import numpy as np\n'), ((1219, 1250), 'numpy.dot', 'np.dot', (['x_data[0, :]', 'cls.mu[0]'], {}), '(x_data[0, :], cls.mu[0])\n', (1225, 1250), True, 'import numpy as np\n'), ((1561, 1581), 'scipy.stats.poisson.rvs', 'poisson.rvs', ([], {'mu': 'rate'}), '(mu=rate)\n', (1572, 1581), False, 'from scipy.stats import poisson, multivariate_normal\n'), ((2068, 2111), 'numpy.dot', 'np.dot', (["self.event_data['x'][0]", 
'self.mu[0]'], {}), "(self.event_data['x'][0], self.mu[0])\n", (2074, 2111), True, 'import numpy as np\n'), ((2205, 2248), 'numpy.dot', 'np.dot', (["self.event_data['x'][0]", 'self.mu[1]'], {}), "(self.event_data['x'][0], self.mu[1])\n", (2211, 2248), True, 'import numpy as np\n'), ((1051, 1070), 'numpy.zeros', 'np.zeros', (['(cls.p - 1)'], {}), '(cls.p - 1)\n', (1059, 1070), True, 'import numpy as np\n'), ((1074, 1091), 'numpy.eye', 'np.eye', (['(cls.p - 1)'], {}), '(cls.p - 1)\n', (1080, 1091), True, 'import numpy as np\n'), ((1504, 1535), 'numpy.dot', 'np.dot', (['x_data[t, :]', 'cls.mu[z]'], {}), '(x_data[t, :], cls.mu[z])\n', (1510, 1535), True, 'import numpy as np\n'), ((3594, 3631), 'scipy.special.logsumexp', 'logsumexp', (['normalizing_factor'], {'axis': '(0)'}), '(normalizing_factor, axis=0)\n', (3603, 3631), False, 'from scipy.special import logsumexp\n'), ((1434, 1459), 'numpy.arange', 'np.arange', (['cls.num_states'], {}), '(cls.num_states)\n', (1443, 1459), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (C) 2015 <NAME>
import numpy as np
from scipy import sparse as sp
class OneHotEncoder():
    """One-hot encode categorical features.

    Each column of the input is treated as a categorical feature; ``fit``
    learns the set of values per column and ``transform`` maps every row
    to a binary indicator vector with one position per learned value.
    Values not seen during ``fit`` produce all-zero indicators.

    Note: the dense output used to be a ``bool`` array while the sparse
    output was float; both now use a float dtype for consistency.
    """

    def __init__(self, sparse=True):
        # When True, transform() returns a scipy CSR matrix; otherwise a
        # dense numpy array.
        self.sparse = sparse

    def fit(self, X):
        """Learn the value-to-column mapping for each feature column.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Categorical training data.

        Returns
        -------
        self : OneHotEncoder
            Fitted encoder (allows chaining).
        """
        data = np.asarray(X)
        unique_feats = []
        offset = 0  # running column offset into the output matrix
        for col in range(data.shape[1]):
            values = set(data[:, col])
            unique_feats.append({val: j + offset for j, val in enumerate(values)})
            offset += len(values)
        self.unique_feats = unique_feats
        return self

    def transform(self, X):
        """Encode rows of ``X`` as one-hot indicator vectors.

        Parameters
        ----------
        X : array-like
            Rows to encode; a single 1-D row is promoted to 2-D.

        Returns
        -------
        scipy.sparse.csr_matrix or numpy.ndarray
            Shape ``(len(X), total number of learned values)``.
        """
        X = np.atleast_2d(X)
        n_cols = sum(len(d) for d in self.unique_feats)
        if self.sparse:
            # lil_matrix is efficient for incremental writes; convert at the end
            one_hot_matrix = sp.lil_matrix((len(X), n_cols))
        else:
            one_hot_matrix = np.zeros((len(X), n_cols))
        for row_idx, row in enumerate(X):
            for col_idx, val in enumerate(row):
                mapping = self.unique_feats[col_idx]
                if val in mapping:  # unseen values are silently skipped
                    one_hot_matrix[row_idx, mapping[val]] = 1.0
        return sp.csr_matrix(one_hot_matrix) if self.sparse else one_hot_matrix
| [
"numpy.atleast_2d",
"scipy.sparse.csr_matrix",
"numpy.asarray"
] | [((326, 339), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (336, 339), True, 'import numpy as np\n'), ((714, 730), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (727, 730), True, 'import numpy as np\n'), ((1171, 1200), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['one_hot_matrix'], {}), '(one_hot_matrix)\n', (1184, 1200), True, 'from scipy import sparse as sp\n')] |
import numpy as np
from numpy.testing import assert_almost_equal
import torch
import torch.nn as nn
from torch.utils import data
import torch.optim as optim
from mbrltools.pytorch_utils import train, predict, _set_device
torch.manual_seed(0)
class MLP(nn.Module):
    """Multi-Layer Perceptron for the sake of testing.

    Builds ``input_size -> layers[0] -> ... -> layers[-1] -> output_size``
    with an activation after every hidden linear layer.
    """
    def __init__(self, input_size, output_size, layers, activation=None):
        # BUGFIX: the previous default ``activation=nn.LeakyReLU()`` was a
        # mutable default argument — a single module instance created at
        # class-definition time and shared by every MLP instance. Default to
        # None and instantiate per constructor call instead. Passing an
        # explicit activation module behaves exactly as before.
        super(MLP, self).__init__()
        if activation is None:
            activation = nn.LeakyReLU()
        model = nn.Sequential()
        model.add_module('initial-lin', nn.Linear(input_size, layers[0]))
        model.add_module('initial-act', activation)
        for i in range(len(layers) - 1):
            model.add_module('layer{}-lin'.format(i + 1),
                             nn.Linear(layers[i], layers[i + 1]))
            model.add_module('layer{}-act'.format(i + 1), activation)
        model.add_module('final-lin', nn.Linear(layers[-1], output_size))
        self.model = model
    def forward(self, x):
        """Apply the network to a batch ``x`` of shape (batch, input_size)."""
        return self.model(x)
def test_batch_predict():
    # Batched predict() must match both a single full-batch forward pass
    # and predict() called without batching.
    device = _set_device()
    input_size, output_size, layers = 2, 2, [5, 5]
    model = MLP(input_size, output_size, layers).to(device)
    # Sample count deliberately not a multiple of the batch size so the
    # final, smaller batch is exercised too.
    n_samples = 26
    x = torch.randn(n_samples, input_size)
    y = torch.randn(n_samples, output_size)
    model = train(model, data.TensorDataset(x, y), n_epochs=1, batch_size=10)
    with torch.no_grad():
        direct = model(x.to(device)).cpu()
    batched = predict(model, x, batch_size=10)
    unbatched = predict(model, x, batch_size=None)
    assert_almost_equal(batched.numpy(), direct.numpy())
    assert_almost_equal(batched.numpy(), unbatched.numpy())
def test_best_model():
    # train(..., return_best_model=True) must return the validation loss of
    # the model it hands back.
    loss_fn = torch.nn.MSELoss()
    device = _set_device()
    # Simple model: a MLP with two hidden layers.
    input_size, output_size, layers = 2, 2, [50, 50]
    model = MLP(input_size, output_size, layers).to(device)
    # Noise-free linear data: y = 2x + 1.
    n_samples = 100
    x = torch.randn(n_samples, input_size)
    y = 2 * x + 1
    x, y = x.to(device), y.to(device)
    dataset = data.TensorDataset(x, y)
    # Split the dataset into training and validation halves.
    validation_fraction = 0.5
    ind_split = int(np.floor(validation_fraction * len(dataset)))
    dataset_train = data.TensorDataset(*dataset[ind_split:])
    dataset_valid = data.TensorDataset(*dataset[:ind_split])
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    model, best_val_loss = train(
        model, dataset_train, optimizer=optimizer, dataset_valid=dataset_valid,
        n_epochs=10, batch_size=20, return_best_model=True, loss_fn=loss_fn)
    # Recompute the validation loss of the returned model and compare.
    X_valid, y_valid = dataset_valid.tensors
    y_valid_pred = predict(model, X_valid)
    val_loss = loss_fn(y_valid, y_valid_pred).item()
    assert_almost_equal(val_loss, best_val_loss)
| [
"torch.manual_seed",
"torch.nn.LeakyReLU",
"mbrltools.pytorch_utils._set_device",
"mbrltools.pytorch_utils.predict",
"torch.nn.Sequential",
"numpy.floor",
"torch.utils.data.TensorDataset",
"torch.nn.MSELoss",
"numpy.testing.assert_almost_equal",
"torch.nn.Linear",
"torch.no_grad",
"mbrltools.p... | [((224, 244), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (241, 244), False, 'import torch\n'), ((1167, 1180), 'mbrltools.pytorch_utils._set_device', '_set_device', ([], {}), '()\n', (1178, 1180), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((1442, 1476), 'torch.randn', 'torch.randn', (['n_samples', 'input_size'], {}), '(n_samples, input_size)\n', (1453, 1476), False, 'import torch\n'), ((1485, 1520), 'torch.randn', 'torch.randn', (['n_samples', 'output_size'], {}), '(n_samples, output_size)\n', (1496, 1520), False, 'import torch\n'), ((1535, 1559), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['x', 'y'], {}), '(x, y)\n', (1553, 1559), False, 'from torch.utils import data\n'), ((1572, 1620), 'mbrltools.pytorch_utils.train', 'train', (['model', 'dataset'], {'n_epochs': '(1)', 'batch_size': '(10)'}), '(model, dataset, n_epochs=1, batch_size=10)\n', (1577, 1620), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((2157, 2175), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (2173, 2175), False, 'import torch\n'), ((2246, 2259), 'mbrltools.pytorch_utils._set_device', '_set_device', ([], {}), '()\n', (2257, 2259), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((2450, 2484), 'torch.randn', 'torch.randn', (['n_samples', 'input_size'], {}), '(n_samples, input_size)\n', (2461, 2484), False, 'import torch\n'), ((2555, 2579), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['x', 'y'], {}), '(x, y)\n', (2573, 2579), False, 'from torch.utils import data\n'), ((2765, 2805), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['*dataset[ind_split:]'], {}), '(*dataset[ind_split:])\n', (2783, 2805), False, 'from torch.utils import data\n'), ((2826, 2866), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['*dataset[:ind_split]'], {}), '(*dataset[:ind_split])\n', (2844, 2866), False, 'from torch.utils import 
data\n'), ((2951, 3106), 'mbrltools.pytorch_utils.train', 'train', (['model', 'dataset_train'], {'optimizer': 'optimizer', 'dataset_valid': 'dataset_valid', 'n_epochs': '(10)', 'batch_size': '(20)', 'return_best_model': '(True)', 'loss_fn': 'loss_fn'}), '(model, dataset_train, optimizer=optimizer, dataset_valid=\n dataset_valid, n_epochs=10, batch_size=20, return_best_model=True,\n loss_fn=loss_fn)\n', (2956, 3106), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((3234, 3257), 'mbrltools.pytorch_utils.predict', 'predict', (['model', 'X_valid'], {}), '(model, X_valid)\n', (3241, 3257), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((3315, 3359), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['val_loss', 'best_val_loss'], {}), '(val_loss, best_val_loss)\n', (3334, 3359), False, 'from numpy.testing import assert_almost_equal\n'), ((412, 426), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (424, 426), True, 'import torch.nn as nn\n'), ((482, 497), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (495, 497), True, 'import torch.nn as nn\n'), ((1631, 1646), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1644, 1646), False, 'import torch\n'), ((1743, 1775), 'mbrltools.pytorch_utils.predict', 'predict', (['model', 'x'], {'batch_size': '(10)'}), '(model, x, batch_size=10)\n', (1750, 1775), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((1820, 1854), 'mbrltools.pytorch_utils.predict', 'predict', (['model', 'x'], {'batch_size': 'None'}), '(model, x, batch_size=None)\n', (1827, 1854), False, 'from mbrltools.pytorch_utils import train, predict, _set_device\n'), ((2702, 2743), 'numpy.floor', 'np.floor', (['(validation_fraction * n_samples)'], {}), '(validation_fraction * n_samples)\n', (2710, 2743), True, 'import numpy as np\n'), ((538, 570), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'layers[0]'], {}), '(input_size, layers[0])\n', (547, 570), 
True, 'import torch.nn as nn\n'), ((899, 933), 'torch.nn.Linear', 'nn.Linear', (['layers[-1]', 'output_size'], {}), '(layers[-1], output_size)\n', (908, 933), True, 'import torch.nn as nn\n'), ((753, 788), 'torch.nn.Linear', 'nn.Linear', (['layers[i]', 'layers[i + 1]'], {}), '(layers[i], layers[i + 1])\n', (762, 788), True, 'import torch.nn as nn\n')] |
import time
start = time.time()
import numpy as np
from scipy.optimize import basinhopping, minimize
import sys
input = sys.stdin.buffer.readline
def main() -> None:
    """Minimize the K-th smallest weighted distance C_i * dist(p, (X_i, Y_i))
    over a point p, for N weighted points read from stdin.

    Input: first line ``N K``; then N triples ``X Y C``. Prints the optimum
    on a single line with 20 decimal places.
    """
    N, K = map(int, input().split())
    A = np.array(list(map(int, sys.stdin.read().split())))
    X, Y, C = A[::3], A[1::3], A[2::3]

    def f(x, X=X, Y=Y, C=C):
        # K-th smallest weighted Euclidean distance from point x.
        return np.sort(C * np.sqrt(np.power(X - x[0], 2) + np.power(Y - x[1], 2)))[K - 1]

    # Stage 1: local search started from every input point.
    ans = 1e9
    ans_ps = []
    for i, (x, y) in enumerate(zip(X, Y)):
        res = minimize(f, [x, y], method='Nelder-Mead', tol=1e-7, options={"fatol": 1e-7})
        cand = f(res.x)
        ans = min(ans, cand)
        ans_ps.append((cand, i))
    ans_ps.sort()
    # Stage 2: refine from midpoints of point pairs, pairs of the most
    # promising (lowest stage-1 value) points first.
    cands = [(i, j) for i in range(N) for j in range(i + 1, N)]
    cands.sort(key=lambda x: x[0] + x[1])
    # BUGFIX: removed leftover debug output (print(ans_ps) / print(ii, jj))
    # that corrupted the single-line numeric answer.
    for ii, jj in cands:
        i, j = ans_ps[ii][1], ans_ps[jj][1]
        x0 = [(X[i] + X[j]) / 2, (Y[i] + Y[j]) / 2]
        res = minimize(f, x0, method='Nelder-Mead', tol=1e-7, options={"fatol": 1e-7})
        ans = min(ans, f(res.x))
    print("{:.20f}".format(ans))


if __name__ == '__main__':
    main()
| [
"sys.stdin.read",
"scipy.optimize.minimize",
"time.time",
"numpy.power"
] | [((20, 31), 'time.time', 'time.time', ([], {}), '()\n', (29, 31), False, 'import time\n'), ((502, 580), 'scipy.optimize.minimize', 'minimize', (['f', '[x, y]'], {'method': '"""Nelder-Mead"""', 'tol': '(1e-07)', 'options': "{'fatol': 1e-07}"}), "(f, [x, y], method='Nelder-Mead', tol=1e-07, options={'fatol': 1e-07})\n", (510, 580), False, 'from scipy.optimize import basinhopping, minimize\n'), ((1060, 1134), 'scipy.optimize.minimize', 'minimize', (['f', 'x0'], {'method': '"""Nelder-Mead"""', 'tol': '(1e-07)', 'options': "{'fatol': 1e-07}"}), "(f, x0, method='Nelder-Mead', tol=1e-07, options={'fatol': 1e-07})\n", (1068, 1134), False, 'from scipy.optimize import basinhopping, minimize\n'), ((235, 251), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (249, 251), False, 'import sys\n'), ((366, 387), 'numpy.power', 'np.power', (['(X - x[0])', '(2)'], {}), '(X - x[0], 2)\n', (374, 387), True, 'import numpy as np\n'), ((388, 409), 'numpy.power', 'np.power', (['(Y - x[1])', '(2)'], {}), '(Y - x[1], 2)\n', (396, 409), True, 'import numpy as np\n')] |
from typing import Callable, List, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from tqdm import trange
def sid(model_spectra: torch.tensor, target_spectra: torch.tensor, threshold: float = 1e-8, eps: float = 1e-8, torch_device: str = 'cpu') -> torch.tensor:
    """Spectral information divergence, one value per spectrum (row).

    Model spectra are clamped at ``threshold`` and normalized over their
    non-NaN bins before comparison; bins that are NaN in either input are
    excluded from the sum. NOTE: both input tensors are modified in place.
    """
    nan_mask = (torch.isnan(target_spectra) | torch.isnan(model_spectra)).to(device=torch_device)
    model_spectra = model_spectra.to(torch_device)
    model_spectra[model_spectra < threshold] = threshold
    # Normalize every model spectrum so its non-NaN bins sum to one.
    zero_fill = torch.zeros_like(target_spectra, device=torch_device)
    bin_totals = torch.where(nan_mask, zero_fill, model_spectra).sum(dim=1).unsqueeze(1)
    model_spectra = model_spectra / bin_totals
    if not isinstance(target_spectra, torch.Tensor):
        target_spectra = torch.tensor(target_spectra)
    target_spectra = target_spectra.to(torch_device)
    # Set NaN bins to 1 in both tensors so their log-ratios are exactly zero.
    target_spectra[nan_mask] = 1
    model_spectra[nan_mask] = 1
    divergence = (model_spectra * torch.log(model_spectra / target_spectra)
                  + target_spectra * torch.log(target_spectra / model_spectra))
    divergence[nan_mask] = 0
    return divergence.sum(dim=1)
def jsd(model_spectra: torch.tensor, target_spectra: torch.tensor, threshold: float = 1e-8, eps: float = 1e-8, torch_device: str = 'cpu') -> torch.tensor:
    """Jensen-Shannon divergence per spectrum (row), natural-log based.

    Model spectra are clamped at ``threshold`` and normalized over their
    non-NaN bins; NaN bins are excluded. NOTE: inputs are modified in place.
    """
    nan_mask = (torch.isnan(target_spectra) | torch.isnan(model_spectra)).to(device=torch_device)
    model_spectra = model_spectra.to(torch_device)
    model_spectra[model_spectra < threshold] = threshold
    # Normalize every model spectrum so its non-NaN bins sum to one.
    zero_fill = torch.zeros_like(target_spectra, device=torch_device)
    bin_totals = torch.where(nan_mask, zero_fill, model_spectra).sum(dim=1).unsqueeze(1)
    model_spectra = model_spectra / bin_totals
    if not isinstance(target_spectra, torch.Tensor):
        target_spectra = torch.tensor(target_spectra)
    target_spectra = target_spectra.to(torch_device)
    # NaN bins contribute log(1/1) == 0 to both KL-style terms.
    target_spectra[nan_mask] = 1
    model_spectra[nan_mask] = 1
    midpoint = (target_spectra + model_spectra) / 2
    divergence = (model_spectra * torch.log(model_spectra / midpoint)
                  + target_spectra * torch.log(target_spectra / midpoint))
    divergence[nan_mask] = 0
    return divergence.sum(dim=1) / 2
def stmse(model_spectra: torch.tensor, target_spectra: torch.tensor, threshold: float = 1e-8, eps: float = 1e-8, torch_device: str = 'cpu') -> torch.tensor:
    """Spectral target-weighted MSE: mean over bins of (model-target)^2 / target.

    Model spectra are clamped at ``threshold`` and normalized over their
    non-NaN bins. NaN bins are set to 1 in both tensors so they add zero
    error, but they still count in the mean's denominator. NOTE: inputs are
    modified in place.
    """
    nan_mask = (torch.isnan(target_spectra) | torch.isnan(model_spectra)).to(device=torch_device)
    model_spectra = model_spectra.to(torch_device)
    model_spectra[model_spectra < threshold] = threshold
    # Normalize every model spectrum so its non-NaN bins sum to one.
    zero_fill = torch.zeros_like(target_spectra, device=torch_device)
    bin_totals = torch.where(nan_mask, zero_fill, model_spectra).sum(dim=1).unsqueeze(1)
    model_spectra = model_spectra / bin_totals
    if not isinstance(target_spectra, torch.Tensor):
        target_spectra = torch.tensor(target_spectra)
    target_spectra = target_spectra.to(torch_device)
    target_spectra[nan_mask] = 1
    model_spectra[nan_mask] = 1
    residual = model_spectra - target_spectra
    return (residual * residual / target_spectra).mean(dim=1)
def srmse(model_spectra: torch.tensor, target_spectra: torch.tensor, threshold: float = 1e-8, eps: float = 1e-8, torch_device: str = 'cpu') -> torch.tensor:
    """Spectral RMSE per spectrum: sqrt(mean((model-target)^2) + eps).

    Model spectra are clamped at ``threshold`` and normalized over their
    non-NaN bins; NaN bins are set equal (to 1) in both tensors so they add
    zero error, though they still count in the mean. NOTE: inputs are
    modified in place.
    """
    nan_mask = (torch.isnan(target_spectra) | torch.isnan(model_spectra)).to(device=torch_device)
    model_spectra = model_spectra.to(torch_device)
    model_spectra[model_spectra < threshold] = threshold
    # Normalize every model spectrum so its non-NaN bins sum to one.
    zero_fill = torch.zeros_like(target_spectra, device=torch_device)
    bin_totals = torch.where(nan_mask, zero_fill, model_spectra).sum(dim=1).unsqueeze(1)
    model_spectra = model_spectra / bin_totals
    if not isinstance(target_spectra, torch.Tensor):
        target_spectra = torch.tensor(target_spectra)
    target_spectra = target_spectra.to(torch_device)
    target_spectra[nan_mask] = 1
    model_spectra[nan_mask] = 1
    residual = model_spectra - target_spectra
    # eps keeps the gradient of sqrt finite at zero error.
    return torch.sqrt(residual.pow(2).mean(dim=1) + eps)
def smse(model_spectra: torch.tensor, target_spectra: torch.tensor, threshold: float = 1e-8, eps: float = 1e-8, torch_device: str = 'cpu') -> torch.tensor:
    """Spectral MSE per spectrum: mean over bins of (model-target)^2.

    Model spectra are clamped at ``threshold`` and normalized over their
    non-NaN bins; NaN bins are set equal (to 1) in both tensors so they add
    zero error, though they still count in the mean. NOTE: inputs are
    modified in place.
    """
    nan_mask = (torch.isnan(target_spectra) | torch.isnan(model_spectra)).to(device=torch_device)
    model_spectra = model_spectra.to(torch_device)
    model_spectra[model_spectra < threshold] = threshold
    # Normalize every model spectrum so its non-NaN bins sum to one.
    zero_fill = torch.zeros_like(target_spectra, device=torch_device)
    bin_totals = torch.where(nan_mask, zero_fill, model_spectra).sum(dim=1).unsqueeze(1)
    model_spectra = model_spectra / bin_totals
    if not isinstance(target_spectra, torch.Tensor):
        target_spectra = torch.tensor(target_spectra)
    target_spectra = target_spectra.to(torch_device)
    target_spectra[nan_mask] = 1
    model_spectra[nan_mask] = 1
    residual = model_spectra - target_spectra
    return residual.pow(2).mean(dim=1)
def wasserstein(model_spectra: torch.tensor, target_spectra: torch.tensor, threshold: float = 1e-8, eps: float = 1e-8, torch_device: str = 'cpu') -> torch.tensor:
    """1-Wasserstein-style distance per spectrum: sum of |CDF_model - CDF_target|.

    Model spectra are clamped at ``threshold`` and normalized over their
    non-NaN bins; NaN bins are zeroed in both tensors before the cumulative
    sums. NOTE: inputs are modified in place.
    """
    nan_mask = (torch.isnan(target_spectra) | torch.isnan(model_spectra)).to(device=torch_device)
    model_spectra = model_spectra.to(torch_device)
    model_spectra[model_spectra < threshold] = threshold
    # Normalize every model spectrum so its non-NaN bins sum to one.
    zero_fill = torch.zeros_like(target_spectra, device=torch_device)
    bin_totals = torch.where(nan_mask, zero_fill, model_spectra).sum(dim=1).unsqueeze(1)
    model_spectra = model_spectra / bin_totals
    if not isinstance(target_spectra, torch.Tensor):
        target_spectra = torch.tensor(target_spectra)
    target_spectra = target_spectra.to(torch_device)
    # Zero out NaN bins so they do not perturb the cumulative sums.
    target_spectra[nan_mask] = 0
    model_spectra[nan_mask] = 0
    gap = torch.cumsum(model_spectra, dim=1) - torch.cumsum(target_spectra, dim=1)
    return torch.abs(gap).sum(dim=1)
def pre_normalize_targets(targets: List[List[float]], threshold: float = 1e-8, torch_device: str = 'cpu', batch_size: int = 50) -> List[List[float]]:
    """Normalize each target spectrum so its non-NaN bins sum to one.

    Values below ``threshold`` are raised to ``threshold`` first; NaN bins
    are preserved as NaN in the output. Rows are processed in chunks of
    ``batch_size`` (with a tqdm progress bar).
    """
    result = []
    for start in trange(0, len(targets), batch_size):
        with torch.no_grad():
            chunk = torch.tensor(targets[start:start + batch_size],
                                 dtype=float, device=torch_device)
            # NaN compares False against threshold, so NaNs survive the clamp.
            chunk[chunk < threshold] = threshold
            missing = torch.isnan(chunk)
            # Zero out NaN bins so they do not affect the row sums.
            chunk[missing] = 0
            row_sums = chunk.sum(dim=1).unsqueeze(1)
            normalized = chunk / row_sums
            normalized[missing] = float('nan')
            result.extend(normalized.data.cpu().tolist())
    return result
def generate_conv_matrix(length: int = 1801, stdev: float = 0.0, torch_device: str = 'cpu') -> torch.tensor:
    """Build a (length x length) matrix for Gaussian broadening of spectra.

    With ``stdev == 0`` this is the identity (no broadening). Otherwise
    entry (i, j) is a Gaussian of standard deviation ``stdev`` — expressed
    in number of bins — evaluated at |i - j|. Returned dtype is float64.
    """
    if stdev == 0:
        return torch.eye(length, dtype=float, device=torch_device)
    # Vectorized Toeplitz construction: an |i - j| distance grid fed through
    # the Gaussian kernel. Replaces the original O(length^2) Python loops
    # with the same values computed in native torch ops.
    idx = torch.arange(length, device=torch_device, dtype=torch.double)
    dist = (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()
    return torch.exp(-dist ** 2 / (2 * stdev ** 2)) / np.sqrt(2 * np.pi * stdev ** 2)
def roundrobin_sid(spectra: torch.Tensor, threshold: float = 1e-8, torch_device: str = 'cpu', stdev: float = 0.0) -> torch.Tensor:
    """
    Takes a block of input spectra and makes a pairwise comparison between each of the input spectra for a given molecule,
    returning the spectral information divergence (SID) of every pair. Used to evaluate the variation within an ensemble
    of model spectrum predictions.
    :spectra: A 3D tensor containing each of the spectra to be compared. Different molecules along axis=0,
    different ensemble spectra along axis=1, different frequency bins along axis=2.
    :threshold: SID calculation requires positive values in each position, this value is used to replace any zero or negative values.
    :torch_device: Tag for pytorch device to use for calculation. If run in chemprop, this will be args.device.
    :stdev: If the spectra are to be gaussian convolved before sid comparison, they will be spread using a gaussian of this standard deviation,
    defined in terms of the number of target bins not the units of the bin labels.
    :return: A tensor with one row per molecule (axis=0) and one column per pairwise combination of ensemble
    spectra (axis=1), holding that pair's SID.
    """
    spectra=spectra.to(device = torch_device,dtype=float)
    if stdev != 0.0:
        # One Gaussian convolution matrix, shared by every molecule.
        conv_matrix = generate_conv_matrix(length=len(spectra[0,0]),stdev=stdev,torch_device=torch_device)
    ensemble_size=spectra.size()[1]
    spectrum_size=spectra.size()[2]
    # Number of unordered pairs among ensemble members: C(ensemble_size, 2).
    number_pairs=sum(range(ensemble_size))
    ensemble_sids=torch.zeros([0,number_pairs],device=torch_device,dtype=float) # grows to (molecules, pairs)
    for i in range(len(spectra)):
        with torch.no_grad():
            mol_spectra = spectra[i] # (ensemble, bins)
            # NaN layout appears assumed identical across the ensemble: the
            # mask is read off the first member only — TODO confirm.
            nan_mask=torch.isnan(mol_spectra[0]) # (bins,)
            nan_mask=nan_mask.to(device=torch_device)
            mol_spectra = mol_spectra.to(device=torch_device,dtype=float)
            mol_spectra[mol_spectra<threshold] = threshold
            mol_spectra[:,nan_mask]=0
            if stdev != 0.0:
                mol_spectra = torch.matmul(mol_spectra,conv_matrix)
            # Normalize each ensemble spectrum over its non-NaN bins.
            mol_sums = torch.sum(mol_spectra, axis=1) # (ensemble,)
            mol_sums = torch.unsqueeze(mol_sums,axis=1) # (ensemble, 1)
            mol_norm = torch.div(mol_spectra,mol_sums) # (ensemble, bins)
            mol_norm[:,nan_mask]=1
            # Stack the two sides of every unordered pair: iteration j pairs
            # members whose indices differ by j+1.
            ensemble_head = torch.zeros([0,spectrum_size],device=torch_device,dtype=float) # grows to (pairs, bins)
            ensemble_tail = torch.zeros([0,spectrum_size],device=torch_device,dtype=float) # grows to (pairs, bins)
            for j in range(len(mol_norm)-1):
                ensemble_tail = torch.cat((ensemble_tail,mol_norm[j+1:]),axis=0) # (pairs so far, bins)
                ensemble_head = torch.cat((ensemble_head,mol_norm[:-j-1]),axis=0) # (pairs so far, bins)
            # Symmetric SID: both directed divergence terms summed per bin.
            mol_loss = torch.zeros_like(ensemble_head,device=torch_device,dtype=float) # (pairs, bins)
            mol_loss = torch.mul(torch.log(torch.div(ensemble_head,ensemble_tail)),ensemble_head) \
                + torch.mul(torch.log(torch.div(ensemble_tail,ensemble_head)),ensemble_tail)
            mol_loss[:,nan_mask]=0
            mol_loss = torch.sum(mol_loss,axis=1) # (pairs,)
            mol_loss = torch.unsqueeze(mol_loss,axis=0)
            ensemble_sids = torch.cat((ensemble_sids,mol_loss),axis=0) # (molecules so far, pairs)
    return ensemble_sids
def apply_spectral_mask(spectral_mask: List[List[float]],spectra: List[List[float]],features: List[List[float]], torch_device: str = 'cpu', batch_size: int = 50):
    """Blank out spectrum bins that the molecule's phase does not allow.

    The trailing ``len(spectral_mask)`` columns of ``features`` (the phase
    features) are multiplied into ``spectral_mask`` to select each row's
    allowed bins; every disallowed bin is replaced with NaN. Rows are
    processed in chunks of ``batch_size`` (with a tqdm progress bar).
    """
    mask_tensor = torch.from_numpy(np.array(spectral_mask, dtype=float)).to(device=torch_device)
    masked = []
    for start in trange(0, len(spectra), batch_size):
        with torch.no_grad():
            batch_spectra = torch.tensor(spectra[start:start + batch_size],
                                         dtype=float, device=torch_device)
            batch_features = torch.tensor(features[start:start + batch_size],
                                          dtype=float, device=torch_device)
            # Trailing feature columns hold the phase encoding.
            phase = batch_features[:, -len(mask_tensor):]
            allowed = torch.matmul(phase, mask_tensor).bool()
            batch_spectra[~allowed] = float('nan')
            masked.extend(batch_spectra.data.cpu().tolist())
    return masked
| [
"torch.mul",
"torch.sqrt",
"torch.from_numpy",
"numpy.array",
"torch.sum",
"torch.mean",
"torch.unsqueeze",
"torch.eye",
"torch.matmul",
"torch.zeros_like",
"torch.ones_like",
"torch.abs",
"numpy.square",
"torch.add",
"tqdm.trange",
"torch.cumsum",
"torch.where",
"torch.cat",
"to... | [((465, 518), 'torch.zeros_like', 'torch.zeros_like', (['target_spectra'], {'device': 'torch_device'}), '(target_spectra, device=torch_device)\n', (481, 518), False, 'import torch\n'), ((737, 779), 'torch.unsqueeze', 'torch.unsqueeze', (['sum_model_spectra'], {'axis': '(1)'}), '(sum_model_spectra, axis=1)\n', (752, 779), False, 'import torch\n'), ((799, 842), 'torch.div', 'torch.div', (['model_spectra', 'sum_model_spectra'], {}), '(model_spectra, sum_model_spectra)\n', (808, 842), False, 'import torch\n'), ((1039, 1070), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (1054, 1070), False, 'import torch\n'), ((1373, 1396), 'torch.sum', 'torch.sum', (['loss'], {'axis': '(1)'}), '(loss, axis=1)\n', (1382, 1396), False, 'import torch\n'), ((1747, 1800), 'torch.zeros_like', 'torch.zeros_like', (['target_spectra'], {'device': 'torch_device'}), '(target_spectra, device=torch_device)\n', (1763, 1800), False, 'import torch\n'), ((2019, 2061), 'torch.unsqueeze', 'torch.unsqueeze', (['sum_model_spectra'], {'axis': '(1)'}), '(sum_model_spectra, axis=1)\n', (2034, 2061), False, 'import torch\n'), ((2081, 2124), 'torch.div', 'torch.div', (['model_spectra', 'sum_model_spectra'], {}), '(model_spectra, sum_model_spectra)\n', (2090, 2124), False, 'import torch\n'), ((2384, 2415), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (2399, 2415), False, 'import torch\n'), ((2481, 2521), 'torch.add', 'torch.add', (['target_spectra', 'model_spectra'], {}), '(target_spectra, model_spectra)\n', (2490, 2521), False, 'import torch\n'), ((2539, 2564), 'torch.div', 'torch.div', (['avg_spectra', '(2)'], {}), '(avg_spectra, 2)\n', (2548, 2564), False, 'import torch\n'), ((2596, 2627), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (2611, 2627), False, 'import torch\n'), ((3253, 3306), 'torch.zeros_like', 'torch.zeros_like', (['target_spectra'], {'device': 'torch_device'}), 
'(target_spectra, device=torch_device)\n', (3269, 3306), False, 'import torch\n'), ((3525, 3567), 'torch.unsqueeze', 'torch.unsqueeze', (['sum_model_spectra'], {'axis': '(1)'}), '(sum_model_spectra, axis=1)\n', (3540, 3567), False, 'import torch\n'), ((3587, 3630), 'torch.div', 'torch.div', (['model_spectra', 'sum_model_spectra'], {}), '(model_spectra, sum_model_spectra)\n', (3596, 3630), False, 'import torch\n'), ((3827, 3858), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (3842, 3858), False, 'import torch\n'), ((4395, 4448), 'torch.zeros_like', 'torch.zeros_like', (['target_spectra'], {'device': 'torch_device'}), '(target_spectra, device=torch_device)\n', (4411, 4448), False, 'import torch\n'), ((4667, 4709), 'torch.unsqueeze', 'torch.unsqueeze', (['sum_model_spectra'], {'axis': '(1)'}), '(sum_model_spectra, axis=1)\n', (4682, 4709), False, 'import torch\n'), ((4729, 4772), 'torch.div', 'torch.div', (['model_spectra', 'sum_model_spectra'], {}), '(model_spectra, sum_model_spectra)\n', (4738, 4772), False, 'import torch\n'), ((4969, 5000), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (4984, 5000), False, 'import torch\n'), ((5106, 5162), 'torch.mean', 'torch.mean', (['((model_spectra - target_spectra) ** 2)'], {'dim': '(1)'}), '((model_spectra - target_spectra) ** 2, dim=1)\n', (5116, 5162), False, 'import torch\n'), ((5169, 5191), 'torch.sqrt', 'torch.sqrt', (['(loss + eps)'], {}), '(loss + eps)\n', (5179, 5191), False, 'import torch\n'), ((5544, 5597), 'torch.zeros_like', 'torch.zeros_like', (['target_spectra'], {'device': 'torch_device'}), '(target_spectra, device=torch_device)\n', (5560, 5597), False, 'import torch\n'), ((5816, 5858), 'torch.unsqueeze', 'torch.unsqueeze', (['sum_model_spectra'], {'axis': '(1)'}), '(sum_model_spectra, axis=1)\n', (5831, 5858), False, 'import torch\n'), ((5878, 5921), 'torch.div', 'torch.div', (['model_spectra', 'sum_model_spectra'], {}), 
'(model_spectra, sum_model_spectra)\n', (5887, 5921), False, 'import torch\n'), ((6118, 6149), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (6133, 6149), False, 'import torch\n'), ((6255, 6311), 'torch.mean', 'torch.mean', (['((model_spectra - target_spectra) ** 2)'], {'dim': '(1)'}), '((model_spectra - target_spectra) ** 2, dim=1)\n', (6265, 6311), False, 'import torch\n'), ((6666, 6719), 'torch.zeros_like', 'torch.zeros_like', (['target_spectra'], {'device': 'torch_device'}), '(target_spectra, device=torch_device)\n', (6682, 6719), False, 'import torch\n'), ((6938, 6980), 'torch.unsqueeze', 'torch.unsqueeze', (['sum_model_spectra'], {'axis': '(1)'}), '(sum_model_spectra, axis=1)\n', (6953, 6980), False, 'import torch\n'), ((7000, 7043), 'torch.div', 'torch.div', (['model_spectra', 'sum_model_spectra'], {}), '(model_spectra, sum_model_spectra)\n', (7009, 7043), False, 'import torch\n'), ((7304, 7334), 'torch.ones_like', 'torch.ones_like', (['model_spectra'], {}), '(model_spectra)\n', (7319, 7334), False, 'import torch\n'), ((7396, 7427), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (7411, 7427), False, 'import torch\n'), ((7491, 7525), 'torch.cumsum', 'torch.cumsum', (['model_spectra'], {'dim': '(1)'}), '(model_spectra, dim=1)\n', (7503, 7525), False, 'import torch\n'), ((7543, 7578), 'torch.cumsum', 'torch.cumsum', (['target_spectra'], {'dim': '(1)'}), '(target_spectra, dim=1)\n', (7555, 7578), False, 'import torch\n'), ((7610, 7641), 'torch.ones_like', 'torch.ones_like', (['target_spectra'], {}), '(target_spectra)\n', (7625, 7641), False, 'import torch\n'), ((7744, 7759), 'torch.abs', 'torch.abs', (['loss'], {}), '(loss)\n', (7753, 7759), False, 'import torch\n'), ((7771, 7794), 'torch.sum', 'torch.sum', (['loss'], {'axis': '(1)'}), '(loss, axis=1)\n', (7780, 7794), False, 'import torch\n'), ((8056, 8087), 'tqdm.trange', 'trange', (['(0)', 'num_iters', 'iter_step'], {}), '(0, 
num_iters, iter_step)\n', (8062, 8087), False, 'from tqdm import trange\n'), ((8888, 8939), 'torch.eye', 'torch.eye', (['length'], {'dtype': 'float', 'device': 'torch_device'}), '(length, dtype=float, device=torch_device)\n', (8897, 8939), False, 'import torch\n'), ((11221, 11285), 'torch.zeros', 'torch.zeros', (['[0, number_pairs]'], {'device': 'torch_device', 'dtype': 'float'}), '([0, number_pairs], device=torch_device, dtype=float)\n', (11232, 11285), False, 'import torch\n'), ((13157, 13193), 'numpy.array', 'np.array', (['spectral_mask'], {'dtype': 'float'}), '(spectral_mask, dtype=float)\n', (13165, 13193), True, 'import numpy as np\n'), ((13334, 13365), 'tqdm.trange', 'trange', (['(0)', 'num_iters', 'iter_step'], {}), '(0, num_iters, iter_step)\n', (13340, 13365), False, 'from tqdm import trange\n'), ((351, 378), 'torch.isnan', 'torch.isnan', (['target_spectra'], {}), '(target_spectra)\n', (362, 378), False, 'import torch\n'), ((379, 405), 'torch.isnan', 'torch.isnan', (['model_spectra'], {}), '(model_spectra)\n', (390, 405), False, 'import torch\n'), ((660, 706), 'torch.where', 'torch.where', (['nan_mask', 'zero_sub', 'model_spectra'], {}), '(nan_mask, zero_sub, model_spectra)\n', (671, 706), False, 'import torch\n'), ((946, 974), 'torch.tensor', 'torch.tensor', (['target_spectra'], {}), '(target_spectra)\n', (958, 974), False, 'import torch\n'), ((1633, 1660), 'torch.isnan', 'torch.isnan', (['target_spectra'], {}), '(target_spectra)\n', (1644, 1660), False, 'import torch\n'), ((1661, 1687), 'torch.isnan', 'torch.isnan', (['model_spectra'], {}), '(model_spectra)\n', (1672, 1687), False, 'import torch\n'), ((1942, 1988), 'torch.where', 'torch.where', (['nan_mask', 'zero_sub', 'model_spectra'], {}), '(nan_mask, zero_sub, model_spectra)\n', (1953, 1988), False, 'import torch\n'), ((2223, 2251), 'torch.tensor', 'torch.tensor', (['target_spectra'], {}), '(target_spectra)\n', (2235, 2251), False, 'import torch\n'), ((2874, 2897), 'torch.sum', 'torch.sum', 
(['loss'], {'axis': '(1)'}), '(loss, axis=1)\n', (2883, 2897), False, 'import torch\n'), ((3139, 3166), 'torch.isnan', 'torch.isnan', (['target_spectra'], {}), '(target_spectra)\n', (3150, 3166), False, 'import torch\n'), ((3167, 3193), 'torch.isnan', 'torch.isnan', (['model_spectra'], {}), '(model_spectra)\n', (3178, 3193), False, 'import torch\n'), ((3448, 3494), 'torch.where', 'torch.where', (['nan_mask', 'zero_sub', 'model_spectra'], {}), '(nan_mask, zero_sub, model_spectra)\n', (3459, 3494), False, 'import torch\n'), ((3734, 3762), 'torch.tensor', 'torch.tensor', (['target_spectra'], {}), '(target_spectra)\n', (3746, 3762), False, 'import torch\n'), ((3975, 4039), 'torch.div', 'torch.div', (['((model_spectra - target_spectra) ** 2)', 'target_spectra'], {}), '((model_spectra - target_spectra) ** 2, target_spectra)\n', (3984, 4039), False, 'import torch\n'), ((4281, 4308), 'torch.isnan', 'torch.isnan', (['target_spectra'], {}), '(target_spectra)\n', (4292, 4308), False, 'import torch\n'), ((4309, 4335), 'torch.isnan', 'torch.isnan', (['model_spectra'], {}), '(model_spectra)\n', (4320, 4335), False, 'import torch\n'), ((4590, 4636), 'torch.where', 'torch.where', (['nan_mask', 'zero_sub', 'model_spectra'], {}), '(nan_mask, zero_sub, model_spectra)\n', (4601, 4636), False, 'import torch\n'), ((4876, 4904), 'torch.tensor', 'torch.tensor', (['target_spectra'], {}), '(target_spectra)\n', (4888, 4904), False, 'import torch\n'), ((5430, 5457), 'torch.isnan', 'torch.isnan', (['target_spectra'], {}), '(target_spectra)\n', (5441, 5457), False, 'import torch\n'), ((5458, 5484), 'torch.isnan', 'torch.isnan', (['model_spectra'], {}), '(model_spectra)\n', (5469, 5484), False, 'import torch\n'), ((5739, 5785), 'torch.where', 'torch.where', (['nan_mask', 'zero_sub', 'model_spectra'], {}), '(nan_mask, zero_sub, model_spectra)\n', (5750, 5785), False, 'import torch\n'), ((6025, 6053), 'torch.tensor', 'torch.tensor', (['target_spectra'], {}), '(target_spectra)\n', (6037, 6053), 
False, 'import torch\n'), ((6552, 6579), 'torch.isnan', 'torch.isnan', (['target_spectra'], {}), '(target_spectra)\n', (6563, 6579), False, 'import torch\n'), ((6580, 6606), 'torch.isnan', 'torch.isnan', (['model_spectra'], {}), '(model_spectra)\n', (6591, 6606), False, 'import torch\n'), ((6861, 6907), 'torch.where', 'torch.where', (['nan_mask', 'zero_sub', 'model_spectra'], {}), '(nan_mask, zero_sub, model_spectra)\n', (6872, 6907), False, 'import torch\n'), ((7145, 7173), 'torch.tensor', 'torch.tensor', (['target_spectra'], {}), '(target_spectra)\n', (7157, 7173), False, 'import torch\n'), ((7706, 7732), 'torch.mul', 'torch.mul', (['cum_targets', '(-1)'], {}), '(cum_targets, -1)\n', (7715, 7732), False, 'import torch\n'), ((8102, 8117), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8115, 8117), False, 'import torch\n'), ((8212, 8265), 'torch.tensor', 'torch.tensor', (['batch'], {'dtype': 'float', 'device': 'torch_device'}), '(batch, dtype=float, device=torch_device)\n', (8224, 8265), False, 'import torch\n'), ((8334, 8352), 'torch.isnan', 'torch.isnan', (['batch'], {}), '(batch)\n', (8345, 8352), False, 'import torch\n'), ((8408, 8432), 'torch.sum', 'torch.sum', (['batch'], {'axis': '(1)'}), '(batch, axis=1)\n', (8417, 8432), False, 'import torch\n'), ((8457, 8492), 'torch.unsqueeze', 'torch.unsqueeze', (['batch_sums'], {'axis': '(1)'}), '(batch_sums, axis=1)\n', (8472, 8492), False, 'import torch\n'), ((8517, 8545), 'torch.div', 'torch.div', (['batch', 'batch_sums'], {}), '(batch, batch_sums)\n', (8526, 8545), False, 'import torch\n'), ((11336, 11351), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11349, 11351), False, 'import torch\n'), ((11420, 11447), 'torch.isnan', 'torch.isnan', (['mol_spectra[0]'], {}), '(mol_spectra[0])\n', (11431, 11447), False, 'import torch\n'), ((11799, 11829), 'torch.sum', 'torch.sum', (['mol_spectra'], {'axis': '(1)'}), '(mol_spectra, axis=1)\n', (11808, 11829), False, 'import torch\n'), ((11857, 11890), 
'torch.unsqueeze', 'torch.unsqueeze', (['mol_sums'], {'axis': '(1)'}), '(mol_sums, axis=1)\n', (11872, 11890), False, 'import torch\n'), ((11919, 11951), 'torch.div', 'torch.div', (['mol_spectra', 'mol_sums'], {}), '(mol_spectra, mol_sums)\n', (11928, 11951), False, 'import torch\n'), ((12023, 12088), 'torch.zeros', 'torch.zeros', (['[0, spectrum_size]'], {'device': 'torch_device', 'dtype': 'float'}), '([0, spectrum_size], device=torch_device, dtype=float)\n', (12034, 12088), False, 'import torch\n'), ((12122, 12187), 'torch.zeros', 'torch.zeros', (['[0, spectrum_size]'], {'device': 'torch_device', 'dtype': 'float'}), '([0, spectrum_size], device=torch_device, dtype=float)\n', (12133, 12187), False, 'import torch\n'), ((12440, 12505), 'torch.zeros_like', 'torch.zeros_like', (['ensemble_head'], {'device': 'torch_device', 'dtype': 'float'}), '(ensemble_head, device=torch_device, dtype=float)\n', (12456, 12505), False, 'import torch\n'), ((12763, 12790), 'torch.sum', 'torch.sum', (['mol_loss'], {'axis': '(1)'}), '(mol_loss, axis=1)\n', (12772, 12790), False, 'import torch\n'), ((12816, 12849), 'torch.unsqueeze', 'torch.unsqueeze', (['mol_loss'], {'axis': '(0)'}), '(mol_loss, axis=0)\n', (12831, 12849), False, 'import torch\n'), ((12877, 12921), 'torch.cat', 'torch.cat', (['(ensemble_sids, mol_loss)'], {'axis': '(0)'}), '((ensemble_sids, mol_loss), axis=0)\n', (12886, 12921), False, 'import torch\n'), ((13211, 13242), 'torch.from_numpy', 'torch.from_numpy', (['spectral_mask'], {}), '(spectral_mask)\n', (13227, 13242), False, 'import torch\n'), ((13380, 13395), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13393, 13395), False, 'import torch\n'), ((13561, 13622), 'torch.tensor', 'torch.tensor', (['batch_spectra'], {'dtype': 'float', 'device': 'torch_device'}), '(batch_spectra, dtype=float, device=torch_device)\n', (13573, 13622), False, 'import torch\n'), ((13650, 13712), 'torch.tensor', 'torch.tensor', (['batch_features'], {'dtype': 'float', 'device': 
'torch_device'}), '(batch_features, dtype=float, device=torch_device)\n', (13662, 13712), False, 'import torch\n'), ((1196, 1236), 'torch.div', 'torch.div', (['model_spectra', 'target_spectra'], {}), '(model_spectra, target_spectra)\n', (1205, 1236), False, 'import torch\n'), ((1284, 1324), 'torch.div', 'torch.div', (['target_spectra', 'model_spectra'], {}), '(target_spectra, model_spectra)\n', (1293, 1324), False, 'import torch\n'), ((2692, 2729), 'torch.div', 'torch.div', (['model_spectra', 'avg_spectra'], {}), '(model_spectra, avg_spectra)\n', (2701, 2729), False, 'import torch\n'), ((2777, 2815), 'torch.div', 'torch.div', (['target_spectra', 'avg_spectra'], {}), '(target_spectra, avg_spectra)\n', (2786, 2815), False, 'import torch\n'), ((11738, 11776), 'torch.matmul', 'torch.matmul', (['mol_spectra', 'conv_matrix'], {}), '(mol_spectra, conv_matrix)\n', (11750, 11776), False, 'import torch\n'), ((12270, 12322), 'torch.cat', 'torch.cat', (['(ensemble_tail, mol_norm[j + 1:])'], {'axis': '(0)'}), '((ensemble_tail, mol_norm[j + 1:]), axis=0)\n', (12279, 12322), False, 'import torch\n'), ((12359, 12412), 'torch.cat', 'torch.cat', (['(ensemble_head, mol_norm[:-j - 1])'], {'axis': '(0)'}), '((ensemble_head, mol_norm[:-j - 1]), axis=0)\n', (12368, 12412), False, 'import torch\n'), ((13860, 13903), 'torch.matmul', 'torch.matmul', (['phase_features', 'spectral_mask'], {}), '(phase_features, spectral_mask)\n', (13872, 13903), False, 'import torch\n'), ((12555, 12594), 'torch.div', 'torch.div', (['ensemble_head', 'ensemble_tail'], {}), '(ensemble_head, ensemble_tail)\n', (12564, 12594), False, 'import torch\n'), ((12650, 12689), 'torch.div', 'torch.div', (['ensemble_tail', 'ensemble_head'], {}), '(ensemble_tail, ensemble_head)\n', (12659, 12689), False, 'import torch\n'), ((9135, 9151), 'numpy.square', 'np.square', (['stdev'], {}), '(stdev)\n', (9144, 9151), True, 'import numpy as np\n'), ((9165, 9184), 'numpy.square', 'np.square', (['vector_i'], {}), '(vector_i)\n', (9174, 
9184), True, 'import numpy as np\n'), ((9188, 9204), 'numpy.square', 'np.square', (['stdev'], {}), '(stdev)\n', (9197, 9204), True, 'import numpy as np\n')] |
import numpy as np
import mathutils
import itertools
class Environment:
    """One landscape state of the rotated moving-peaks benchmark.

    Holds per-peak heights (H), widths (W) and centres (C) as
    (dimension x num_peaks) arrays, plus the rotation angle S and the
    time step at which this environment occurs.
    """

    def __init__(self, dimension, num_peaks, initial_angle):
        shape = (dimension, num_peaks)
        self.H = np.zeros(shape)  # peak heights
        self.W = np.zeros(shape)  # peak widths
        self.C = np.zeros(shape)  # peak centres
        # NOTE(review): the angle is hard-coded to 2 and the
        # ``initial_angle`` argument is currently ignored -- confirm intent.
        self.S = 2  # initial angle
        self.timeStep = 0
class RMPB:
    """Rotated Moving Peaks Benchmark (RMPB) for dynamic optimization.

    Builds a sequence of environments (peak heights H, widths W, centres C
    and a rotation angle S) and applies one of several change types between
    consecutive environments.  ``evaluate`` scores a point against the
    current environment; ``change`` advances to the next one.
    """
    def __init__(self):
        #CONSTANTS
        # search space boundaries
        self.X_MIN = -25.
        self.X_MAX = 25.
        #search space dimension
        self.DIM = 2
        # height
        self.H_MIN = 30.
        self.H_MAX = 70.
        self.H_SEV = 5.
        # width
        self.W_MIN = 1.
        self.W_MAX = 13.
        self.W_SEV = 0.5
        # angle
        self.S_MIN = -np.pi
        self.S_MAX = np.pi
        self.S_SEV = 1.0
        # initial angle for rotation
        self.INITIAL_ANGLE = 0.
        # chaotic constant
        self.A = 3.67
        # gamma
        self.GAMMA = 0.04
        # gamma max
        self.GAMMA_MAX = 0.1
        # period
        self.PERIOD = 12
        # noisy severity
        self.NOISY_SEV = 0.8
        self.RAND_SEED = 12345
        ## Factors subject of experimentation
        #number of peaks for each dimension
        self.num_peaks = 5
        #number of environment to learn
        self.learning_period = 20
        # time windows : number of future environments for R estimation
        self.time_windows = 2
        # number of function evaluations before a change
        self.computational_budget = 2500
        # number of changes for each simulation (run) : number of environments
        self.num_changes = 100
        # change type experimented for each simulation (run)
        self.change_type = 1
        ## Internal attributes
        # environments (corresponding to the initial and
        # those obtained after a change)
        self.environments = []
        self.curr_env = 0
        # position-change and parameter-change strategies (rebound in init())
        self.C_change = self.rotate_position
        self.P_change = self.ct_small_step
        self.ss = []
        self.minimize = False
    def init(self):
        """Seed the RNG, select change strategies per change_type and
        pre-generate every environment of the run."""
        self.rnd = np.random.RandomState(self.RAND_SEED)
        if(self.change_type == 1):
            self.P_change = self.ct_small_step
        elif(self.change_type == 2):
            self.P_change = self.ct_large_step
        elif(self.change_type == 3):
            self.P_change = self.ct_random
        elif(self.change_type == 4):
            self.P_change = self.ct_chaotic
            # chaotic change also moves the centres itself, so disable rotation
            self.C_change = self.ct_dummy
        elif(self.change_type == 5):
            self.P_change = self.ct_recurrent
        elif(self.change_type == 6):
            self.P_change = self.ct_recurrent_with_noise
        self.curr_env = 0
        #self.rnd = np.random.RandomState(self.RAND_SEED)
        # initilizing the environments
        self.build_environments()
    def build_environments(self):
        """Create env0 with uniformly random peaks, then derive each later
        environment by applying P_change (H/W/S) and C_change (centres).

        Note: P_change/C_change mutate self.environments[i] in place, so the
        list must already contain the environment before they are called.
        """
        self.environments = []
        self.ss = []
        #initial environment without change
        env0 = Environment(self.DIM, self.num_peaks, self.INITIAL_ANGLE)
        env0.C = self.rnd.uniform(low = self.X_MIN, high = self.X_MAX, size = (self.DIM, self.num_peaks))
        env0.H = self.rnd.uniform(low = self.H_MIN, high = self.H_MAX, size = (self.DIM, self.num_peaks))
        env0.W = self.rnd.uniform(low = self.W_MIN, high = self.W_MAX, size = (self.DIM, self.num_peaks))
        env0.timeStep = 0
        self.environments.append(env0)
        self.ss.append(env0.S)
        #generate the rest of the environments from env0
        for i in range(1, self.num_changes + self.time_windows):
            env = Environment(dimension=self.DIM, num_peaks=self.num_peaks, initial_angle=self.INITIAL_ANGLE)
            env.timeStep = i
            self.environments.append(env)
            self.P_change(i)
            self.C_change(i)
            self.ss.append(env.S)
    def evaluate(self, x):
        """Fitness of point x in the current environment."""
        return self.eval_env(x, self.curr_env)
    def evaluate_vect(self, x):
        """Row-wise evaluation of a 2-D array of points."""
        return np.apply_along_axis(self.evaluate, 1, x)
    def eval_env(self, x, env_id):
        """Cone-shaped peaks: per dimension take the best (max) peak value at
        x, then average over dimensions.  Assumes len(x) == self.DIM."""
        env = self.environments[env_id]
        all_peaks = env.H - env.W * np.abs(env.C - np.tile(x, (self.num_peaks, 1)).transpose())
        max_peaks = np.max(all_peaks, axis=1)
        return np.mean(max_peaks)
    def true_robusteness_eval(self, x):
        """Robustness of x over the upcoming environments.

        NOTE(review): the range spans only time_windows - 1 environments
        (a single one for the default time_windows = 2) -- confirm whether
        the intent was range(curr_env, curr_env + time_windows).
        """
        result = [self.eval_env(x, env_id) for env_id in range(self.curr_env, self.curr_env + self.time_windows - 1)]
        return self.robustness_definition(result)
    def true_robusteness_eval_vect(self, x):
        """Row-wise robustness evaluation of a 2-D array of points."""
        return np.apply_along_axis(self.true_robusteness_eval, 1, x)
    def robustness_definition(self, vect_f):
        """Scalar robustness aggregate: mean over per-environment fitnesses."""
        return np.mean(vect_f)
    def change(self):
        """Advance the benchmark to the next pre-built environment."""
        self.curr_env += 1
    def rotate_position(self, env_id):
        """Rotate the previous environment's peak centres by angle env.S,
        clipping the result back into the search space."""
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        #Rotation matrix
        c, s = np.cos(env.S), np.sin(env.S)
        rot_mat = np.array(((c, -s), (s, c)))
        def apply_rotation(col_vect):
            return np.dot(col_vect, rot_mat)
        env.C = np.apply_along_axis(apply_rotation, 0, prev_env.C)
        env.C = np.clip(env.C, self.X_MIN, self.X_MAX)
    def ct_small_step(self, env_id):
        """Change type 1: small bounded random perturbation of H, W and S."""
        def change(prev_data, min_val, max_val, sev, gamma, rnd_val):
            result = prev_data + gamma * (max_val - min_val) * sev * (2* rnd_val - 1)
            return result.clip(min_val, max_val)
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        env.H = change(prev_env.H, self.H_MIN, self.H_MAX, self.H_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.H.shape))
        env.W = change(prev_env.W, self.W_MIN, self.W_MAX, self.W_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.W.shape))
        env.S = change(np.array([prev_env.S]), self.S_MIN, self.S_MAX, self.S_SEV, self.GAMMA, self.rnd.uniform(size=(1,)))
        # unwrap the 1-element array produced above
        env.S = env.S[0]
    def ct_large_step(self, env_id):
        """Change type 2: large perturbation biased toward the sign of the
        random draw (mathutils.sign term)."""
        def change(prev_data, min_val, max_val, sev, gamma, rnd_val):
            result = 2 * rnd_val - 1
            result = prev_data + (max_val-min_val)*(gamma * mathutils.sign(result) + (self.GAMMA_MAX - gamma)* result) * sev
            return result.clip(min_val, max_val)
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        env.H = change(prev_env.H, self.H_MIN, self.H_MAX, self.H_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.H.shape))
        env.W = change(prev_env.W, self.W_MIN, self.W_MAX, self.W_SEV, self.GAMMA, self.rnd.uniform(size=prev_env.W.shape))
        env.S = change(np.array([prev_env.S]), self.S_MIN, self.S_MAX, self.S_SEV, self.GAMMA, self.rnd.uniform(size=(1,)))
        env.S = env.S[0]
    def ct_random(self, env_id):
        """Change type 3: multiplicative Gaussian rescaling of H, W and S."""
        def change(prev_data, min_val, max_val, sev, rnd_val):
            result = prev_data * rnd_val * sev
            return result.clip(min_val, max_val)
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        env.H = change(prev_env.H, self.H_MIN, self.H_MAX, self.H_SEV, self.rnd.normal(size=prev_env.H.shape))
        env.W = change(prev_env.W, self.W_MIN, self.W_MAX, self.W_SEV, self.rnd.normal(size=prev_env.W.shape))
        env.S = change(np.array([prev_env.S]), self.S_MIN, self.S_MAX, self.S_SEV, self.rnd.normal(size=(1,)))
        env.S = env.S[0]
    def ct_dummy(self, env_id):
        # Intentional no-op: used when the parameter change already handles
        # the centres (change type 4).
        pass
    def ct_chaotic(self, env_id):
        """Change type 4: logistic-map style chaotic update of H, W and C.

        NOTE(review): the leading ``min_val *`` factor looks suspicious --
        the shifted logistic map is usually ``min_val + A * ...``; confirm
        against the benchmark's reference definition.
        """
        def change(prev_data, min_val, max_val):
            result = min_val * self.A * (prev_data - min_val) * (1 - (prev_data - min_val)/(max_val-min_val))
            return result.clip(min_val, max_val)
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        env.H = change(prev_env.H, self.H_MIN, self.H_MAX)
        env.W = change(prev_env.W, self.W_MIN, self.W_MAX)
        env.C = change(prev_env.C, self.X_MIN, self.X_MAX)
        #env.S = env.S[0]
    def ct_recurrent(self, env_id):
        """Change type 5: sinusoidal (periodic) H and W with per-peak phase
        offsets; S is fixed to the per-step rotation of one period."""
        def change(prev_data, min_val, max_val, angle):
            result = min_val + (max_val-min_val) *(np.sin(2*(np.pi*env_id)/self.PERIOD + angle) + 1)/2.;
            return result.clip(min_val, max_val)
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        # phase offsets spread over the period, one per (dimension, peak)
        angles = np.array([x + y for x in range(self.DIM) for y in range(self.num_peaks)])
        angles = self.PERIOD * angles/(self.DIM + self.num_peaks)
        angles = np.reshape(angles, (self.DIM, self.num_peaks))
        env.H = change(prev_env.H, self.H_MIN, self.H_MAX, angles)
        env.W = change(prev_env.W, self.W_MIN, self.W_MAX, angles)
        env.S = 2*np.pi/self.PERIOD
    def ct_recurrent_with_noise(self, env_id):
        """Change type 6: same as ct_recurrent plus additive Gaussian noise
        scaled by NOISY_SEV."""
        def change(prev_data, min_val, max_val, angle, rnd_val):
            result = min_val + (max_val-min_val) *(np.sin(2*(np.pi*env_id)/self.PERIOD + angle) + 1)/2.;
            result = result + self.NOISY_SEV*rnd_val
            return result.clip(min_val, max_val)
        env = self.environments[env_id]
        prev_env = self.environments[env.timeStep-1]
        angles = np.array([x + y for x in range(self.DIM) for y in range(self.num_peaks)])
        angles = self.PERIOD * angles/(self.DIM + self.num_peaks)
        angles = np.reshape(angles, (self.DIM, self.num_peaks))
        env.H = change(prev_env.H, self.H_MIN, self.H_MAX, angles, self.rnd.normal(size=prev_env.H.shape))
        env.W = change(prev_env.W, self.W_MIN, self.W_MAX, angles, self.rnd.normal(size=prev_env.W.shape))
        env.S = 2*np.pi/self.PERIOD
| [
"numpy.clip",
"numpy.mean",
"numpy.tile",
"numpy.reshape",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.dot",
"numpy.cos",
"mathutils.sign",
"numpy.sin",
"numpy.random.RandomState"
] | [((151, 183), 'numpy.zeros', 'np.zeros', (['(dimension, num_peaks)'], {}), '((dimension, num_peaks))\n', (159, 183), True, 'import numpy as np\n'), ((201, 233), 'numpy.zeros', 'np.zeros', (['(dimension, num_peaks)'], {}), '((dimension, num_peaks))\n', (209, 233), True, 'import numpy as np\n'), ((251, 283), 'numpy.zeros', 'np.zeros', (['(dimension, num_peaks)'], {}), '((dimension, num_peaks))\n', (259, 283), True, 'import numpy as np\n'), ((2123, 2160), 'numpy.random.RandomState', 'np.random.RandomState', (['self.RAND_SEED'], {}), '(self.RAND_SEED)\n', (2144, 2160), True, 'import numpy as np\n'), ((4005, 4045), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.evaluate', '(1)', 'x'], {}), '(self.evaluate, 1, x)\n', (4024, 4045), True, 'import numpy as np\n'), ((4238, 4263), 'numpy.max', 'np.max', (['all_peaks'], {'axis': '(1)'}), '(all_peaks, axis=1)\n', (4244, 4263), True, 'import numpy as np\n'), ((4279, 4297), 'numpy.mean', 'np.mean', (['max_peaks'], {}), '(max_peaks)\n', (4286, 4297), True, 'import numpy as np\n'), ((4568, 4621), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.true_robusteness_eval', '(1)', 'x'], {}), '(self.true_robusteness_eval, 1, x)\n', (4587, 4621), True, 'import numpy as np\n'), ((4683, 4698), 'numpy.mean', 'np.mean', (['vect_f'], {}), '(vect_f)\n', (4690, 4698), True, 'import numpy as np\n'), ((4970, 4997), 'numpy.array', 'np.array', (['((c, -s), (s, c))'], {}), '(((c, -s), (s, c)))\n', (4978, 4997), True, 'import numpy as np\n'), ((5099, 5149), 'numpy.apply_along_axis', 'np.apply_along_axis', (['apply_rotation', '(0)', 'prev_env.C'], {}), '(apply_rotation, 0, prev_env.C)\n', (5118, 5149), True, 'import numpy as np\n'), ((5166, 5204), 'numpy.clip', 'np.clip', (['env.C', 'self.X_MIN', 'self.X_MAX'], {}), '(env.C, self.X_MIN, self.X_MAX)\n', (5173, 5204), True, 'import numpy as np\n'), ((8509, 8555), 'numpy.reshape', 'np.reshape', (['angles', '(self.DIM, self.num_peaks)'], {}), '(angles, (self.DIM, self.num_peaks))\n', 
(8519, 8555), True, 'import numpy as np\n'), ((9316, 9362), 'numpy.reshape', 'np.reshape', (['angles', '(self.DIM, self.num_peaks)'], {}), '(angles, (self.DIM, self.num_peaks))\n', (9326, 9362), True, 'import numpy as np\n'), ((4923, 4936), 'numpy.cos', 'np.cos', (['env.S'], {}), '(env.S)\n', (4929, 4936), True, 'import numpy as np\n'), ((4938, 4951), 'numpy.sin', 'np.sin', (['env.S'], {}), '(env.S)\n', (4944, 4951), True, 'import numpy as np\n'), ((5056, 5081), 'numpy.dot', 'np.dot', (['col_vect', 'rot_mat'], {}), '(col_vect, rot_mat)\n', (5062, 5081), True, 'import numpy as np\n'), ((5816, 5838), 'numpy.array', 'np.array', (['[prev_env.S]'], {}), '([prev_env.S])\n', (5824, 5838), True, 'import numpy as np\n'), ((6630, 6652), 'numpy.array', 'np.array', (['[prev_env.S]'], {}), '([prev_env.S])\n', (6638, 6652), True, 'import numpy as np\n'), ((7292, 7314), 'numpy.array', 'np.array', (['[prev_env.S]'], {}), '([prev_env.S])\n', (7300, 7314), True, 'import numpy as np\n'), ((8137, 8187), 'numpy.sin', 'np.sin', (['(2 * (np.pi * env_id) / self.PERIOD + angle)'], {}), '(2 * (np.pi * env_id) / self.PERIOD + angle)\n', (8143, 8187), True, 'import numpy as np\n'), ((8891, 8941), 'numpy.sin', 'np.sin', (['(2 * (np.pi * env_id) / self.PERIOD + angle)'], {}), '(2 * (np.pi * env_id) / self.PERIOD + angle)\n', (8897, 8941), True, 'import numpy as np\n'), ((4173, 4204), 'numpy.tile', 'np.tile', (['x', '(self.num_peaks, 1)'], {}), '(x, (self.num_peaks, 1))\n', (4180, 4204), True, 'import numpy as np\n'), ((6150, 6172), 'mathutils.sign', 'mathutils.sign', (['result'], {}), '(result)\n', (6164, 6172), False, 'import mathutils\n')] |
import winsound
import cv2
import numpy as np
learning_parameter = 0.005
def main():
    """Sonify webcam motion: beep pitch tracks the fraction of moving pixels."""
    capture = cv2.VideoCapture(0)
    subtractor = cv2.createBackgroundSubtractorMOG2()
    while True:
        _, frame = capture.read()
        frame = cv2.flip(frame, 1)
        cv2.imshow('my webcam', frame)
        # Work on a small grayscale copy to keep background subtraction cheap.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.resize(gray, (0, 0), fx=0.25, fy=0.25)
        foreground = subtractor.apply(gray, learningRate=learning_parameter)
        moving = np.array(foreground >= 127, dtype='uint8')
        motion_fraction = np.sum(moving) / foreground.size
        # Map the motion fraction onto an audible frequency and beep 100 ms.
        winsound.Beep(int(3800 * motion_fraction / 2 + 100), 100)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| [
"cv2.createBackgroundSubtractorMOG2",
"cv2.flip",
"cv2.imshow",
"numpy.array",
"numpy.sum",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"winsound.Beep",
"cv2.resize",
"cv2.waitKey"
] | [((99, 118), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (115, 118), False, 'import cv2\n'), ((134, 170), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (168, 170), False, 'import cv2\n'), ((797, 820), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (818, 820), False, 'import cv2\n'), ((235, 251), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (243, 251), False, 'import cv2\n'), ((260, 288), 'cv2.imshow', 'cv2.imshow', (['"""my webcam"""', 'img'], {}), "('my webcam', img)\n", (270, 288), False, 'import cv2\n'), ((309, 346), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (321, 346), False, 'import cv2\n'), ((366, 412), 'cv2.resize', 'cv2.resize', (['img_gray', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(img_gray, (0, 0), fx=0.25, fy=0.25)\n', (376, 412), False, 'import cv2\n'), ((517, 562), 'numpy.array', 'np.array', (['(mask_sub_mog2 >= 127)'], {'dtype': '"""uint8"""'}), "(mask_sub_mog2 >= 127, dtype='uint8')\n", (525, 562), True, 'import numpy as np\n'), ((696, 725), 'winsound.Beep', 'winsound.Beep', (['frequency', '(100)'], {}), '(frequency, 100)\n', (709, 725), False, 'import winsound\n'), ((589, 608), 'numpy.sum', 'np.sum', (['mask_binary'], {}), '(mask_binary)\n', (595, 608), True, 'import numpy as np\n'), ((738, 752), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (749, 752), False, 'import cv2\n')] |
import numpy as np
from poppy.wfe import WavefrontError
from astropy import units as u
class PetaledM4(WavefrontError):
    '''
    A simplified 6-petals deformable mirror (like E-ELT's M4) with
    piston control on each petal.

    Parameters
    ----------
    piston : array-like Quantity, optional
        Per-petal piston values (length N_PETALS); defaults to zeros in nm.
    name : str, optional
        Optic name forwarded to the WavefrontError base class.
    **kwargs :
        Additional arguments passed through to WavefrontError.
    '''
    # number of independent pie-slice segments
    N_PETALS = 6
    def __init__(self, piston=None, name="M4", **kwargs):
        if piston is None:
            piston = np.zeros(self.N_PETALS) * u.nm
        self._piston = piston
        kwargs.update({'name': name})
        super(PetaledM4, self).__init__(**kwargs)
    def _mask_for_petal(self, x, y, petal_idx):
        # Boolean mask selecting the 60-degree angular sector of petal
        # ``petal_idx``: the azimuth arctan2(-x, -y) must lie strictly
        # between (petal_idx - 3) * pi/3 and (petal_idx - 2) * pi/3.
        return np.logical_and(
            np.arctan2(-x, -y) < (petal_idx - 2) * np.pi / 3,
            np.arctan2(-x, -y) > (petal_idx - 3) * np.pi / 3)
    def get_opd(self, wave):
        # Optical path difference map: each petal's sector receives a
        # uniform piston, converted from its Quantity to meters.
        y, x = self.get_coordinates(wave) # in meters
        opd = np.zeros(wave.shape, dtype=np.float64)
        for petal_idx in range(self.N_PETALS):
            mask = self._mask_for_petal(x, y, petal_idx)
            opd[mask] = self._piston[petal_idx].to(u.m).value
        return opd
| [
"numpy.zeros",
"numpy.arctan2"
] | [((815, 853), 'numpy.zeros', 'np.zeros', (['wave.shape'], {'dtype': 'np.float64'}), '(wave.shape, dtype=np.float64)\n', (823, 853), True, 'import numpy as np\n'), ((363, 386), 'numpy.zeros', 'np.zeros', (['self.N_PETALS'], {}), '(self.N_PETALS)\n', (371, 386), True, 'import numpy as np\n'), ((604, 622), 'numpy.arctan2', 'np.arctan2', (['(-x)', '(-y)'], {}), '(-x, -y)\n', (614, 622), True, 'import numpy as np\n'), ((666, 684), 'numpy.arctan2', 'np.arctan2', (['(-x)', '(-y)'], {}), '(-x, -y)\n', (676, 684), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
# Prefer the PyQt5 backend; fall back to PyQt4 only if PyQt5 is missing.
try:
    from PyQt5 import QtWidgets
    from xpdview.viewer_qt5 import XpdView
    app = QtWidgets.QApplication(sys.argv)
    print("INFO: Use PyQt5 backend")
except ImportError:
    # Only import failures should trigger the fallback; the previous bare
    # ``except:`` would also swallow genuine errors (e.g. QApplication
    # failing to start) and then fail confusingly on the PyQt4 import.
    from PyQt4 import QtGui
    from xpdview.viewer_qt4 import XpdView
    app = QtGui.QApplication(sys.argv)
    print("INFO: Use PyQt4 backend")
viewer = XpdView()
viewer.show()
# Build synthetic demo data: five random 50x50 images keyed '0'..'4',
# each paired with an (x, y) 1-D curve for the integration panel.
img_data_list = []
key_list = []
int_data_list = []
for i in range(5):
    key_list.append(str(i))
    img_data_list.append(np.random.rand(50, 50))
    int_data_list.append((np.linspace(0, 200, 200),
                          np.random.rand(200, 1)))
| [
"PyQt4.QtGui.QApplication",
"numpy.random.rand",
"xpdview.viewer_qt4.XpdView",
"numpy.linspace",
"PyQt5.QtWidgets.QApplication"
] | [((364, 373), 'xpdview.viewer_qt4.XpdView', 'XpdView', ([], {}), '()\n', (371, 373), False, 'from xpdview.viewer_qt4 import XpdView\n'), ((130, 162), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (152, 162), False, 'from PyQt5 import QtWidgets\n'), ((289, 317), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (307, 317), False, 'from PyQt4 import QtGui\n'), ((537, 559), 'numpy.random.rand', 'np.random.rand', (['(50)', '(50)'], {}), '(50, 50)\n', (551, 559), True, 'import numpy as np\n'), ((587, 611), 'numpy.linspace', 'np.linspace', (['(0)', '(200)', '(200)'], {}), '(0, 200, 200)\n', (598, 611), True, 'import numpy as np\n'), ((639, 661), 'numpy.random.rand', 'np.random.rand', (['(200)', '(1)'], {}), '(200, 1)\n', (653, 661), True, 'import numpy as np\n')] |
import numpy as np
from typing import Iterable, Tuple
import warnings
# Soft dependency: swarmlib is optional.  The flag records whether it could
# be imported so the wrapper classes below are only defined when available.
try:
    from swarmlib.util.problem_base import ProblemBase
    from swarmlib.pso.particle import Particle as PSOParticle
    using_swarmlib = True
except ImportError:
    using_swarmlib = False
# I like to give the little guys a run...but this isn't quite there.
# This rips out the guts of swarmlib so that it isn't bundled to visualization
# But it's still only good for 1,2,3 dim, and only really works for 2-dim.
# Here we convert 3-dim to 2-dim using space filling curve, which may not be the best idea.
# Hey, I tried.
if using_swarmlib:
    class NoVisualizer():
        """Headless stand-in for swarmlib's plotting visualizer.

        Mirrors the attributes swarmlib's visualizer keeps, but never draws
        anything -- it only records particle positions and derived
        velocities, which decouples the optimization from matplotlib.
        """
        def __init__(self, **kwargs):
            self.__lower_boundary = kwargs.get('lower_boundary', 0.)
            self.__upper_boundary = kwargs.get('upper_boundary', 4.)
            self.__iteration_number = kwargs.get('iteration_number', 10)
            self.__intervals = self.__iteration_number + 2 # Two extra intervals for unanimated start and end pose
            self.__interval_ms = kwargs.get('interval', 1000)
            self.__continuous = kwargs.get('continuous', False)
            self._dark = kwargs.get('dark', False)
            self.__function = kwargs['function']
            self._marker_size = 0
            self._index = 0
            self._vel_color = '#CFCFCF'
            self._marker_color = '#0078D7' if self._dark else '#FF0000'
            self._marker_colors = np.empty(0)
            self._positions = []
            self._velocities = []
            self.__frame_interval = 50 # ms
        def add_data(self, **kwargs) -> None:
            """Record the latest particle positions and the velocity since
            the previous snapshot (swarmlib calls this each iteration)."""
            positions: Iterable[Tuple[float, float]] = kwargs['positions']
            self._positions.append(np.transpose(positions))
            if len(self._positions) == 1:
                # Insert the first position twice to show it "unanimated" first.
                self._positions.append(np.transpose(positions))
            # Calculate at time t the velocity for step t-1
            self._velocities.append(self._positions[-1] - self._positions[-2])
    class InvisiblePSOProblem(ProblemBase):
        """Particle-swarm problem wired to the NoVisualizer so it can run
        without any plotting backend."""
        def __init__(self, **kwargs):
            """
            Initialize a new particle swarm optimization problem.
            """
            super().__init__(**kwargs)
            self.__iteration_number = kwargs['iteration_number']
            self.__particles = [
                PSOParticle(**kwargs, bit_generator=self._random)
                for _ in range(kwargs['particles'])
            ]
            # The library stores particles in the visualizer .... groan
            positions = [particle.position for particle in self.__particles]
            self._visualizer = NoVisualizer(**kwargs)
            self._visualizer.add_data(positions=positions)
        def solve(self) -> PSOParticle:
            """Run the PSO iterations and return the best particle found."""
            # And also update global_best_particle
            for _ in range(self.__iteration_number):
                # Update global best (particles compare by fitness value)
                global_best_particle = min(self.__particles)
                for particle in self.__particles:
                    particle.step(global_best_particle.position)
                # Add data for plot
                positions = [particle.position for particle in self.__particles]
                self._visualizer.add_data(positions=positions)
            return global_best_particle
def swarmlib_cube(objective, n_trials, n_dim, with_count=False, algo=None):
    """Minimize a function on the unit cube with swarmlib PSO, auditing calls.

    :param objective:   function defined on (0, 1)^n_dim
    :param n_trials:    guideline for the number of function evaluations
    :param n_dim:       dimension of the cube (only 2 is supported by PSO here)
    :param with_count:  if True, also return the number of objective calls
    :param algo:        must be 'pso' (the only algorithm wired up)
    :return: (best_val, best_x) or (best_val, best_x, feval_count)
    """
    assert algo == 'pso'
    assert n_dim == 2, 'swarmlib PSO wrapper only supports n_dim == 2'
    # Count evaluations in a closure rather than a module-level global, so
    # concurrent or nested calls cannot corrupt each other's tally.
    feval_count = 0

    def cube_objective(us):
        nonlocal feval_count
        assert all(0 <= ui <= 1 for ui in us), ' expecting value on square '
        feval_count += 1
        return objective(us)

    # Small budgets get fewer iterations; the rest of the budget becomes particles.
    iteration_number = 5 if n_trials < 50 else 10
    particles = max(int(n_trials / iteration_number), 1)
    problem = InvisiblePSOProblem(function=cube_objective, particles=particles,
                                 iteration_number=iteration_number,
                                 lower_boundary=0., upper_boundary=1.0)
    best_particle = problem.solve()
    best_x = best_particle.position.tolist()
    best_val = best_particle.value
    return (best_val, best_x, feval_count) if with_count else (best_val, best_x)
def swarmlib_pso_cube(objective, n_trials, n_dim, with_count=False):
    """Convenience wrapper: run :func:`swarmlib_cube` with the PSO algorithm."""
    return swarmlib_cube(objective=objective, n_trials=n_trials,
                         n_dim=n_dim, with_count=with_count, algo='pso')
# Optimizer registry exported for downstream use; intentionally empty for now.
SWARMLIB_OPTIZERS = [] # Not ready for the A-league yet.
if __name__ == '__main__':
    # Smoke test: run every registered optimizer against the classic humpday
    # objectives in 2 and 3 dimensions (a no-op while the registry is empty).
    from humpday.objectives.classic import CLASSIC_OBJECTIVES
    for objective in CLASSIC_OBJECTIVES:
        print(' ')
        print(objective.__name__)
        for n_dim in range(2,4):
            print('n_dim='+str(n_dim))
            for optimizer in SWARMLIB_OPTIZERS:
                print(optimizer(objective, n_trials=100, n_dim=n_dim, with_count=True))
| [
"swarmlib.pso.particle.Particle",
"numpy.transpose",
"numpy.empty"
] | [((1449, 1460), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1457, 1460), True, 'import numpy as np\n'), ((1732, 1755), 'numpy.transpose', 'np.transpose', (['positions'], {}), '(positions)\n', (1744, 1755), True, 'import numpy as np\n'), ((2420, 2469), 'swarmlib.pso.particle.Particle', 'PSOParticle', ([], {'bit_generator': 'self._random'}), '(**kwargs, bit_generator=self._random)\n', (2431, 2469), True, 'from swarmlib.pso.particle import Particle as PSOParticle\n'), ((1920, 1943), 'numpy.transpose', 'np.transpose', (['positions'], {}), '(positions)\n', (1932, 1943), True, 'import numpy as np\n')] |
########################################
# plot_fig05_barplot.py
#
# Description. Script used to actually plot Fig. 5 of the paper.
#
# Author. @victorcroisfelt
#
# Date. December 31, 2021
#
# This code is part of the code package used to generate the numeric results
# of the paper:
#
# <NAME>., <NAME>., and <NAME>., “User-Centric Perspective in
# Random Access Cell-Free Aided by Spatial Separability”, arXiv e-prints, 2021.
#
# Available on:
#
# https://arxiv.org/abs/2107.10294
#
# Comment. Please, make sure that you have the required data files. They are
# obtained by running the scripts:
#
# - data_fig05_barplot_cellfree.py
# - data_fig05_barplot_cellular.py
#
########################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
import warnings
########################################
# Preamble
########################################
# Comment the line below to see possible warnings related to python version
# issues
warnings.filterwarnings("ignore")
# NOTE(review): axis_font is defined but never used below -- confirm.
axis_font = {'size':'8'}
plt.rcParams.update({'font.size': 8})
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
matplotlib.rc('text', usetex=True)
# NOTE(review): newer matplotlib versions expect a plain string for the
# latex preamble rather than a list -- confirm against the pinned version.
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
########################################
# Lookup table
########################################
# Load best pair look up table (one per estimator); each .npz stores a
# pickled dict under the key "best_pair", hence .item() to unwrap it.
load = np.load("lookup/lookup_fig05_best_pair_est1.npz", allow_pickle=True)
best_pair_lookup_est1 = load["best_pair"]
best_pair_lookup_est1 = best_pair_lookup_est1.item()
load = np.load("lookup/lookup_fig05_best_pair_est2.npz", allow_pickle=True)
best_pair_lookup_est2 = load["best_pair"]
best_pair_lookup_est2 = best_pair_lookup_est2.item()
load = np.load("lookup/lookup_fig05_best_pair_est3.npz", allow_pickle=True)
best_pair_lookup_est3 = load["best_pair"]
best_pair_lookup_est3 = best_pair_lookup_est3.item()
# Load possible values of delta for Estimator 3
load = np.load("lookup/lookup_fig05_06_delta.npz", allow_pickle=True)
delta_lookup = load["delta"]
delta_lookup = delta_lookup.item()
# Range of collision sizes
collisions = np.arange(1, 11)
best_delta = np.zeros((collisions.size))
# Go through all collisions, reading the delta indexed by the best pair
# for the (collisionSize, 8) lookup entry.
for cs, collisionSize in enumerate(collisions):
    best_delta[cs] = delta_lookup[(collisionSize, 8, (best_pair_lookup_est3[(collisionSize, 8)])[1])]
########################################
# Loading data
########################################
print('--------------------------------------------------')
print('Fig 05: barplot')
print('--------------------------------------------------\n')
# Load data
data_cellular = np.load("data/fig05_barplot_cellular.npz")
data_cellfree_est1 = np.load("data/fig05_barplot_cellfree_est1.npz", allow_pickle=True)
data_cellfree_est2 = np.load("data/fig05_barplot_cellfree_est2.npz", allow_pickle=True)
data_cellfree_est3 = np.load("data/fig05_barplot_cellfree_est3.npz", allow_pickle=True)
# Print Table II
# NOTE(review): Estimator 1 prints the dict's .values() while Estimators 2
# and 3 print the whole dict -- confirm which presentation is intended.
print("*** TABLE II ****")
print("\n")
print("Estimator 1: " + str(best_pair_lookup_est1.values()))
print("\n")
print("Estimator 2: " + str(best_pair_lookup_est2))
print("\n")
print("Estimator 3: " + str(best_pair_lookup_est3))
print(" " + str())
print("\n")
print("*****************\n")
print("wait for the plot...\n")
# Extract NMSEs
nmse_cellular = data_cellular["nmse"]
nmse_cellfree_est1 = data_cellfree_est1["nmse"]
nmse_cellfree_est2 = data_cellfree_est2["nmse"]
nmse_cellfree_est3 = data_cellfree_est3["nmse"]
########################################
# Plot
########################################
fig, ax = plt.subplots(figsize=(3.15, 1.5))
width = 0.2
error_kw = dict(lw=1, capsize=1, capthick=1)
# Asymmetric error bars around entry [1]: distance down to entry [0] and
# up to the last entry of each nmse array.
dy_cellular = [-(nmse_cellular[0] - nmse_cellular[1]), nmse_cellular[-1] - nmse_cellular[1]]
dy_cellfree_est1 = [-(nmse_cellfree_est1[0] - nmse_cellfree_est1[1]), nmse_cellfree_est1[-1] - nmse_cellfree_est1[1]]
dy_cellfree_est2 = [-(nmse_cellfree_est2[0] - nmse_cellfree_est2[1]), nmse_cellfree_est2[-1] - nmse_cellfree_est2[1]]
dy_cellfree_est3 = [-(nmse_cellfree_est3[0] - nmse_cellfree_est3[1]), nmse_cellfree_est3[-1] - nmse_cellfree_est3[1]]
# Four grouped bars per collision size, offset by half-multiples of width.
ax.bar(collisions - 3/2*width, nmse_cellular[1], yerr=dy_cellular, width=width, linewidth=2.0, color='black', label='Cellular', align='center', alpha=0.5, log=True, error_kw=error_kw)
ax.bar(collisions - 1/2*width, nmse_cellfree_est1[1], yerr=dy_cellfree_est1, width=width, linewidth=2.0, label='Cell-free: Est. 1, $N=8$', align='center', alpha=0.5, log=True, error_kw=error_kw)
ax.bar(collisions + 1/2*width, nmse_cellfree_est2[1], yerr=dy_cellfree_est2, width=width, linewidth=2.0, label='Cell-free: Est. 2, $N=8$', align='center', alpha=0.5, log=True, error_kw=error_kw)
ax.bar(collisions + 3/2*width, nmse_cellfree_est3[1], yerr=dy_cellfree_est3, width=width, linewidth=2.0, label='Cell-free: Est. 3, $N=8$', align='center', alpha=0.5, log=True, error_kw=error_kw)
ax.set_xlabel("collision size $|\mathcal{S}_t|$")
ax.set_ylabel("${\mathrm{NMSE}}$")
ax.set_xticks(collisions)
ax.legend(fontsize='xx-small')
plt.show()
print("------------------- all done :) ------------------")
| [
"matplotlib.pyplot.rcParams.update",
"numpy.zeros",
"matplotlib.rc",
"matplotlib.pyplot.subplots",
"numpy.load",
"warnings.filterwarnings",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1046, 1079), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1069, 1079), False, 'import warnings\n'), ((1107, 1144), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (1126, 1144), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1181), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': '(8)'}), "('xtick', labelsize=8)\n", (1159, 1181), False, 'import matplotlib\n'), ((1182, 1217), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': '(8)'}), "('ytick', labelsize=8)\n", (1195, 1217), False, 'import matplotlib\n'), ((1219, 1253), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1232, 1253), False, 'import matplotlib\n'), ((1460, 1528), 'numpy.load', 'np.load', (['"""lookup/lookup_fig05_best_pair_est1.npz"""'], {'allow_pickle': '(True)'}), "('lookup/lookup_fig05_best_pair_est1.npz', allow_pickle=True)\n", (1467, 1528), True, 'import numpy as np\n'), ((1632, 1700), 'numpy.load', 'np.load', (['"""lookup/lookup_fig05_best_pair_est2.npz"""'], {'allow_pickle': '(True)'}), "('lookup/lookup_fig05_best_pair_est2.npz', allow_pickle=True)\n", (1639, 1700), True, 'import numpy as np\n'), ((1804, 1872), 'numpy.load', 'np.load', (['"""lookup/lookup_fig05_best_pair_est3.npz"""'], {'allow_pickle': '(True)'}), "('lookup/lookup_fig05_best_pair_est3.npz', allow_pickle=True)\n", (1811, 1872), True, 'import numpy as np\n'), ((2024, 2086), 'numpy.load', 'np.load', (['"""lookup/lookup_fig05_06_delta.npz"""'], {'allow_pickle': '(True)'}), "('lookup/lookup_fig05_06_delta.npz', allow_pickle=True)\n", (2031, 2086), True, 'import numpy as np\n'), ((2192, 2208), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (2201, 2208), True, 'import numpy as np\n'), ((2223, 2248), 'numpy.zeros', 'np.zeros', (['collisions.size'], {}), '(collisions.size)\n', (2231, 2248), True, 'import numpy as 
np\n'), ((2701, 2743), 'numpy.load', 'np.load', (['"""data/fig05_barplot_cellular.npz"""'], {}), "('data/fig05_barplot_cellular.npz')\n", (2708, 2743), True, 'import numpy as np\n'), ((2766, 2832), 'numpy.load', 'np.load', (['"""data/fig05_barplot_cellfree_est1.npz"""'], {'allow_pickle': '(True)'}), "('data/fig05_barplot_cellfree_est1.npz', allow_pickle=True)\n", (2773, 2832), True, 'import numpy as np\n'), ((2854, 2920), 'numpy.load', 'np.load', (['"""data/fig05_barplot_cellfree_est2.npz"""'], {'allow_pickle': '(True)'}), "('data/fig05_barplot_cellfree_est2.npz', allow_pickle=True)\n", (2861, 2920), True, 'import numpy as np\n'), ((2942, 3008), 'numpy.load', 'np.load', (['"""data/fig05_barplot_cellfree_est3.npz"""'], {'allow_pickle': '(True)'}), "('data/fig05_barplot_cellfree_est3.npz', allow_pickle=True)\n", (2949, 3008), True, 'import numpy as np\n'), ((3661, 3694), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.15, 1.5)'}), '(figsize=(3.15, 1.5))\n', (3673, 3694), True, 'import matplotlib.pyplot as plt\n'), ((5117, 5127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5125, 5127), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) 2019 The Boule Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Module for defining and setting the reference ellipsoid.
"""
from warnings import warn
import attr
import numpy as np
# Don't let ellipsoid parameters be changed to avoid messing up calculations
# accidentally.
@attr.s(frozen=True)
class Ellipsoid:
    """
    Reference oblate ellipsoid.
    The ellipsoid is oblate and spins around its minor axis. It is defined by
    four parameters (semi-major axis, flattening, geocentric gravitational
    constant, and angular velocity) and offers other derived quantities.
    **All attributes of this class are read-only and cannot be changed after
    instantiation.**
    All parameters are in SI units.
    .. note::
        Use :class:`boule.Sphere` if you desire zero flattening because there
        are singularities for this particular case in the normal gravity
        calculations.
    Parameters
    ----------
    name : str
        A short name for the ellipsoid, for example ``'WGS84'``.
    semimajor_axis : float
        The semi-major axis of the ellipsoid (equatorial radius), usually
        represented by "a" [meters].
    flattening : float
        The flattening of the ellipsoid (f) [adimensional].
    geocentric_grav_const : float
        The geocentric gravitational constant (GM) [m^3 s^-2].
    angular_velocity : float
        The angular velocity of the rotating ellipsoid (omega) [rad s^-1].
    long_name : str or None
        A long name for the ellipsoid, for example ``"World Geodetic System
        1984"`` (optional).
    reference : str or None
        Citation for the ellipsoid parameter values (optional).
    Examples
    --------
    We can define an ellipsoid by setting the 4 key numerical parameters:
    >>> ellipsoid = Ellipsoid(
    ...     name="oblate-ellipsoid",
    ...     long_name="Oblate Ellipsoid",
    ...     semimajor_axis=1,
    ...     flattening=0.5,
    ...     geocentric_grav_const=1,
    ...     angular_velocity=0,
    ... )
    >>> print(ellipsoid) # doctest: +ELLIPSIS
    Ellipsoid(name='oblate-ellipsoid', ...)
    >>> print(ellipsoid.long_name)
    Oblate Ellipsoid
    The class defines several derived attributes based on the input parameters:
    >>> print("{:.2f}".format(ellipsoid.semiminor_axis))
    0.50
    >>> print("{:.2f}".format(ellipsoid.mean_radius))
    0.83
    >>> print("{:.2f}".format(ellipsoid.linear_eccentricity))
    0.87
    >>> print("{:.2f}".format(ellipsoid.first_eccentricity))
    0.87
    >>> print("{:.2f}".format(ellipsoid.second_eccentricity))
    1.73
    """
    # Defining parameters (frozen=True makes them read-only after creation).
    name = attr.ib()  # short identifier, e.g. "WGS84"
    semimajor_axis = attr.ib()  # a [meters]
    flattening = attr.ib()  # f [adimensional]
    geocentric_grav_const = attr.ib()  # GM [m^3 s^-2]
    angular_velocity = attr.ib()  # omega [rad s^-1]
    long_name = attr.ib(default=None)  # optional descriptive name
    reference = attr.ib(default=None)  # optional citation for the values
    @flattening.validator
    def _check_flattening(
        self, flattening, value
    ): # pylint: disable=no-self-use,unused-argument
        """
        Check that the flattening is in the open interval (0, 1).
        Raises ValueError for values outside [0, 1) or exactly zero and warns
        when the flattening is small enough to cause numerical trouble.
        """
        if value < 0 or value >= 1:
            raise ValueError(
                f"Invalid flattening '{value}'. "
                "Should be greater than zero and lower than 1."
            )
        if value == 0:
            raise ValueError(
                "Flattening equal to zero will lead to errors in normal gravity. "
                "Use boule.Sphere for representing ellipsoids with zero flattening."
            )
        # Threshold guards against near-singular geometry in the normal
        # gravity formulas below (divisions by the linear eccentricity).
        if value < 1e-7:
            warn(
                f"Flattening is too close to zero ('{value}'). "
                "This may lead to inaccurate results and division by zero errors. "
                "Use boule.Sphere for representing ellipsoids with zero flattening."
            )
    @semimajor_axis.validator
    def _check_semimajor_axis(
        self, semimajor_axis, value
    ): # pylint: disable=no-self-use,unused-argument
        """
        Check that the semimajor_axis is strictly positive.
        """
        if not value > 0:
            raise ValueError(
                f"Invalid semi-major axis '{value}'. Should be greater than zero."
            )
    @geocentric_grav_const.validator
    def _check_geocentric_grav_const(
        self, geocentric_grav_const, value
    ): # pylint: disable=no-self-use,unused-argument
        """
        Warn if geocentric_grav_const is negative (allowed, but unusual).
        """
        if value < 0:
            warn(f"The geocentric gravitational constant is negative: '{value}'")
    @property
    def semiminor_axis(self):
        "The small (polar) axis of the ellipsoid [meters]: b = a * (1 - f)"
        return self.semimajor_axis * (1 - self.flattening)
    @property
    def linear_eccentricity(self):
        "The linear eccentricity [meters]: E = sqrt(a² - b²)"
        return np.sqrt(self.semimajor_axis ** 2 - self.semiminor_axis ** 2)
    @property
    def first_eccentricity(self):
        "The first eccentricity [adimensional]: e = E / a"
        return self.linear_eccentricity / self.semimajor_axis
    @property
    def second_eccentricity(self):
        "The second eccentricity [adimensional]: e' = E / b"
        return self.linear_eccentricity / self.semiminor_axis
    @property
    def mean_radius(self):
        """
        The arithmetic mean radius :math:`R_1=(2a+b)/3` [Moritz1988]_ [meters]
        """
        return 1 / 3 * (2 * self.semimajor_axis + self.semiminor_axis)
    @property
    def emm(self):
        r"Auxiliary quantity :math:`m = \omega^2 a^2 b / (GM)`"
        return (
            self.angular_velocity ** 2
            * self.semimajor_axis ** 2
            * self.semiminor_axis
            / self.geocentric_grav_const
        )
    @property
    def gravity_equator(self):
        """
        The norm of the gravity vector on the ellipsoid at the equator [m/s²]
        """
        # Closed-form expression in terms of b/E and arctan(E/b); the aux
        # factor carries the second eccentricity contribution.
        ratio = self.semiminor_axis / self.linear_eccentricity
        arctan = np.arctan2(self.linear_eccentricity, self.semiminor_axis)
        aux = (
            self.second_eccentricity
            * (3 * (1 + ratio ** 2) * (1 - ratio * arctan) - 1)
            / (3 * ((1 + 3 * ratio ** 2) * arctan - 3 * ratio))
        )
        axis_mul = self.semimajor_axis * self.semiminor_axis
        result = self.geocentric_grav_const * (1 - self.emm - self.emm * aux) / axis_mul
        return result
    @property
    def gravity_pole(self):
        "The norm of the gravity vector on the ellipsoid at the poles [m/s²]"
        # Same building blocks as gravity_equator but with the polar
        # combination of terms (note the 1.5 denominator and a² scaling).
        ratio = self.semiminor_axis / self.linear_eccentricity
        arctan = np.arctan2(self.linear_eccentricity, self.semiminor_axis)
        aux = (
            self.second_eccentricity
            * (3 * (1 + ratio ** 2) * (1 - ratio * arctan) - 1)
            / (1.5 * ((1 + 3 * ratio ** 2) * arctan - 3 * ratio))
        )
        result = (
            self.geocentric_grav_const * (1 + self.emm * aux) / self.semimajor_axis ** 2
        )
        return result
    def geocentric_radius(self, latitude, geodetic=True):
        r"""
        Distance from the center of the ellipsoid to its surface.
        The geocentric radius and is a function of the geodetic latitude
        :math:`\phi` and the semi-major and semi-minor axis, a and b:
        .. math::
            R(\phi) = \sqrt{\dfrac{
                (a^2\cos\phi)^2 + (b^2\sin\phi)^2}{
                (a\cos\phi)^2 + (b\sin\phi)^2 }
            }
        See https://en.wikipedia.org/wiki/Earth_radius#Geocentric_radius
        The same could be achieved with
        :meth:`boule.Ellipsoid.geodetic_to_spherical` by passing any value for
        the longitudes and heights equal to zero. This method provides a
        simpler and possibly faster alternative.
        Alternatively, the geocentric radius can also be expressed in terms of
        the geocentric (spherical) latitude :math:`\theta`:
        .. math::
            R(\theta) = \sqrt{\dfrac{1}{
                (\frac{\cos\theta}{a})^2 + (\frac{\sin\theta}{b})^2 }
            }
        This can be useful if you already have the geocentric latitudes and
        need the geocentric radius of the ellipsoid (for example, in spherical
        harmonic analysis). In these cases, the coordinate conversion route is
        not possible since we need the radial coordinates to do that in the
        first place.
        .. note::
            No elevation is taken into account (the height is zero). If you
            need the geocentric radius at a height other than zero, use
            :meth:`boule.Ellipsoid.geodetic_to_spherical` instead.
        Parameters
        ----------
        latitude : float or array
            Latitude coordinates on geodetic coordinate system in degrees.
        geodetic : bool
            If True (default), will assume that latitudes are geodetic
            latitudes. Otherwise, will that they are geocentric spherical
            latitudes.
        Returns
        -------
        geocentric_radius : float or array
            The geocentric radius for the given latitude(s) in the same units
            as the ellipsoid axis.
        """
        latitude_rad = np.radians(latitude)
        coslat, sinlat = np.cos(latitude_rad), np.sin(latitude_rad)
        # Avoid doing this in favour of having the user do the conversions when
        # possible. It's not the case here, so we made an exception.
        if geodetic:
            radius = np.sqrt(
                (
                    (self.semimajor_axis ** 2 * coslat) ** 2
                    + (self.semiminor_axis ** 2 * sinlat) ** 2
                )
                / (
                    (self.semimajor_axis * coslat) ** 2
                    + (self.semiminor_axis * sinlat) ** 2
                )
            )
        else:
            radius = np.sqrt(
                1
                / (
                    (coslat / self.semimajor_axis) ** 2
                    + (sinlat / self.semiminor_axis) ** 2
                )
            )
        return radius
    def prime_vertical_radius(self, sinlat):
        r"""
        Calculate the prime vertical radius for a given geodetic latitude
        The prime vertical radius is defined as:
        .. math::
            N(\phi) = \frac{a}{\sqrt{1 - e^2 \sin^2(\phi)}}
        Where :math:`a` is the semi-major axis and :math:`e` is the first
        eccentricity.
        This function receives the sine of the latitude as input to avoid
        repeated computations of trigonometric functions.
        Parameters
        ----------
        sinlat : float or array-like
            Sine of the latitude angle.
        Returns
        -------
        prime_vertical_radius : float or array-like
            Prime vertical radius given in the same units as the semi-major
            axis
        """
        return self.semimajor_axis / np.sqrt(
            1 - self.first_eccentricity ** 2 * sinlat ** 2
        )
    def geodetic_to_spherical(self, longitude, latitude, height):
        """
        Convert from geodetic to geocentric spherical coordinates.
        The geodetic datum is defined by this ellipsoid. The coordinates are
        converted following [Vermeille2002]_.
        Parameters
        ----------
        longitude : array
            Longitude coordinates on geodetic coordinate system in degrees.
        latitude : array
            Latitude coordinates on geodetic coordinate system in degrees.
        height : array
            Ellipsoidal heights in meters.
        Returns
        -------
        longitude : array
            Longitude coordinates on geocentric spherical coordinate system in
            degrees.
            The longitude coordinates are not modified during this conversion.
        spherical_latitude : array
            Converted latitude coordinates on geocentric spherical coordinate
            system in degrees.
        radius : array
            Converted spherical radius coordinates in meters.
        """
        latitude_rad = np.radians(latitude)
        coslat, sinlat = np.cos(latitude_rad), np.sin(latitude_rad)
        prime_vertical_radius = self.prime_vertical_radius(sinlat)
        # Instead of computing X and Y, we only compute the projection on the
        # XY plane: xy_projection = sqrt( X**2 + Y**2 )
        xy_projection = (height + prime_vertical_radius) * coslat
        z_cartesian = (
            height + (1 - self.first_eccentricity ** 2) * prime_vertical_radius
        ) * sinlat
        radius = np.sqrt(xy_projection ** 2 + z_cartesian ** 2)
        spherical_latitude = np.degrees(np.arcsin(z_cartesian / radius))
        return longitude, spherical_latitude, radius
    def spherical_to_geodetic(self, longitude, spherical_latitude, radius):
        """
        Convert from geocentric spherical to geodetic coordinates.
        The geodetic datum is defined by this ellipsoid. The coordinates are
        converted following [Vermeille2002]_.
        Parameters
        ----------
        longitude : array
            Longitude coordinates on geocentric spherical coordinate system in
            degrees.
        spherical_latitude : array
            Latitude coordinates on geocentric spherical coordinate system in
            degrees.
        radius : array
            Spherical radius coordinates in meters.
        Returns
        -------
        longitude : array
            Longitude coordinates on geodetic coordinate system in degrees.
            The longitude coordinates are not modified during this conversion.
        latitude : array
            Converted latitude coordinates on geodetic coordinate system in
            degrees.
        height : array
            Converted ellipsoidal height coordinates in meters.
        """
        spherical_latitude = np.radians(spherical_latitude)
        # k, Z, and D are the closed-form intermediates of Vermeille (2002).
        k, big_z, big_d = self._spherical_to_geodetic_terms(spherical_latitude, radius)
        latitude = np.degrees(
            2 * np.arctan(big_z / (big_d + np.sqrt(big_d ** 2 + big_z ** 2)))
        )
        height = (
            (k + self.first_eccentricity ** 2 - 1)
            / k
            * np.sqrt(big_d ** 2 + big_z ** 2)
        )
        return longitude, latitude, height
    def _spherical_to_geodetic_terms(self, spherical_latitude, radius):
        "Calculate intermediate terms needed for the conversion."
        # Offload computation of these intermediate variables here to clean up
        # the main function body
        # Variable names mirror the symbols in Vermeille (2002); the
        # subscripted p_0, q_0, ..., w_0 build up to k, Z, and D used by
        # spherical_to_geodetic.
        cos_latitude = np.cos(spherical_latitude)
        big_z = radius * np.sin(spherical_latitude)
        p_0 = radius ** 2 * cos_latitude ** 2 / self.semimajor_axis ** 2
        q_0 = (1 - self.first_eccentricity ** 2) / self.semimajor_axis ** 2 * big_z ** 2
        r_0 = (p_0 + q_0 - self.first_eccentricity ** 4) / 6
        s_0 = self.first_eccentricity ** 4 * p_0 * q_0 / 4 / r_0 ** 3
        t_0 = np.cbrt(1 + s_0 + np.sqrt(2 * s_0 + s_0 ** 2))
        u_0 = r_0 * (1 + t_0 + 1 / t_0)
        v_0 = np.sqrt(u_0 ** 2 + q_0 * self.first_eccentricity ** 4)
        w_0 = self.first_eccentricity ** 2 * (u_0 + v_0 - q_0) / 2 / v_0
        k = np.sqrt(u_0 + v_0 + w_0 ** 2) - w_0
        big_d = k * radius * cos_latitude / (k + self.first_eccentricity ** 2)
        return k, big_z, big_d
    def normal_gravity(
        self, latitude, height, si_units=False
    ): # pylint: disable=too-many-locals
        """
        Calculate normal gravity at any latitude and height.
        Computes the magnitude of the gradient of the gravity potential
        (gravitational + centrifugal) generated by the ellipsoid at the given
        latitude and (geometric) height. Uses of a closed form expression of
        [LiGotze2001]_.
        Parameters
        ----------
        latitude : float or array
            The (geodetic) latitude where the normal gravity will be computed
            (in degrees).
        height : float or array
            The ellipsoidal (geometric) height of computation the point (in
            meters).
        si_units : bool
            Return the value in mGal (False, default) or SI units (True)
        Returns
        -------
        gamma : float or array
            The normal gravity in mGal (or in m/s² if ``si_units=True``).
        """
        sinlat = np.sin(np.deg2rad(latitude))
        coslat = np.sqrt(1 - sinlat ** 2)
        # The terms below follow the variable names from Li and Goetze (2001)
        cosbeta_l2, sinbeta_l2, b_l, q_0, q_l, big_w = self._normal_gravity_terms(
            sinlat, coslat, height
        )
        # Put together gamma using 3 terms
        term1 = self.geocentric_grav_const / (b_l ** 2 + self.linear_eccentricity ** 2)
        term2 = (0.5 * sinbeta_l2 - 1 / 6) * (
            self.semimajor_axis ** 2
            * self.linear_eccentricity
            * q_l
            * self.angular_velocity ** 2
            / ((b_l ** 2 + self.linear_eccentricity ** 2) * q_0)
        )
        term3 = -cosbeta_l2 * b_l * self.angular_velocity ** 2
        gamma = (term1 + term2 + term3) / big_w
        if si_units:
            return gamma
        # Convert gamma from SI to mGal
        return gamma * 1e5
    def _normal_gravity_terms(self, sinlat, coslat, height):
        "Calculate intermediate terms needed for the calculations."
        # Offload computation of these intermediate variables here to clean up
        # the main function body
        # beta is an auxiliary (reduced-latitude-like) angle; the remaining
        # symbols follow Li and Goetze (2001).
        beta = np.arctan2(self.semiminor_axis * sinlat, self.semimajor_axis * coslat)
        zl2 = (self.semiminor_axis * np.sin(beta) + height * sinlat) ** 2
        rl2 = (self.semimajor_axis * np.cos(beta) + height * coslat) ** 2
        big_d = (rl2 - zl2) / self.linear_eccentricity ** 2
        big_r = (rl2 + zl2) / self.linear_eccentricity ** 2
        cosbeta_l2 = 0.5 * (1 + big_r) - np.sqrt(0.25 * (1 + big_r ** 2) - 0.5 * big_d)
        sinbeta_l2 = 1 - cosbeta_l2
        b_l = np.sqrt(rl2 + zl2 - self.linear_eccentricity ** 2 * cosbeta_l2)
        q_0 = 0.5 * (
            (1 + 3 * (self.semiminor_axis / self.linear_eccentricity) ** 2)
            * np.arctan2(self.linear_eccentricity, self.semiminor_axis)
            - 3 * self.semiminor_axis / self.linear_eccentricity
        )
        q_l = (
            3
            * (1 + (b_l / self.linear_eccentricity) ** 2)
            * (
                1
                - b_l
                / self.linear_eccentricity
                * np.arctan2(self.linear_eccentricity, b_l)
            )
            - 1
        )
        big_w = np.sqrt(
            (b_l ** 2 + self.linear_eccentricity ** 2 * sinbeta_l2)
            / (b_l ** 2 + self.linear_eccentricity ** 2)
        )
        return cosbeta_l2, sinbeta_l2, b_l, q_0, q_l, big_w
| [
"numpy.radians",
"attr.s",
"numpy.sqrt",
"numpy.sin",
"numpy.arcsin",
"numpy.deg2rad",
"numpy.arctan2",
"numpy.cos",
"warnings.warn",
"attr.ib"
] | [((443, 462), 'attr.s', 'attr.s', ([], {'frozen': '(True)'}), '(frozen=True)\n', (449, 462), False, 'import attr\n'), ((2770, 2779), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2777, 2779), False, 'import attr\n'), ((2801, 2810), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2808, 2810), False, 'import attr\n'), ((2828, 2837), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2835, 2837), False, 'import attr\n'), ((2866, 2875), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2873, 2875), False, 'import attr\n'), ((2899, 2908), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2906, 2908), False, 'import attr\n'), ((2925, 2946), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (2932, 2946), False, 'import attr\n'), ((2963, 2984), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (2970, 2984), False, 'import attr\n'), ((4901, 4961), 'numpy.sqrt', 'np.sqrt', (['(self.semimajor_axis ** 2 - self.semiminor_axis ** 2)'], {}), '(self.semimajor_axis ** 2 - self.semiminor_axis ** 2)\n', (4908, 4961), True, 'import numpy as np\n'), ((6004, 6061), 'numpy.arctan2', 'np.arctan2', (['self.linear_eccentricity', 'self.semiminor_axis'], {}), '(self.linear_eccentricity, self.semiminor_axis)\n', (6014, 6061), True, 'import numpy as np\n'), ((6626, 6683), 'numpy.arctan2', 'np.arctan2', (['self.linear_eccentricity', 'self.semiminor_axis'], {}), '(self.linear_eccentricity, self.semiminor_axis)\n', (6636, 6683), True, 'import numpy as np\n'), ((9202, 9222), 'numpy.radians', 'np.radians', (['latitude'], {}), '(latitude)\n', (9212, 9222), True, 'import numpy as np\n'), ((12062, 12082), 'numpy.radians', 'np.radians', (['latitude'], {}), '(latitude)\n', (12072, 12082), True, 'import numpy as np\n'), ((12558, 12604), 'numpy.sqrt', 'np.sqrt', (['(xy_projection ** 2 + z_cartesian ** 2)'], {}), '(xy_projection ** 2 + z_cartesian ** 2)\n', (12565, 12604), True, 'import numpy as np\n'), ((13850, 13880), 'numpy.radians', 'np.radians', (['spherical_latitude'], {}), '(spherical_latitude)\n', 
(13860, 13880), True, 'import numpy as np\n'), ((14548, 14574), 'numpy.cos', 'np.cos', (['spherical_latitude'], {}), '(spherical_latitude)\n', (14554, 14574), True, 'import numpy as np\n'), ((15035, 15089), 'numpy.sqrt', 'np.sqrt', (['(u_0 ** 2 + q_0 * self.first_eccentricity ** 4)'], {}), '(u_0 ** 2 + q_0 * self.first_eccentricity ** 4)\n', (15042, 15089), True, 'import numpy as np\n'), ((16343, 16367), 'numpy.sqrt', 'np.sqrt', (['(1 - sinlat ** 2)'], {}), '(1 - sinlat ** 2)\n', (16350, 16367), True, 'import numpy as np\n'), ((17443, 17513), 'numpy.arctan2', 'np.arctan2', (['(self.semiminor_axis * sinlat)', '(self.semimajor_axis * coslat)'], {}), '(self.semiminor_axis * sinlat, self.semimajor_axis * coslat)\n', (17453, 17513), True, 'import numpy as np\n'), ((17920, 17983), 'numpy.sqrt', 'np.sqrt', (['(rl2 + zl2 - self.linear_eccentricity ** 2 * cosbeta_l2)'], {}), '(rl2 + zl2 - self.linear_eccentricity ** 2 * cosbeta_l2)\n', (17927, 17983), True, 'import numpy as np\n'), ((18532, 18645), 'numpy.sqrt', 'np.sqrt', (['((b_l ** 2 + self.linear_eccentricity ** 2 * sinbeta_l2) / (b_l ** 2 + self\n .linear_eccentricity ** 2))'], {}), '((b_l ** 2 + self.linear_eccentricity ** 2 * sinbeta_l2) / (b_l ** 2 +\n self.linear_eccentricity ** 2))\n', (18539, 18645), True, 'import numpy as np\n'), ((3652, 3847), 'warnings.warn', 'warn', (['f"""Flattening is too close to zero (\'{value}\'). This may lead to inaccurate results and division by zero errors. Use boule.Sphere for representing ellipsoids with zero flattening."""'], {}), '(\n f"Flattening is too close to zero (\'{value}\'). This may lead to inaccurate results and division by zero errors. 
Use boule.Sphere for representing ellipsoids with zero flattening."\n )\n', (3656, 3847), False, 'from warnings import warn\n'), ((4560, 4629), 'warnings.warn', 'warn', (['f"""The geocentric gravitational constant is negative: \'{value}\'"""'], {}), '(f"The geocentric gravitational constant is negative: \'{value}\'")\n', (4564, 4629), False, 'from warnings import warn\n'), ((9248, 9268), 'numpy.cos', 'np.cos', (['latitude_rad'], {}), '(latitude_rad)\n', (9254, 9268), True, 'import numpy as np\n'), ((9270, 9290), 'numpy.sin', 'np.sin', (['latitude_rad'], {}), '(latitude_rad)\n', (9276, 9290), True, 'import numpy as np\n'), ((9482, 9663), 'numpy.sqrt', 'np.sqrt', (['(((self.semimajor_axis ** 2 * coslat) ** 2 + (self.semiminor_axis ** 2 *\n sinlat) ** 2) / ((self.semimajor_axis * coslat) ** 2 + (self.\n semiminor_axis * sinlat) ** 2))'], {}), '(((self.semimajor_axis ** 2 * coslat) ** 2 + (self.semiminor_axis **\n 2 * sinlat) ** 2) / ((self.semimajor_axis * coslat) ** 2 + (self.\n semiminor_axis * sinlat) ** 2))\n', (9489, 9663), True, 'import numpy as np\n'), ((9852, 9945), 'numpy.sqrt', 'np.sqrt', (['(1 / ((coslat / self.semimajor_axis) ** 2 + (sinlat / self.semiminor_axis) **\n 2))'], {}), '(1 / ((coslat / self.semimajor_axis) ** 2 + (sinlat / self.\n semiminor_axis) ** 2))\n', (9859, 9945), True, 'import numpy as np\n'), ((10904, 10959), 'numpy.sqrt', 'np.sqrt', (['(1 - self.first_eccentricity ** 2 * sinlat ** 2)'], {}), '(1 - self.first_eccentricity ** 2 * sinlat ** 2)\n', (10911, 10959), True, 'import numpy as np\n'), ((12108, 12128), 'numpy.cos', 'np.cos', (['latitude_rad'], {}), '(latitude_rad)\n', (12114, 12128), True, 'import numpy as np\n'), ((12130, 12150), 'numpy.sin', 'np.sin', (['latitude_rad'], {}), '(latitude_rad)\n', (12136, 12150), True, 'import numpy as np\n'), ((12645, 12676), 'numpy.arcsin', 'np.arcsin', (['(z_cartesian / radius)'], {}), '(z_cartesian / radius)\n', (12654, 12676), True, 'import numpy as np\n'), ((14188, 14220), 'numpy.sqrt', 
'np.sqrt', (['(big_d ** 2 + big_z ** 2)'], {}), '(big_d ** 2 + big_z ** 2)\n', (14195, 14220), True, 'import numpy as np\n'), ((14600, 14626), 'numpy.sin', 'np.sin', (['spherical_latitude'], {}), '(spherical_latitude)\n', (14606, 14626), True, 'import numpy as np\n'), ((15175, 15204), 'numpy.sqrt', 'np.sqrt', (['(u_0 + v_0 + w_0 ** 2)'], {}), '(u_0 + v_0 + w_0 ** 2)\n', (15182, 15204), True, 'import numpy as np\n'), ((16304, 16324), 'numpy.deg2rad', 'np.deg2rad', (['latitude'], {}), '(latitude)\n', (16314, 16324), True, 'import numpy as np\n'), ((17823, 17869), 'numpy.sqrt', 'np.sqrt', (['(0.25 * (1 + big_r ** 2) - 0.5 * big_d)'], {}), '(0.25 * (1 + big_r ** 2) - 0.5 * big_d)\n', (17830, 17869), True, 'import numpy as np\n'), ((14952, 14979), 'numpy.sqrt', 'np.sqrt', (['(2 * s_0 + s_0 ** 2)'], {}), '(2 * s_0 + s_0 ** 2)\n', (14959, 14979), True, 'import numpy as np\n'), ((17551, 17563), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (17557, 17563), True, 'import numpy as np\n'), ((17625, 17637), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (17631, 17637), True, 'import numpy as np\n'), ((18096, 18153), 'numpy.arctan2', 'np.arctan2', (['self.linear_eccentricity', 'self.semiminor_axis'], {}), '(self.linear_eccentricity, self.semiminor_axis)\n', (18106, 18153), True, 'import numpy as np\n'), ((18434, 18475), 'numpy.arctan2', 'np.arctan2', (['self.linear_eccentricity', 'b_l'], {}), '(self.linear_eccentricity, b_l)\n', (18444, 18475), True, 'import numpy as np\n'), ((14043, 14075), 'numpy.sqrt', 'np.sqrt', (['(big_d ** 2 + big_z ** 2)'], {}), '(big_d ** 2 + big_z ** 2)\n', (14050, 14075), True, 'import numpy as np\n')] |
import pdb
import warnings
import numpy as np
from scipy import stats
import astropy.units as units
import astropy.constants as const
from astropy.cosmology import WMAP7
class obs_lens_system:
"""
This class constructs an object representing an observer-lens-source system, which contains the
lens redshift, and data vectors for the background sources of a given lens, including the lensing
geometry, and methods to perform computations on that data.
Parameters
----------
zl : float
The redshift of the lens.
cosmo : object, optional
An astropy cosmology object (defaults to `WMAP7`).
Attributes
----------
zl : float
The redshift of the lens.
has_sources : boolean
Whether or not the background population has been set for this instance
(`False` until `set_background()` is called).
bg_theta1 : float array
The source lens-centric azimuthal angular coordinates, in arcseconds
(uninitialized until `set_background()` is called).
bg_theta2 : float array
The source lens-centric coaltitude angular coordinates, in arcseconds
(uninitialized until `set_background()` is called).
zs : float array
Redshifts of background sources
(uninitialized until `set_background()` is called).
r : float array
Projected separation of each source at the redshift `zl`, in comoving :math:`\\text{Mpc}`
(uninitialized until `set_background()` is called).
y1 : float array
The real component of the source shears.
y2 : float array
The imaginary component of the source shears.
yt : float array
The source tangential shears.
k : float array
The source convergences.
Methods
-------
set_background(theta1, theta2, zs, y1, y2)
Defines and assigns background souce data vectors to attributes of the lens object.
get_background()
Returns the source population data vectors to the caller, as a list.
calc_sigma_crit()
Computes the critical surface density at the redshift `zl`.
"""
def __init__(self, zl, cosmo=WMAP7):
self.zl = zl
self._cosmo = cosmo
self._has_sources = False
self._has_shear12 = False
self._has_shear1 = False
self._has_shear2 = False
self._has_kappa = False
self._has_rho = False
self._has_radial_cuts = False
self._rmin = None
self._rmax = None
self._theta1 = None
self._theta2 = None
self._zs = None
self._r = None
self._phi = None
self._y1 = None
self._y2 = None
self._yt = None
self._k =None
def _check_sources(self):
"""
Checks that set_background has been called (intended to be called before any
operations on the attributes initialized by `set_background()`).
"""
assert(self._has_sources), 'sources undefined; first run set_background()'
def set_radial_cuts(self, rmin=None, rmax=None):
'''
Sets a class-wide radial mask which will be applied to data vectors returned from
`get_background()`, `calc_delta_sigma()`, `calc_delta_sigma_binned()`, and `calc_sigma_crit()`.
Parameters
----------
rmin : float, optional
Sources with halo-centric radial distances less than this value will be removed by
application of the mask constructed from this function. Defautls to None, in which case
rmin is set to `0` (i.e. nothing is masked on the upper end of the radial distribution).
rmax : float, optional
Sources with halo-centric radial distances greater than this value will be removed by
application of the mask constructed from this function. Defautls to None, in which case
rmax is set to coincide with the maximum source radial distance (i.e. nothing is masked
on the upper end of the radial distribution).
'''
self._check_sources()
if(rmin is None): rmin = 0
if(rmax is None): rmax = np.max(self._r)
self._radial_mask = np.logical_and(self._r >= rmin, self._r <= rmax)
def set_background(self, theta1, theta2, zs, y1=None, y2=None, yt=None, k=None, rho=None):
'''
Defines and assigns background souce data vectors to attributes of the lens object,
including the angular positions, redshifts, projected comoving distances from
the lens center in comoving :math:`\\text{Mpc}`, and shear components. The user
should either pass the shear components `y1` and `y2`, or the tangential shear `yt`;
if both or neither are passed, an exception will be raised.
Parameters
----------
theta1 : float array
The source lens-centric azimuthal angular coordinates, in arcseconds.
theta2 : float_array
The source lens-centric coaltitude angular coordinates, in arcseconds.
zs : float array
The source redshifts.
y1 : float array, optional
The shear component :math:`\\gamma_1`. Must be passed along with `y2`, unless passing `yt`.
y2 : float array, optional
The shear component :math:`\\gamma_2`. Must be passed along with `y1`, unless passing `yt`.
yt : float array, optional
The tangential shear :math:`\\gamma_T`. Must be passed if not passing `y1` and `y2`.
k : float array, optional
The convergence :math:`\\kappa`. Not needed for any computations of this class, but is
offered as a convenience. Defaults to `None`.
rho : float array, optional
The matter density at the projected source positions on the lens plane. Not needed for
any computations of this class, but is offered as a convenience; intended use is in the
case that the user wishes to fit `\\delta\\Sigma` directly to the projected mass density
on the grid (output prior to ray-tracing). Defaults to `None`.
'''
# make sure shear was passed correctly -- either tangenetial, or components, not both
# the _has_shear12 attribute will be used to communicate to other methods which usage
# has been invoked
if((y1 is None and y2 is None and yt is None) or
((y1 is not None or y2 is not None) and yt is not None)):
raise Exception('Either y1 and y2 must be passed, or yt must be passed, not both.')
# initialize source data vectors
self._theta1 = np.array((np.pi/180) * (theta1/3600))
self._theta2 = np.array((np.pi/180) * (theta2/3600))
self._zs = np.array(zs)
self._y1 = np.array(y1)
self._y2 = np.array(y2)
self._yt = np.array(yt)
self._k = np.array(k)
self._rho = np.array(rho)
# set flags and compute additonal quantities
if(yt is None): self._has_shear12 = True
if(k is not None): self._has_kappa = True
if(rho is not None): self._has_rho = True
self._has_sources = True
self._comp_bg_quantities()
self.set_radial_cuts(None, None)
def _comp_bg_quantities(self):
"""
Computes background source quantites that depend on the data vectors initialized in
set_background (this function meant to be called from the setter method of each
source property).
"""
self._check_sources()
# compute halo-centric projected radial separation of each source, in proper Mpc
#self._r = np.linalg.norm([np.tan(self._theta1), np.tan(self._theta2)], axis=0) * \
# self._cosmo.comoving_distance(self.zl).value
#arcsec_per_Mpc = (self._cosmo.arcsec_per_kpc_proper(self.zl)).to( units.arcsec / units.Mpc )
#angular_sep_arcsec = np.linalg.norm([180/np.pi * self._theta1 * 3600,
# 180/np.pi * self._theta2 * 3600], axis=0) * units.arcsec
#self._r = (angular_sep_arcsec / arcsec_per_Mpc).value
# Projected distance in proper Mpc; Wright & Brainerd, under Eq.10
self._r = np.linalg.norm([self._theta1, self._theta2], axis=0) * \
self._cosmo.angular_diameter_distance(self.zl).value
if(self._has_shear12):
# compute tangential shear yt
self._phi = np.arctan(self._theta2/self._theta1)
#self._yt = -(self._y1 * np.cos(2*self._phi) +
# self._y2*np.sin(2*self._phi))
self._yt = np.sqrt(self._y1**2 + self._y2**2)
def get_background(self):
'''
Returns the source population data vectors to the caller as a numpy
rec array, sorted in ascending order with respect to the halo-centric
radial distance
Returns
-------
bg : 2d numpy array
A list of the source population data vectors (2d numpy array), with
labeled columns.
If shear components are being used (see docstring for `set_background()`,
then the contents of the return array is
[theta1, theta2, r, zs, y1, y2, yt], where theta1 and theta2 are the
halo-centric angular positions of the sources in arcseconds, r is the
halo-centric projected radial distance of each source in proper
:math:`\\text{Mpc}`, zs are the source redshifts, y1 and y2 are the
shear components of the sources, and yt are the source tangential shears.
If only the tangential shear is being used, then y1 and y2 are omitted
'''
self._check_sources()
bg_arrays = [(180/np.pi * self._theta1 * 3600),
(180/np.pi * self._theta2 * 3600),
self._r, self._zs, self._yt]
bg_dtypes = [('theta1',float), ('theta2',float), ('r',float),
('zs',float), ('yt',float)]
if(self._has_shear12):
bg_arrays.append(self._y1)
bg_arrays.append(self._y2)
bg_dtypes.append(('y1', float))
bg_dtypes.append(('y2', float))
if(self._has_kappa):
bg_arrays.append(self._k)
bg_dtypes.append(('k', float))
if(self._has_rho):
bg_arrays.append(self._rho)
bg_dtypes.append(('rho', float))
bg_arrays = [arr[self._radial_mask] for arr in bg_arrays]
bg = np.rec.fromarrays(bg_arrays, dtype = bg_dtypes)
return bg
@property
def cosmo(self): return self._cosmo
@cosmo.setter
def cosmo(self, value):
self._cosmo = value
self._comp_bg_quantities()
@property
def r(self): return self._r
def r(self, value):
raise Exception('Cannot change source \'r\' value; update angular positions instead')
@property
def theta1(self): return self._theta1
@theta1.setter
def theta1(self, value):
self._theta1 = value
self._comp_bg_quantities()
@property
def theta2(self): return self._theta2
@theta2.setter
def theta2(self, value):
self._theta2 = value
self._comp_bg_quantities()
@property
def zs(self): return self._zs
@theta1.setter
def zs(self, value):
self._zs = value
self._comp_bg_quantities()
@property
def k(self): return self._k
@k.setter
def k(self, value):
self._k = value
if(value is None): self._has_kappa = False
else: self._has_kappa = True
@property
def y1(self): return self._y1
@y1.setter
def y1(self, value):
if(not self._has_shear12):
raise Exception('object initialized with yt rather than y1,y2; cannot call y1 setter')
else:
self._y1 = value
self._comp_bg_quantities()
@property
def y2(self): return self._y2
@y2.setter
def y2(self, value):
if(not self._has_shear12):
raise Exception('object initialized with yt rather than y1,y2; cannot call y2 setter')
else:
self._y2 = value
self._comp_bg_quantities()
@property
def yt(self): return self._yt
@yt.setter
def yt(self, value):
self._yt = value
if(self._has_shear12 or self._has_shear1 or self._has_shear2):
warnings.warn('Warning: setting class attribute yt, but object was initialized'
'with y1,y2 (or y1/y2 setters were called); shear components y1'
'and y2 being set to None')
self._has_shear12 = False
self._y1= None
self._y2 = None
self._comp_bg_quantities()
@property
def get_radial_cuts(self): return [self._rmin, self._rmax]
def _k_rho(self):
'''
Rescales the convergence on the lens plane into a matter density. This is mostly offered
for debugging purposes, and is only really meaningful in the case that the input is the
raytracing of a single lens plane (otherwise the recovered matter density is cumulative,
in some sense, across the line of sight).
Returns
-------
rho : float or float array
The projected mass density at the source positions on the lens plane
'''
rho = self._k * self.calc_sigma_crit()
return rho
def calc_sigma_crit(self, zs=None):
'''
Computes :math:`\\Sigma_\\text{c}(z_s)`, the critical surface density as a function of source
redshift :math:`z_s`, at the lens redshift :math:`z_l`, in proper :math:`M_{\\odot}/\\text{pc}^2`,
assuming a flat cosmology
Parameters
----------
zs : float or float array, optional
A source redshift (or array of redshifts). If None (default), then use background
source redshifts given at object instatiation, `self.zs`
Returns
-------
Sigma_crit : float or float array
The critical surface density, :math:`\\Sigma_\\text{c}`, in proper
:math:`M_{\\odot}/\\text{pc}^2`
'''
if(zs is None):
self._check_sources()
zs = self._zs[self._radial_mask]
# G in Mpc^3 M_sun^-1 Gyr^-2,
# speed of light C in Mpc Gyr^-1
# distance to lens Dl and source Ds in proper Mpc
# --> warning: this assumes a flat cosmology; or that angular diamter distance = proper distance
G = const.G.to(units.Mpc**3 / (units.M_sun * units.Gyr**2)).value
C = const.c.to(units.Mpc / units.Gyr).value
Ds = self._cosmo.angular_diameter_distance(zs).value
Dl = self._cosmo.angular_diameter_distance(self.zl).value
Dls = Ds - Dl
# critical surface mass density Σ_c in proper M_sun/pc^2;
# final quotient scales to Mpc to pc
sigma_crit = (C**2/(4*np.pi*G) * (Ds)/(Dl*Dls))
sigma_crit = sigma_crit / (1e12)
return sigma_crit
def calc_delta_sigma(self):
'''
Computes :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, the differential surface density at the lens
redshift :math:`z_l`, in proper :math:`M_{\\odot}/\\text{pc}^2`, assuming a flat cosmology.
Returns
-------
delta_sigma : float or float array
The differential surface density, :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, in proper
:math:`M_{\\odot}/\\text{pc}^2
'''
self._check_sources()
yt = self._yt[self._radial_mask]
sigma_crit = self.calc_sigma_crit()
delta_sigma = yt*sigma_crit
return delta_sigma
    def calc_delta_sigma_binned(self, nbins, return_edges=False, return_std=False, return_gradients=False):
        '''
        Computes radially-binned profiles of :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, the
        differential surface density at the lens redshift :math:`z_l`, in proper
        :math:`M_{\\odot}/\\text{pc}^2`, assuming a flat cosmology.

        Parameters
        ----------
        nbins : int
            Number of bins to place the data into. The bin edges will be distributed uniformly in radial space
            (i.e. the bin widths will be constant, rather than bin areas)
        return_edges : bool, optional
            whether or not to return the resulting bin edges. Defaults to False
        return_std : bool, optional
            Whether or not to return the standard deviation and standard error of the mean of each bin.
            Defaults to False.
        return_gradients : bool, optional
            Whether or not to return the approximate gradient of each bin. The gradient is computed by
            fitting a linear form to each bin's data, and returning the slope parameter. Defaults to False.

        Returns
        -------
        bin_dict : dict
            Maps column names to per-bin arrays. Always contains 'r_mean' and
            'delta_sigma_mean'; 'r_std'/'r_se_mean'/'delta_sigma_std'/
            'delta_sigma_se_mean' are added when `return_std`, 'bin_edges'
            when `return_edges`, and 'bin_grad' when `return_gradients`.
        '''
        self._check_sources()
        # load data and sort by increasing radial distance
        r = self._r[self._radial_mask]
        sorter = np.argsort(r)
        r = r[sorter]
        yt = self._yt[self._radial_mask][sorter]
        # calc_sigma_crit() already applies the radial mask, so `sorter`
        # (indices into the masked r) lines up with its output
        sigma_crit = self.calc_sigma_crit()[sorter]
        delta_sigma = yt*sigma_crit
        # get bin means
        [r_mean, bin_edges, _] = stats.binned_statistic(r, r, statistic='mean', bins=nbins)
        [delta_sigma_mean,_,_] = stats.binned_statistic(r, delta_sigma, statistic='mean', bins=nbins)
        return_arrays = [r_mean, delta_sigma_mean]
        return_cols = ['r_mean', 'delta_sigma_mean']
        # and standard deviations, errors of the mean
        if(return_std):
            [delta_sigma_std,_,_] = stats.binned_statistic(r, delta_sigma, statistic='std', bins=nbins)
            [delta_sigma_count,_,_] = stats.binned_statistic(r, delta_sigma, statistic='count', bins=nbins)
            # NOTE(review): this divides by the count, not sqrt(count) -- the
            # usual standard error of the mean is std/sqrt(N); confirm intended
            delta_sigma_se = delta_sigma_std / delta_sigma_count
            [r_std,_,_] = stats.binned_statistic(r, r, statistic='std', bins=nbins)
            [r_count,_,_] = stats.binned_statistic(r, r, statistic='count', bins=nbins)
            r_se = r_std / r_count
            return_arrays.extend([r_std, r_se, delta_sigma_std, delta_sigma_se])
            return_cols.extend(['r_std', 'r_se_mean', 'delta_sigma_std', 'delta_sigma_se_mean'])
        # return bin edges
        if(return_edges):
            return_arrays.append(bin_edges)
            return_cols.append('bin_edges')
        # return bin gradient and errors... compute these manually
        if(return_gradients):
            bin_gradients = np.zeros(nbins)
            for i in range(nbins):
                # NOTE(review): strict inequalities drop sources lying exactly
                # on a bin edge, unlike binned_statistic above -- confirm intended
                bin_mask = np.logical_and(r > bin_edges[i], r < bin_edges[i+1])
                if(np.sum(bin_mask) == 0): bin_gradients[i] = float('NaN')
                else:
                    ds, dr = delta_sigma[bin_mask], r[bin_mask]
                    # slope of a degree-1 fit of delta_sigma against r
                    bin_gradients[i],_ = np.polyfit(dr, ds, 1)
            return_arrays.append(bin_gradients)
            return_cols.append('bin_grad')
        # gather for return
        bin_dict = {}
        for i in range(len(return_arrays)): bin_dict[return_cols[i]] = return_arrays[i]
        return bin_dict
| [
"scipy.stats.binned_statistic",
"numpy.sqrt",
"numpy.logical_and",
"astropy.constants.G.to",
"numpy.polyfit",
"numpy.max",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"astropy.constants.c.to",
"numpy.linalg.norm",
"warnings.warn",
"numpy.rec.fromarrays",
"numpy.arctan"
] | [((4193, 4241), 'numpy.logical_and', 'np.logical_and', (['(self._r >= rmin)', '(self._r <= rmax)'], {}), '(self._r >= rmin, self._r <= rmax)\n', (4207, 4241), True, 'import numpy as np\n'), ((6652, 6691), 'numpy.array', 'np.array', (['(np.pi / 180 * (theta1 / 3600))'], {}), '(np.pi / 180 * (theta1 / 3600))\n', (6660, 6691), True, 'import numpy as np\n'), ((6713, 6752), 'numpy.array', 'np.array', (['(np.pi / 180 * (theta2 / 3600))'], {}), '(np.pi / 180 * (theta2 / 3600))\n', (6721, 6752), True, 'import numpy as np\n'), ((6770, 6782), 'numpy.array', 'np.array', (['zs'], {}), '(zs)\n', (6778, 6782), True, 'import numpy as np\n'), ((6802, 6814), 'numpy.array', 'np.array', (['y1'], {}), '(y1)\n', (6810, 6814), True, 'import numpy as np\n'), ((6834, 6846), 'numpy.array', 'np.array', (['y2'], {}), '(y2)\n', (6842, 6846), True, 'import numpy as np\n'), ((6866, 6878), 'numpy.array', 'np.array', (['yt'], {}), '(yt)\n', (6874, 6878), True, 'import numpy as np\n'), ((6897, 6908), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (6905, 6908), True, 'import numpy as np\n'), ((6929, 6942), 'numpy.array', 'np.array', (['rho'], {}), '(rho)\n', (6937, 6942), True, 'import numpy as np\n'), ((10630, 10675), 'numpy.rec.fromarrays', 'np.rec.fromarrays', (['bg_arrays'], {'dtype': 'bg_dtypes'}), '(bg_arrays, dtype=bg_dtypes)\n', (10647, 10675), True, 'import numpy as np\n'), ((17423, 17436), 'numpy.argsort', 'np.argsort', (['r'], {}), '(r)\n', (17433, 17436), True, 'import numpy as np\n'), ((17671, 17729), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['r', 'r'], {'statistic': '"""mean"""', 'bins': 'nbins'}), "(r, r, statistic='mean', bins=nbins)\n", (17693, 17729), False, 'from scipy import stats\n'), ((17763, 17831), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['r', 'delta_sigma'], {'statistic': '"""mean"""', 'bins': 'nbins'}), "(r, delta_sigma, statistic='mean', bins=nbins)\n", (17785, 17831), False, 'from scipy import stats\n'), ((4149, 4164), 
'numpy.max', 'np.max', (['self._r'], {}), '(self._r)\n', (4155, 4164), True, 'import numpy as np\n'), ((8291, 8343), 'numpy.linalg.norm', 'np.linalg.norm', (['[self._theta1, self._theta2]'], {'axis': '(0)'}), '([self._theta1, self._theta2], axis=0)\n', (8305, 8343), True, 'import numpy as np\n'), ((8542, 8580), 'numpy.arctan', 'np.arctan', (['(self._theta2 / self._theta1)'], {}), '(self._theta2 / self._theta1)\n', (8551, 8580), True, 'import numpy as np\n'), ((8717, 8755), 'numpy.sqrt', 'np.sqrt', (['(self._y1 ** 2 + self._y2 ** 2)'], {}), '(self._y1 ** 2 + self._y2 ** 2)\n', (8724, 8755), True, 'import numpy as np\n'), ((12553, 12729), 'warnings.warn', 'warnings.warn', (['"""Warning: setting class attribute yt, but object was initializedwith y1,y2 (or y1/y2 setters were called); shear components y1and y2 being set to None"""'], {}), "(\n 'Warning: setting class attribute yt, but object was initializedwith y1,y2 (or y1/y2 setters were called); shear components y1and y2 being set to None'\n )\n", (12566, 12729), False, 'import warnings\n'), ((14731, 14790), 'astropy.constants.G.to', 'const.G.to', (['(units.Mpc ** 3 / (units.M_sun * units.Gyr ** 2))'], {}), '(units.Mpc ** 3 / (units.M_sun * units.Gyr ** 2))\n', (14741, 14790), True, 'import astropy.constants as const\n'), ((14805, 14838), 'astropy.constants.c.to', 'const.c.to', (['(units.Mpc / units.Gyr)'], {}), '(units.Mpc / units.Gyr)\n', (14815, 14838), True, 'import astropy.constants as const\n'), ((18058, 18125), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['r', 'delta_sigma'], {'statistic': '"""std"""', 'bins': 'nbins'}), "(r, delta_sigma, statistic='std', bins=nbins)\n", (18080, 18125), False, 'from scipy import stats\n'), ((18164, 18233), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['r', 'delta_sigma'], {'statistic': '"""count"""', 'bins': 'nbins'}), "(r, delta_sigma, statistic='count', bins=nbins)\n", (18186, 18233), False, 'from scipy import stats\n'), ((18338, 18395), 
'scipy.stats.binned_statistic', 'stats.binned_statistic', (['r', 'r'], {'statistic': '"""std"""', 'bins': 'nbins'}), "(r, r, statistic='std', bins=nbins)\n", (18360, 18395), False, 'from scipy import stats\n'), ((18424, 18483), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['r', 'r'], {'statistic': '"""count"""', 'bins': 'nbins'}), "(r, r, statistic='count', bins=nbins)\n", (18446, 18483), False, 'from scipy import stats\n'), ((18990, 19005), 'numpy.zeros', 'np.zeros', (['nbins'], {}), '(nbins)\n', (18998, 19005), True, 'import numpy as np\n'), ((19085, 19139), 'numpy.logical_and', 'np.logical_and', (['(r > bin_edges[i])', '(r < bin_edges[i + 1])'], {}), '(r > bin_edges[i], r < bin_edges[i + 1])\n', (19099, 19139), True, 'import numpy as np\n'), ((19157, 19173), 'numpy.sum', 'np.sum', (['bin_mask'], {}), '(bin_mask)\n', (19163, 19173), True, 'import numpy as np\n'), ((19340, 19361), 'numpy.polyfit', 'np.polyfit', (['dr', 'ds', '(1)'], {}), '(dr, ds, 1)\n', (19350, 19361), True, 'import numpy as np\n')] |
import numpy as np
import pylab as pl
from revoice import *
from revoice.common import *

# Demo: LF glottal-flow model evaluated for several Rd shape parameters.
t = np.linspace(0, 0.008, 16384)
f = np.linspace(10.0, 4000.0, 16384 * 16)
T0 = 0.008
RD_VALUES = (0.3, 1.0, 2.5)

# Time domain: glottal flow derivative over one period.
pl.figure()
pl.subplot(111)
for rd in RD_VALUES:
    pl.plot(t, lfmodel.calcFlowDerivative(t, T0, 1.0, *lfmodel.fromRd(rd)), label=f"Rd = {rd}")
pl.legend()

# Frequency domain: log-magnitude spectrum.
pl.figure()
for rd in RD_VALUES:
    pl.plot(f, np.log(lfmodel.calcSpectrum(f, T0, 1.0, *lfmodel.fromRd(rd))[0]), label=f"Rd = {rd}")
pl.legend()
pl.show()
| [
"pylab.subplot",
"pylab.legend",
"pylab.figure",
"numpy.linspace",
"pylab.show"
] | [((94, 122), 'numpy.linspace', 'np.linspace', (['(0)', '(0.008)', '(16384)'], {}), '(0, 0.008, 16384)\n', (105, 122), True, 'import numpy as np\n'), ((127, 164), 'numpy.linspace', 'np.linspace', (['(10.0)', '(4000.0)', '(16384 * 16)'], {}), '(10.0, 4000.0, 16384 * 16)\n', (138, 164), True, 'import numpy as np\n'), ((177, 188), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (186, 188), True, 'import pylab as pl\n'), ((189, 204), 'pylab.subplot', 'pl.subplot', (['(111)'], {}), '(111)\n', (199, 204), True, 'import pylab as pl\n'), ((484, 495), 'pylab.legend', 'pl.legend', ([], {}), '()\n', (493, 495), True, 'import pylab as pl\n'), ((496, 507), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (505, 507), True, 'import pylab as pl\n'), ((802, 813), 'pylab.legend', 'pl.legend', ([], {}), '()\n', (811, 813), True, 'import pylab as pl\n'), ((814, 823), 'pylab.show', 'pl.show', ([], {}), '()\n', (821, 823), True, 'import pylab as pl\n')] |
import os
import tensorflow as tf
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Flatten, Dropout
from tensorflow.keras import regularizers
import dku_deeplearning_image.dku_constants as constants
from dku_deeplearning_image.keras_applications import APPLICATIONS
import threading
import json
from collections import OrderedDict
import numpy as np
from datetime import datetime
import GPUtil
import pandas as pd
from PIL import UnidentifiedImageError, ImageFile, Image
import logging
from io import BytesIO
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ImageFile.LOAD_TRUNCATED_IMAGES = True
###################################################################################################################
# MODEL UTILS
###################################################################################################################
def add_pooling(model_output, pooling):
    """Append a pooling layer to `model_output`.

    Args:
        model_output: Keras tensor to pool.
        pooling (str): 'avg' or 'max' for global average/max pooling;
            any other value falls back to a Flatten layer.

    Returns:
        The pooled (or flattened) Keras tensor.
    """
    if pooling == 'avg':
        layer = GlobalAveragePooling2D()
    elif pooling == 'max':
        layer = GlobalMaxPooling2D()
    else:
        layer = Flatten()
    return layer(model_output)
def add_dropout(model_output, dropout):
    """Append a Dropout layer with rate `dropout` to `model_output`; when
    `dropout` is falsy (0, None, ...), return `model_output` unchanged."""
    if not dropout:
        return model_output
    return Dropout(dropout)(model_output)
def get_regularizer(reg):
    """Build a Keras weight regularizer from a dict with 'l1' and 'l2' keys.

    Returns an l1_l2, l2, or l1 regularizer depending on which coefficients
    are non-zero; returns None when `reg` is falsy or both are zero.
    """
    if not reg:
        return None
    l1_coef, l2_coef = reg["l1"], reg["l2"]
    if l1_coef and l2_coef:
        return regularizers.l1_l2(**reg)
    if l2_coef:
        return regularizers.l2(l2_coef)
    if l1_coef:
        return regularizers.l1(l1_coef)
    return None
###################################################################################################################
# MODELS LIST
###################################################################################################################
# INFO : when adding a new architecture, you must add a select-option in
# python-runnables/dl-toolbox-download-models/runnable.json with the label architecture_trainedon to make it available,
# along with new a constant in python-lib/constants.py
def is_keras_application(architecture):
    """Return True when `architecture` names one of the supported keras applications."""
    known = {app.name for app in APPLICATIONS}
    return architecture in known
###############################################################
# GPU HANDLING
###############################################################
def deactivate_gpu():
    # Hide every CUDA device from this process (CUDA_VISIBLE_DEVICES=-1),
    # forcing CPU-only execution.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def can_use_gpu():
    """Return True when TensorFlow can see at least one physical GPU."""
    return bool(tf.config.list_physical_devices('GPU'))
def set_gpu_options(should_use_gpu, gpu_list, gpu_memory_allocation_mode, memory_limit_ratio=None):
    """Configure TensorFlow GPU visibility and memory allocation.

    Args:
        should_use_gpu (bool): whether GPU execution is requested.
        gpu_list (list): indices of the GPUs to use; empty means all of them.
        gpu_memory_allocation_mode: one of constants.GPU_MEMORY.* (LIMIT caps
            each GPU's memory, GROWTH enables on-demand growth).
        memory_limit_ratio (float, optional): percentage of each GPU's memory
            to allocate when the mode is LIMIT.
    """
    if should_use_gpu and can_use_gpu():
        logger.info("Loading GPU Options...")
        gpus = tf.config.list_physical_devices('GPU')
        for i, g in enumerate(gpus):
            g.id = i
        gpus_to_use = [gpus[int(i)] for i in gpu_list] or gpus
        logger.info(f"GPUs on the machine: {[g.id for g in GPUtil.getGPUs()]}")
        logger.info(f"Will use the following GPUs: {gpus_to_use}")
        if gpu_memory_allocation_mode == constants.GPU_MEMORY.LIMIT and memory_limit_ratio:
            for gpu in gpus_to_use:
                memory_limit = calculate_gpu_memory_allocation(memory_limit_ratio, gpu)
                logger.info(f"Restraining GPU {gpu} to {memory_limit} Mo ({memory_limit_ratio}%)")
                tf.config.set_logical_device_configuration(
                    gpu,
                    [tf.config.LogicalDeviceConfiguration(memory_limit=int(memory_limit))]
                )
        elif gpu_memory_allocation_mode == constants.GPU_MEMORY.GROWTH:
            # FIX: the original wrapped this in a bare map() that was never
            # consumed -- map is lazy, so memory growth was never enabled
            for g in gpus_to_use:
                tf.config.experimental.set_memory_growth(g, True)
        tf.config.set_visible_devices(gpus_to_use, 'GPU')
    else:
        logger.info("Skipping GPU Options")
        deactivate_gpu()
def get_tf_strategy():
    # Distribution strategy used to mirror training across the visible GPUs.
    return tf.distribute.MirroredStrategy()
def calculate_gpu_memory_allocation(memory_limit_ratio, gpu_to_use):
    """Return `memory_limit_ratio` percent of the total memory of the GPU whose
    id matches `gpu_to_use` (units follow GPUtil's memoryTotal)."""
    matching = [g for g in GPUtil.getGPUs() if g.id == gpu_to_use.id]
    selected = matching[0]
    return int((memory_limit_ratio / 100) * selected.memoryTotal)
###################################################################################################################
# FILES LOGIC
###################################################################################################################
def get_weights_filename(with_top=False):
    """Return the weights file name; the NOTOP suffix is appended unless the
    classification head ('top') is included."""
    suffix = '' if with_top else constants.NOTOP_SUFFIX
    return f'{constants.WEIGHT_FILENAME}{suffix}.h5'
def get_file_path(folder_path, file_name):
    """Join `folder_path` and `file_name`, coercing both to str first so that
    non-string inputs (ints, Paths, ...) are accepted."""
    folder_str, file_str = str(folder_path), str(file_name)
    return os.path.join(folder_str, file_str)
def get_cached_file_from_folder(folder, file_path):
    """Download `file_path` from `folder` into the working directory (cached
    under a flattened filename) and return the local filename, or '' when the
    download fails.

    Note: '/' is replaced by '_' to build the cache filename, so distinct
    remote paths could in principle collide (e.g. 'a/b' vs 'a_b').
    """
    if isinstance(file_path, bytes):
        file_path = file_path.decode('utf-8')
    filename = file_path.replace('/', '_')
    if not os.path.exists(filename):
        try:
            with folder.get_download_stream(file_path) as stream:
                with open(filename, 'wb') as f:
                    f.write(stream.read())
            logger.debug(f"Cached file {file_path}")
        except Exception as err:
            # FIX: the original logged the literal '(unknown)' instead of the
            # path and discarded the caught exception
            logger.warning(f'The file {file_path} does not exist in input folder. Skipping it. Error: {err}')
            return ""
    else:
        logger.debug(f"Read from cache {file_path}")
    return filename
def convert_target_to_np_array(target_array):
    """One-hot encode `target_array`.

    Returns a dict with 'remapped' (int8 one-hot matrix, one row per sample)
    and 'classes' (the class labels, in pandas' column order).
    """
    one_hot = pd.get_dummies(target_array)
    return {
        "remapped": one_hot.values.astype(np.int8),
        "classes": list(one_hot.columns),
    }
def get_model_config_from_file(model_folder):
    # Read and parse the JSON model config (constants.CONFIG_FILE) stored in
    # the model folder.
    return json.loads(model_folder.get_download_stream(constants.CONFIG_FILE).read())
def build_prediction_output_df(images_paths, predictions):
    """Assemble scoring results into a DataFrame.

    Args:
        images_paths: sequence of the scored image paths (column 'images').
        predictions (dict): mapping with 'prediction' and 'error' lists, as
            produced by format_predictions_output.

    Returns:
        pd.DataFrame with columns 'images', 'prediction' and 'error'.
    """
    output = pd.DataFrame()
    output["images"] = images_paths
    # FIX: use lazy %-style logging args so the DataFrame is only stringified
    # when DEBUG logging is actually enabled (the original eagerly built the
    # string on every call)
    logger.debug("------->%s", output)
    output["prediction"] = predictions["prediction"]
    output["error"] = predictions["error"]
    return output
###################################################################################################################
# MISC.
###################################################################################################################
def log_func(txt):
    """Decorator factory: wrap a function so that the start and end of its
    execution are logged (with timestamps) under the label `txt`."""
    from functools import wraps  # local import keeps the helper self-contained

    def inner(f):
        # FIX: preserve the wrapped function's name/docstring, which the
        # original decorator clobbered
        @wraps(f)
        def wrapper(*args, **kwargs):
            logger.info(f"------ \n Info: Starting {txt} ({datetime.now().strftime('%H:%M:%S')}) \n ------")
            res = f(*args, **kwargs)
            logger.info(f"------ \n Info: Ending {txt} ({datetime.now().strftime('%H:%M:%S')}) \n ------")
            return res
        return wrapper
    return inner
def format_predictions_output(predictions, errors, classify=False, labels_df=None, limit=None, min_threshold=None):
    """Shape raw model outputs into the scoring result structure.

    A None placeholder is re-inserted at every position flagged in `errors`
    (the corresponding images were filtered out before prediction). When
    `classify` is True, each prediction becomes a JSON object mapping the
    top-`limit` labels (scores >= `min_threshold`) to their scores; otherwise
    the raw probability vector is emitted as a list.

    Returns:
        dict with 'prediction' (formatted values, None on error) and 'error'
        (the input flags, unchanged).
    """
    def _label(index):
        if labels_df is not None:
            return labels_df.loc[index][constants.LABEL]
        return str(index)

    preds = list(predictions)
    # re-insert placeholders where preprocessing failed so positions line up
    for idx, err_flag in enumerate(errors):
        if err_flag == 1:
            preds.insert(idx, None)

    formatted = []
    for pos, pred in enumerate(preds):
        if pred is None:
            logger.warning(f"There has been an error with prediction: {pos}. (It is probably not an image)")
            formatted.append(None)
        elif classify:
            top = OrderedDict(
                [(str(_label(i)), float(pred[i])) for i in pred.argsort()[-limit:] if float(pred[i]) >= min_threshold])
            formatted.append(json.dumps(top))
        else:
            formatted.append(pred.tolist())
    return {"prediction": formatted, "error": errors}
def apply_preprocess_image(tfds, input_shape, preprocessing, is_b64=False):
    """Map `preprocess_img` over a tf.data.Dataset of image paths.

    Returns a tuple (preprocessed_images_filtered, error_array): the first
    dataset keeps only successfully preprocessed images (`preprocess_img`
    signals failure with an empty array, which is filtered out here); the
    second is a dataset of 0/1 int8 flags marking which positions failed.
    """
    def _apply_preprocess_image(image_path):
        # run the Python-side preprocessing inside the tf graph
        return tf.numpy_function(
            func=lambda x: tf.cast(preprocess_img(x, input_shape, preprocessing, is_b64), tf.float32),
            inp=[image_path],
            Tout=tf.float32)
    def _convert_errors(images):
        # 1 when preprocessing produced an empty array (i.e. it failed)
        return tf.numpy_function(
            func=lambda x: tf.cast(x.size == 0, tf.int8),
            inp=[images],
            Tout=tf.int8)
    def _filter_errors(images):
        # keep only non-empty (successfully preprocessed) images
        return tf.numpy_function(
            func=lambda x: x.size != 0,
            inp=[images],
            Tout=tf.bool)
    preprocessed_images = tfds.map(map_func=_apply_preprocess_image, num_parallel_calls=constants.AUTOTUNE)
    error_array = preprocessed_images.map(map_func=_convert_errors, num_parallel_calls=constants.AUTOTUNE)
    preprocessed_images_filtered = preprocessed_images.filter(predicate=_filter_errors)
    return preprocessed_images_filtered, error_array
def retrieve_images_to_tfds(images_folder, np_images):
    """Build a tf.data.Dataset of local file names for the images in
    `np_images`, downloading each from `images_folder` into the local cache
    via get_cached_file_from_folder (which yields '' on failure)."""
    def _retrieve_image_from_folder(image_fn):
        # Python-side download/caching, wrapped for use inside the tf graph
        return tf.numpy_function(
            func=lambda x: get_cached_file_from_folder(images_folder, x),
            inp=[image_fn],
            Tout=tf.string)
    X_tfds = tf.data.Dataset.from_tensor_slices(np_images)
    return X_tfds.map(map_func=_retrieve_image_from_folder, num_parallel_calls=constants.AUTOTUNE)
def preprocess_img(img_path, img_shape, preprocessing, is_b64=False):
    """Load one image, resize it to `img_shape[:2]`, force RGB, and apply
    `preprocessing` to the resulting array.

    Failures (empty path, unreadable image) yield an empty np.array, which
    downstream code uses as the error marker.

    NOTE(review): PIL's resize expects (width, height); img_shape[:2] is
    passed as-is, so for non-square shapes the axis order should be confirmed.
    """
    if not img_path:
        return np.array([])
    source = BytesIO(img_path) if is_b64 else img_path
    try:
        img = Image.open(source).resize(img_shape[:2])
        if img.mode != 'RGB':
            img = img.convert('RGB')
    except UnidentifiedImageError as err:
        logger.warning(f'The file {source} is not a valid image. skipping it. Error: {err}')
        return np.array([])
    return preprocessing(np.array(img))
def clean_custom_params(custom_params, params_type=""):
    """Turn a list of {'name': ..., 'value': ...} dicts into a plain dict,
    coercing each string value to bool/float where possible.

    Args:
        custom_params: list of dicts, each with 'name' and 'value' keys.
        params_type (str): label used in error messages.

    Raises:
        IOError: when an entry is missing 'name' or 'value'.
    """
    def _string_to_arg(string):
        lowered = string.lower()
        if lowered == "true":
            return True
        if lowered == "false":
            return False
        try:
            # FIX: the original used np.float, an alias of the builtin float
            # that was removed in NumPy 1.24 (AttributeError at runtime)
            return float(string)
        except ValueError:
            return string

    cleaned_params = {}
    params_type = " '{}'".format(params_type) if params_type else ""
    for i, p in enumerate(custom_params):
        # params_type already carries its leading space, so no separator here
        # (the original "The {params_type} " produced a double space)
        if p.get("name") is None:
            raise IOError(f"The{params_type} custom param #{i} must have a 'name'")
        if p.get("value") is None:
            raise IOError(f"The{params_type} custom param #{i} must have a 'value'")
        cleaned_params[p["name"]] = _string_to_arg(p["value"])
    return cleaned_params
def sanitize_path(path):
    """Return `path` with a single leading '/' removed, if present."""
    if path.startswith('/'):
        return path[1:]
    return path


def is_path_in_folder(path, folder):
    """Return True when `path` matches one of the folder's partition paths,
    comparing with leading slashes stripped on both sides."""
    folder_paths = {sanitize_path(p) for p in folder.list_paths_in_partition()}
    return sanitize_path(path) in folder_paths
def dbg_msg(msg, title=''):
    """Log `msg` at debug level, framed by 100-char dashed separator lines
    (the opening line carries `title`)."""
    header = f'DEBUG : {title}'.center(100, '-')
    logger.debug(header)
    logger.debug(msg)
    logger.debug('-' * 100)
###############################################################
# THREADSAFE GENERATOR / ITERATOR
# Inspired by :
# https://github.com/fchollet/keras/issues/1638
# http://anandology.com/blog/using-iterators-and-generators/
###############################################################
''' Make the generators threadsafe in case of multiple threads '''
class threadsafe_iter:
    """Wrap an iterator/generator so that advancing it is serialized by a
    lock, making it safe to consume from several threads at once."""

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # only one thread at a time may advance the underlying iterator
        with self.lock:
            return next(self.it)
| [
"logging.getLogger",
"GPUtil.getGPUs",
"tensorflow.numpy_function",
"io.BytesIO",
"numpy.array",
"tensorflow.config.list_physical_devices",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.cast",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"os.path.exists",
"tensorflow.data.Datase... | [((552, 579), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (569, 579), False, 'import logging\n'), ((3819, 3851), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (3849, 3851), True, 'import tensorflow as tf\n'), ((5377, 5405), 'pandas.get_dummies', 'pd.get_dummies', (['target_array'], {}), '(target_array)\n', (5391, 5405), True, 'import pandas as pd\n'), ((5704, 5718), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5716, 5718), True, 'import pandas as pd\n'), ((8910, 8955), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['np_images'], {}), '(np_images)\n', (8944, 8955), True, 'import tensorflow as tf\n'), ((9554, 9567), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9562, 9567), True, 'import numpy as np\n'), ((2668, 2706), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (2699, 2706), True, 'import tensorflow as tf\n'), ((3654, 3703), 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['gpus_to_use', '"""GPU"""'], {}), "(gpus_to_use, 'GPU')\n", (3683, 3703), True, 'import tensorflow as tf\n'), ((4823, 4847), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (4837, 4847), False, 'import os\n'), ((8159, 8232), 'tensorflow.numpy_function', 'tf.numpy_function', ([], {'func': '(lambda x: x.size != 0)', 'inp': '[images]', 'Tout': 'tf.bool'}), '(func=lambda x: x.size != 0, inp=[images], Tout=tf.bool)\n', (8176, 8232), True, 'import tensorflow as tf\n'), ((11412, 11428), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (11426, 11428), False, 'import threading\n'), ((979, 1003), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (1001, 1003), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Flatten, Dropout\n'), ((1197, 
1213), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (1204, 1213), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Flatten, Dropout\n'), ((1392, 1417), 'tensorflow.keras.regularizers.l1_l2', 'regularizers.l1_l2', ([], {}), '(**reg)\n', (1410, 1417), False, 'from tensorflow.keras import regularizers\n'), ((2420, 2458), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (2451, 2458), True, 'import tensorflow as tf\n'), ((9180, 9192), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9188, 9192), True, 'import numpy as np\n'), ((9235, 9252), 'io.BytesIO', 'BytesIO', (['img_path'], {}), '(img_path)\n', (9242, 9252), False, 'from io import BytesIO\n'), ((9529, 9541), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9537, 9541), True, 'import numpy as np\n'), ((1060, 1080), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {}), '()\n', (1078, 1080), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Flatten, Dropout\n'), ((1120, 1129), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1127, 1129), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Flatten, Dropout\n'), ((1458, 1484), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (["reg['l2']"], {}), "(reg['l2'])\n", (1473, 1484), False, 'from tensorflow.keras import regularizers\n'), ((3949, 3965), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (3963, 3965), False, 'import GPUtil\n'), ((9267, 9287), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (9277, 9287), False, 'from PIL import UnidentifiedImageError, ImageFile, Image\n'), ((1525, 1551), 'tensorflow.keras.regularizers.l1', 'regularizers.l1', (["reg['l1']"], {}), "(reg['l1'])\n", (1540, 1551), False, 'from tensorflow.keras import regularizers\n'), ((7269, 7295), 'json.dumps', 
'json.dumps', (['formatted_pred'], {}), '(formatted_pred)\n', (7279, 7295), False, 'import json\n'), ((8028, 8057), 'tensorflow.cast', 'tf.cast', (['(x.size == 0)', 'tf.int8'], {}), '(x.size == 0, tf.int8)\n', (8035, 8057), True, 'import tensorflow as tf\n'), ((9884, 9900), 'numpy.float', 'np.float', (['string'], {}), '(string)\n', (9892, 9900), True, 'import numpy as np\n'), ((3582, 3631), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['g', '(True)'], {}), '(g, True)\n', (3622, 3631), True, 'import tensorflow as tf\n'), ((2887, 2903), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (2901, 2903), False, 'import GPUtil\n'), ((6288, 6302), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6300, 6302), False, 'from datetime import datetime\n'), ((6432, 6446), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6444, 6446), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
# Dicrease color
def k_mean_dic_color(img, K=5):
    """Reduce the number of colors in an image with naive k-means clustering.

    Parameters
    ----------
    img : np.ndarray
        Input image of shape (H, W, C) or (H, W); values expected in
        [0, 255] (any numeric dtype).
    K : int
        Number of color clusters to quantize to.

    Returns
    -------
    np.ndarray
        uint8 image of shape (H, W, C) in which every pixel is replaced
        by its cluster centroid color.
    """
    if len(img.shape) > 2:
        H, W, C = img.shape
    else:
        H, W = img.shape
        C = 1
    # reshape img into 2D: one row per pixel
    tmp_img = img.reshape(H * W, C)
    # Step1: pick K distinct pixels at random as the initial centroids
    init_idx = np.random.choice(np.arange(H * W), K, replace=False)
    color = tmp_img[init_idx].copy()
    while True:
        # Step2: assign each pixel to its nearest centroid.  Vectorized with
        # broadcasting; the original per-pixel Python loop made O(H*W)
        # interpreter round-trips per iteration.
        dist = np.sqrt(((tmp_img[:, None, :] - color[None, :, :]) ** 2).sum(axis=2))
        clss = np.argmin(dist, axis=1)
        # Step3: recompute each centroid as the mean of its members
        color_tmp = color.copy()
        for k in range(K):
            members = tmp_img[clss == k]
            if len(members) > 0:
                color_tmp[k] = members.mean(axis=0)
            # empty cluster: keep the old centroid (np.mean of an empty
            # array is NaN, and NaN != NaN would make the convergence
            # test below fail forever)
        # stop once no centroid moved
        if (color == color_tmp).all():
            break
        color = color_tmp
    # rebuild the image from the centroid colors; use C channels instead of
    # the previously hard-coded 3 so the grayscale branch actually works
    out = color[clss]
    out = np.clip(out, 0, 255)
    out = np.reshape(out, (H, W, C))
    out = out.astype(np.uint8)
    return out
# Read image; cv2.imread returns None (no exception) when the file is
# missing or unreadable, so check explicitly instead of crashing later
# with a cryptic AttributeError on .astype.
img = cv2.imread("Jeanne.jpg")
if img is None:
    raise FileNotFoundError("Could not read input image 'Jeanne.jpg'")
img = img.astype(np.float32)
# Process image (quantize to 10 colors)
out = k_mean_dic_color(img, K=10)
# Show and save image
cv2.namedWindow("result", 0)
cv2.resizeWindow("result", 512, 512)
cv2.imwrite("Myresult/out92.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
"numpy.clip",
"cv2.imwrite",
"numpy.mean",
"cv2.resizeWindow",
"numpy.reshape",
"cv2.imshow",
"numpy.sum",
"numpy.zeros",
"cv2.destroyAllWindows",
"numpy.argmin",
"cv2.waitKey",
"cv2.namedWindow",
"numpy.arange",
"cv2.imread"
] | [((1587, 1615), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', '(0)'], {}), "('result', 0)\n", (1602, 1615), False, 'import cv2\n'), ((1616, 1652), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""result"""', '(512)', '(512)'], {}), "('result', 512, 512)\n", (1632, 1652), False, 'import cv2\n'), ((1653, 1691), 'cv2.imwrite', 'cv2.imwrite', (['"""Myresult/out92.jpg"""', 'out'], {}), "('Myresult/out92.jpg', out)\n", (1664, 1691), False, 'import cv2\n'), ((1692, 1717), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'out'], {}), "('result', out)\n", (1702, 1717), False, 'import cv2\n'), ((1718, 1732), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1729, 1732), False, 'import cv2\n'), ((1733, 1756), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1754, 1756), False, 'import cv2\n'), ((1148, 1186), 'numpy.zeros', 'np.zeros', (['(H * W, 3)'], {'dtype': 'np.float32'}), '((H * W, 3), dtype=np.float32)\n', (1156, 1186), True, 'import numpy as np\n'), ((1319, 1339), 'numpy.clip', 'np.clip', (['out', '(0)', '(255)'], {}), '(out, 0, 255)\n', (1326, 1339), True, 'import numpy as np\n'), ((1375, 1401), 'numpy.reshape', 'np.reshape', (['out', '(H, W, 3)'], {}), '(out, (H, W, 3))\n', (1385, 1401), True, 'import numpy as np\n'), ((344, 360), 'numpy.arange', 'np.arange', (['(H * W)'], {}), '(H * W)\n', (353, 360), True, 'import numpy as np\n'), ((482, 508), 'numpy.zeros', 'np.zeros', (['(H * W)'], {'dtype': 'int'}), '(H * W, dtype=int)\n', (490, 508), True, 'import numpy as np\n'), ((842, 858), 'numpy.zeros', 'np.zeros', (['(K, 3)'], {}), '((K, 3))\n', (850, 858), True, 'import numpy as np\n'), ((1469, 1493), 'cv2.imread', 'cv2.imread', (['"""Jeanne.jpg"""'], {}), "('Jeanne.jpg')\n", (1479, 1493), False, 'import cv2\n'), ((743, 757), 'numpy.argmin', 'np.argmin', (['dis'], {}), '(dis)\n', (752, 757), True, 'import numpy as np\n'), ((922, 957), 'numpy.mean', 'np.mean', (['tmp_img[clss == i]'], {'axis': '(0)'}), '(tmp_img[clss == i], axis=0)\n', 
(929, 957), True, 'import numpy as np\n'), ((674, 715), 'numpy.sum', 'np.sum', (['((color - tmp_img[i]) ** 2)'], {'axis': '(1)'}), '((color - tmp_img[i]) ** 2, axis=1)\n', (680, 715), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import numpy as np
from ...utils import warn
class _LinkViewer(object):
    """Class to link multiple _TimeViewer objects."""
    def __init__(self, brains, time=True, camera=False, colorbar=True,
                 picking=False):
        """Wire the time viewers of ``brains`` together.

        Parameters
        ----------
        brains : list
            Brain instances; each must expose a ``time_viewer`` attribute.
        time : bool
            Link the time and playback-speed sliders (only done when all
            brains share the same time axis).
        camera : bool
            Link the plotter cameras so all views move together.
        colorbar : bool
            Link the fmin/fmid/fmax colormap sliders.
        picking : bool
            Link vertex picking (adding/removing picked points).
        """
        self.brains = brains
        self.time_viewers = [brain.time_viewer for brain in brains]
        # check time infos: time linking only makes sense when all brains
        # share (numerically) the same time axis
        times = [brain._times for brain in brains]
        if time and not all(np.allclose(x, times[0]) for x in times):
            warn('stc.times do not match, not linking time')
            time = False
        if camera:
            self.link_cameras()
        if time:
            # link time sliders
            self.link_sliders(
                name="time",
                callback=self.set_time_point,
                event_type="always"
            )
            # link playback speed sliders
            self.link_sliders(
                name="playback_speed",
                callback=self.set_playback_speed,
                event_type="always"
            )
            # link toggle to start/pause playback: replace each viewer's own
            # play handler with the shared toggle_playback
            for time_viewer in self.time_viewers:
                time_viewer.actions["play"].triggered.disconnect()
                time_viewer.actions["play"].triggered.connect(
                    self.toggle_playback)
            # link time course canvas: clicking in one canvas moves time in
            # every linked viewer
            def _time_func(*args, **kwargs):
                for time_viewer in self.time_viewers:
                    time_viewer.callbacks["time"](*args, **kwargs)
            for time_viewer in self.time_viewers:
                if time_viewer.show_traces:
                    time_viewer.mpl_canvas.time_func = _time_func
        if picking:
            # broadcast add/remove of a picked point to every viewer
            def _func_add(*args, **kwargs):
                for time_viewer in self.time_viewers:
                    time_viewer._add_point(*args, **kwargs)
                    time_viewer.plotter.update()
            def _func_remove(*args, **kwargs):
                for time_viewer in self.time_viewers:
                    time_viewer._remove_point(*args, **kwargs)
            # save initial picked points (union over all viewers, per hemi)
            initial_points = dict()
            for hemi in ('lh', 'rh'):
                initial_points[hemi] = set()
                for time_viewer in self.time_viewers:
                    initial_points[hemi] |= \
                        set(time_viewer.picked_points[hemi])
            # link the viewers: keep the original (un-linked) methods as
            # _add_point/_remove_point and install the broadcasting versions
            for time_viewer in self.time_viewers:
                time_viewer.clear_points()
                time_viewer._add_point = time_viewer.add_point
                time_viewer.add_point = _func_add
                time_viewer._remove_point = time_viewer.remove_point
                time_viewer.remove_point = _func_remove
                # link the initial points: re-add the saved union through the
                # leader so every viewer ends up with the same picked points
                leader = self.time_viewers[0]  # select a time_viewer as leader
                for hemi in initial_points.keys():
                    if hemi in time_viewer.brain._hemi_meshes:
                        mesh = time_viewer.brain._hemi_meshes[hemi]
                        for vertex_id in initial_points[hemi]:
                            leader.add_point(hemi, mesh, vertex_id)
        if colorbar:
            # take the first viewer's colormap limits as the shared ones
            leader = self.time_viewers[0]  # select a time_viewer as leader
            fmin = leader.brain._data["fmin"]
            fmid = leader.brain._data["fmid"]
            fmax = leader.brain._data["fmax"]
            for time_viewer in self.time_viewers:
                time_viewer.callbacks["fmin"](fmin)
                time_viewer.callbacks["fmid"](fmid)
                time_viewer.callbacks["fmax"](fmax)
            for slider_name in ('fmin', 'fmid', 'fmax'):
                func = getattr(self, "set_" + slider_name)
                self.link_sliders(
                    name=slider_name,
                    callback=func,
                    event_type="always"
                )
    def set_fmin(self, value):
        """Set the colormap minimum on every linked viewer."""
        for time_viewer in self.time_viewers:
            time_viewer.callbacks["fmin"](value)
    def set_fmid(self, value):
        """Set the colormap midpoint on every linked viewer."""
        for time_viewer in self.time_viewers:
            time_viewer.callbacks["fmid"](value)
    def set_fmax(self, value):
        """Set the colormap maximum on every linked viewer."""
        for time_viewer in self.time_viewers:
            time_viewer.callbacks["fmax"](value)
    def set_time_point(self, value):
        """Set the current time point on every linked viewer."""
        for time_viewer in self.time_viewers:
            time_viewer.callbacks["time"](value, update_widget=True)
    def set_playback_speed(self, value):
        """Set the playback speed on every linked viewer."""
        for time_viewer in self.time_viewers:
            time_viewer.callbacks["playback_speed"](value, update_widget=True)
    def toggle_playback(self):
        """Start/pause playback on all viewers from a synchronized time."""
        leader = self.time_viewers[0]  # select a time_viewer as leader
        value = leader.callbacks["time"].slider_rep.GetValue()
        # synchronize starting points before playback
        self.set_time_point(value)
        for time_viewer in self.time_viewers:
            time_viewer.toggle_playback()
    def link_sliders(self, name, callback, event_type):
        """Make the slider ``name`` of every viewer invoke ``callback``."""
        from ..backends._pyvista import _update_slider_callback
        for time_viewer in self.time_viewers:
            slider = time_viewer.sliders[name]
            if slider is not None:
                _update_slider_callback(
                    slider=slider,
                    callback=callback,
                    event_type=event_type
                )
    def link_cameras(self):
        """Share one camera object across all plotters and repaint on move."""
        from ..backends._pyvista import _add_camera_callback
        def _update_camera(vtk_picker, event):
            for time_viewer in self.time_viewers:
                time_viewer.plotter.update()
        leader = self.time_viewers[0]  # select a time_viewer as leader
        camera = leader.plotter.camera
        _add_camera_callback(camera, _update_camera)
        for time_viewer in self.time_viewers:
            for renderer in time_viewer.plotter.renderers:
                renderer.camera = camera
| [
"numpy.allclose"
] | [((549, 573), 'numpy.allclose', 'np.allclose', (['x', 'times[0]'], {}), '(x, times[0])\n', (560, 573), True, 'import numpy as np\n')] |
# Imports
import os
import numpy as np
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Building Neural Network
from keras.layers import Dense, Activation, Dropout
from keras.layers import BatchNormalization, Flatten
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from keras.optimizers import Adam
# Custom
# from .model_strategies import ModelStrategies
# from .model_evaluation import ModelEvaluation
class ModelBuilder:
    """Build, train, persist and visualize a Keras classification network.

    Hyper-parameters are held as instance attributes; the class creates
    sliding-window train/test vectors from DataFrames, compiles/loads/trains
    the network and plots the training history.
    """

    # directory under which one sub-folder per model is created
    _models_folder = 'trained_models'

    def __init__(self):
        print('Initialize Model Builder')
        # MODELS
        self._model_name = None
        self.compiled_model = None
        self.trained_model = None
        self.training_history = None
        self.vector_shape = 0
        # GENERAL
        self._model_name = None
        self.val_size: float = 0.5
        # PREDICTION PERIODS
        self._predict_ma: int = 30
        self.n_past: int = 10
        self.n_future: int = 3
        # PROPERTIES
        self.epochs: int = 100
        self.loss_func: str = 'binary_crossentropy'
        self.activation_func: str = 'tanh'
        self.output_func: str = 'sigmoid'
        self.batch_size: int = 32
        self.starting_learn_rate: float = 0.01
        # BEHAVIORS
        self.shuffle_inputs: bool = False
        self.do_batch_norm: bool = False
        # Dropout
        self.dropout_rate: float = 0.5
        self.do_dropout: bool = False
        # Dynamic Learning Rate
        self.learning_rate_patience: int = 0
        self.learning_rate_reduction: float = 0.90
        # Dynamic Training Stop
        self.stop_training_patience = 50
        # SCORING
        self.monitor_metric: str = 'acc'
        self.val_monitor_metric: str = 'val_' + self.monitor_metric
        self.train_score: float = 0.
        self.val_score: float = 0.
        self.val_score_max_in_epoch: int = 0

    @property
    def models_folder(self):
        return self._models_folder

    @models_folder.setter
    def models_folder(self, value):
        self._models_folder = value

    @property
    def model_name(self):
        return self._model_name

    @model_name.setter
    def model_name(self, value):
        self._model_name = value

    @property
    def predict_ma(self):
        return self._predict_ma

    @predict_ma.setter
    def predict_ma(self, value):
        self._predict_ma = value

    # MODEL WORKFLOW
    # ------------------------------------------------------------------------------
    def create_train_vectors(self, df_train, scaled_df_train):
        """Create sliding-window training vectors.

        Each sample is the ``n_past`` previous rows of ``scaled_df_train``
        (all columns except the last one of ``df_train``); the target is the
        last column of ``df_train`` at the current row.  Also stores
        ``self.vector_shape`` for the network input layer.
        """
        n_columns = len(df_train.columns)
        last_column = n_columns - 1
        # Train Vectors
        x_train, y_train = [], []
        for i in range(self.n_past, len(df_train) - self.n_future + 1):
            x_train.append(scaled_df_train[i - self.n_past:i, 0:last_column])
            y_train.append(df_train.iloc[i:i + 1, last_column].values)
        # Vectors must be numpy arrays
        x_train, y_train = np.array(x_train), np.array(y_train)
        # Reshape targets into a column vector
        y_train = y_train.reshape(y_train.shape[0], 1)
        # Save vector shape for NN input layer
        self.vector_shape = x_train.shape
        return x_train, y_train

    def create_test_vectors(self, df_test, scaled_df_test, df_test_close):
        """Create sliding-window test vectors plus the matching close prices."""
        n_columns = len(df_test.columns)
        last_column = n_columns - 1
        # Test Vectors
        x_test, y_test, y_test_price = [], [], []
        for i in range(self.n_past, len(df_test) - self.n_future + 1):
            x_test.append(scaled_df_test[i - self.n_past:i, 0:last_column])
            y_test.append(df_test.iloc[i:i + 1, last_column].values)
            y_test_price.append(df_test_close.iloc[i:i + 1].values)
        # Vectors must be numpy arrays
        x_test, y_test, y_test_price = np.array(x_test), np.array(y_test), np.array(
            y_test_price)
        # Reshape targets/prices into column vectors
        y_test, y_test_price = y_test.reshape(y_test.shape[0], 1), y_test_price.reshape(
            y_test.shape[0], 1)
        return x_test, y_test, y_test_price

    def build_network(self):
        """Compile ``self.compiled_model`` with Adam and the configured loss."""
        optimizer = Adam(lr=self.starting_learn_rate)
        self.compiled_model.compile(
            optimizer=optimizer,
            loss=self.loss_func,
            metrics=[self.monitor_metric])
        print('Neural Network successfully compiled')
        return self.compiled_model

    # Compile model + load saved weights
    def load_network(self):
        """Compile the network and load the saved weights for prediction."""
        self.build_network()
        self.trained_model = self.compiled_model
        self.trained_model.load_weights(
            filepath=f'{self.models_folder}/{self.model_name}/{self.model_name}.hdf5',
            by_name=False)
        print('Weights successfully imported')
        print('Model is ready to predict.')
        return self.trained_model

    def _build_callbacks(self, verbose):
        """Create the EarlyStopping/ReduceLROnPlateau/ModelCheckpoint callbacks."""
        # Dynamic Training Stop
        stop_training = EarlyStopping(monitor=self.val_monitor_metric,
                                      min_delta=1e-10,
                                      patience=self.stop_training_patience,
                                      verbose=verbose)
        # Dynamic Learning Rate
        reduce_learning_rate = ReduceLROnPlateau(monitor=self.val_monitor_metric,
                                                  factor=self.learning_rate_reduction,
                                                  patience=self.learning_rate_patience,
                                                  min_lr=0.000001, verbose=verbose)
        # Save Best Model
        self_checkpoint = ModelCheckpoint(
            filepath=f'{self.models_folder}/{self.model_name}/{self.model_name}.hdf5',
            monitor=self.val_monitor_metric,
            verbose=1, save_best_only=True)
        return [stop_training, reduce_learning_rate, self_checkpoint]

    # Compile model + retrain model
    def train_network(self, x_train, y_train, verbose: int = 1):
        """Compile and train the network, keeping the best checkpoint.

        :param x_train: training input vectors
        :param y_train: training targets
        :param verbose: int -- printing training progress
        :return: (trained_model, training_history)
        """
        # Create Folder if not exist
        self.create_folder(self.model_name)
        # Compile Model
        self.build_network()
        self.trained_model = self.compiled_model
        # Train Model
        self.training_history = self.trained_model.fit(x_train,
                                                       y_train,
                                                       shuffle=self.shuffle_inputs,
                                                       epochs=self.epochs,
                                                       callbacks=self._build_callbacks(verbose),
                                                       validation_split=self.val_size,
                                                       verbose=verbose,
                                                       batch_size=self.batch_size)
        # Set Score Metrics
        self.set_score_values()
        # Round Score Metrics
        self.round_score_values()
        print('Model was successfully trained.')
        return self.trained_model, self.training_history

    def set_score_values(self):
        """Store the best validation score and the training score at that epoch."""
        # Set Value of best val metric
        self.val_score = np.max(self.training_history.history[self.val_monitor_metric], axis=0)
        # Set Index (epoch) of best val metric
        self.val_score_max_in_epoch = np.argmax(
            self.training_history.history[self.val_monitor_metric],
            axis=0)
        # Training score at the epoch of the best validation score
        self.train_score = self.training_history.history[self.monitor_metric][
            self.val_score_max_in_epoch]

    def round_score_values(self):
        """Round the stored scores for reporting.

        Classification ('acc') scores become percentages with two decimals;
        regression scores are rounded to six decimals.
        """
        # Compare strings with '==' (not 'is', which is identity and
        # implementation-dependent), and use the builtin round() so plain
        # Python floats (as stored in Keras history lists) work as well as
        # numpy scalars, which have a .round() method but floats do not.
        if self.monitor_metric == 'acc':
            # For Classification
            self.train_score = round(self.train_score * 100, 2)
            self.val_score = round(self.val_score * 100, 2)
        else:
            # For Regression
            self.train_score = round(self.train_score, 6)
            self.val_score = round(self.val_score, 6)

    # MODEL BUILDING TASKS
    # ------------------------------------------------------------------------------
    # Add Dropout layer inside model (no-op unless do_dropout is set)
    def add_dropout(self):
        if self.do_dropout:
            self.compiled_model.add(Dropout(self.dropout_rate))
        return self.compiled_model

    # Batch Normalization - Apply Z-score on inputs inside NN (no-op unless enabled)
    def add_batch_norm(self):
        if self.do_batch_norm:
            self.compiled_model.add(BatchNormalization())
        return self.compiled_model

    # Add layer for flattening the dimension of input
    def add_flat(self):
        self.compiled_model.add(Flatten())
        return self.compiled_model

    # VISUALIZE TRAINING
    # ------------------------------------------------------------------------------
    def plot_training_loss(self, show=True):
        """Plot (and save) the training/validation loss curves."""
        sns.set()
        plt.plot(self.training_history.history['loss'])
        plt.plot(self.training_history.history['val_loss'])
        # NOTE(review): history['loss'][1] assumes at least 2 epochs were run
        y_bottom_border = self.training_history.history['loss'][-1] - 0.05
        y_top_border = self.training_history.history['loss'][1] + 0.125
        plt.ylim(y_bottom_border, y_top_border)
        plt.title('Model Training Error')
        plt.ylabel('Error')
        plt.xlabel('Epoch')
        plt.legend(['train', 'validation'], loc='upper right')
        plt.savefig(f'{self.models_folder}/{self.model_name}/training_error.png',
                    bbox_inches='tight', dpi=150)
        if show:
            return plt.show()
        else:
            plt.show(block=False)
            return plt.close()

    def plot_training_metric(self, show=True):
        """Plot (and save) the training/validation accuracy curves."""
        sns.set()
        plt.plot(self.training_history.history[self.monitor_metric])
        plt.plot(self.training_history.history[self.val_monitor_metric])
        plt.title('Model Training Accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['train', 'validation'], loc='lower right')
        plt.savefig(f'{self.models_folder}/{self.model_name}/training_accuracy.png',
                    bbox_inches='tight', dpi=150)
        if show:
            return plt.show()
        else:
            plt.show(block=False)
            return plt.close()

    # OTHER
    # ------------------------------------------------------------------------------
    def create_folder(self, name):
        """Create ``<models_folder>/<name>`` if it does not exist yet."""
        target = f'{self.models_folder}/{name}'
        if not os.path.exists(target):
            print(f'Inside {self.models_folder} create folder {name}')
            os.makedirs(target)
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"seaborn.set",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"keras.callbacks.EarlyStopping",
"matplotlib.pyplot.ylim",
"keras.optimizers.Adam",
"matplotlib.pyplot.savefig",
"keras.layers.Flatten",
"ke... | [((4245, 4278), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.starting_learn_rate'}), '(lr=self.starting_learn_rate)\n', (4249, 4278), False, 'from keras.optimizers import Adam\n'), ((5451, 5574), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'self.val_monitor_metric', 'min_delta': '(1e-10)', 'patience': 'self.stop_training_patience', 'verbose': 'verbose'}), '(monitor=self.val_monitor_metric, min_delta=1e-10, patience=\n self.stop_training_patience, verbose=verbose)\n', (5464, 5574), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard\n'), ((5747, 5913), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': 'self.val_monitor_metric', 'factor': 'self.learning_rate_reduction', 'patience': 'self.learning_rate_patience', 'min_lr': '(1e-06)', 'verbose': 'verbose'}), '(monitor=self.val_monitor_metric, factor=self.\n learning_rate_reduction, patience=self.learning_rate_patience, min_lr=\n 1e-06, verbose=verbose)\n', (5764, 5913), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard\n'), ((6106, 6270), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'f"""{self.models_folder}/{self.model_name}/{self.model_name}.hdf5"""', 'monitor': 'self.val_monitor_metric', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=\n f'{self.models_folder}/{self.model_name}/{self.model_name}.hdf5',\n monitor=self.val_monitor_metric, verbose=1, save_best_only=True)\n", (6121, 6270), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard\n'), ((7557, 7627), 'numpy.max', 'np.max', (['self.training_history.history[self.val_monitor_metric]'], {'axis': '(0)'}), '(self.training_history.history[self.val_monitor_metric], axis=0)\n', (7563, 7627), True, 'import numpy as np\n'), ((7705, 7778), 'numpy.argmax', 'np.argmax', (['self.training_history.history[self.val_monitor_metric]'], 
{'axis': '(0)'}), '(self.training_history.history[self.val_monitor_metric], axis=0)\n', (7714, 7778), True, 'import numpy as np\n'), ((9201, 9210), 'seaborn.set', 'sns.set', ([], {}), '()\n', (9208, 9210), True, 'import seaborn as sns\n'), ((9219, 9266), 'matplotlib.pyplot.plot', 'plt.plot', (["self.training_history.history['loss']"], {}), "(self.training_history.history['loss'])\n", (9227, 9266), True, 'import matplotlib.pyplot as plt\n'), ((9275, 9326), 'matplotlib.pyplot.plot', 'plt.plot', (["self.training_history.history['val_loss']"], {}), "(self.training_history.history['val_loss'])\n", (9283, 9326), True, 'import matplotlib.pyplot as plt\n'), ((9482, 9521), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_bottom_border', 'y_top_border'], {}), '(y_bottom_border, y_top_border)\n', (9490, 9521), True, 'import matplotlib.pyplot as plt\n'), ((9530, 9563), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Training Error"""'], {}), "('Model Training Error')\n", (9539, 9563), True, 'import matplotlib.pyplot as plt\n'), ((9572, 9591), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (9582, 9591), True, 'import matplotlib.pyplot as plt\n'), ((9600, 9619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (9610, 9619), True, 'import matplotlib.pyplot as plt\n'), ((9628, 9682), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper right"""'}), "(['train', 'validation'], loc='upper right')\n", (9638, 9682), True, 'import matplotlib.pyplot as plt\n'), ((9691, 9798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{self.models_folder}/{self.model_name}/training_error.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "(f'{self.models_folder}/{self.model_name}/training_error.png',\n bbox_inches='tight', dpi=150)\n", (9702, 9798), True, 'import matplotlib.pyplot as plt\n'), ((9997, 10006), 'seaborn.set', 'sns.set', ([], {}), '()\n', (10004, 10006), True, 'import seaborn as 
sns\n'), ((10015, 10075), 'matplotlib.pyplot.plot', 'plt.plot', (['self.training_history.history[self.monitor_metric]'], {}), '(self.training_history.history[self.monitor_metric])\n', (10023, 10075), True, 'import matplotlib.pyplot as plt\n'), ((10084, 10148), 'matplotlib.pyplot.plot', 'plt.plot', (['self.training_history.history[self.val_monitor_metric]'], {}), '(self.training_history.history[self.val_monitor_metric])\n', (10092, 10148), True, 'import matplotlib.pyplot as plt\n'), ((10157, 10193), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Training Accuracy"""'], {}), "('Model Training Accuracy')\n", (10166, 10193), True, 'import matplotlib.pyplot as plt\n'), ((10202, 10224), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (10212, 10224), True, 'import matplotlib.pyplot as plt\n'), ((10233, 10252), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (10243, 10252), True, 'import matplotlib.pyplot as plt\n'), ((10261, 10315), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""lower right"""'}), "(['train', 'validation'], loc='lower right')\n", (10271, 10315), True, 'import matplotlib.pyplot as plt\n'), ((10324, 10434), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{self.models_folder}/{self.model_name}/training_accuracy.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "(f'{self.models_folder}/{self.model_name}/training_accuracy.png',\n bbox_inches='tight', dpi=150)\n", (10335, 10434), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3075), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (3066, 3075), True, 'import numpy as np\n'), ((3077, 3094), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (3085, 3094), True, 'import numpy as np\n'), ((3885, 3901), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (3893, 3901), True, 'import numpy as np\n'), ((3903, 3919), 'numpy.array', 'np.array', (['y_test'], {}), 
'(y_test)\n', (3911, 3919), True, 'import numpy as np\n'), ((3921, 3943), 'numpy.array', 'np.array', (['y_test_price'], {}), '(y_test_price)\n', (3929, 3943), True, 'import numpy as np\n'), ((8991, 9000), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8998, 9000), False, 'from keras.layers import BatchNormalization, Flatten\n'), ((9851, 9861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9859, 9861), True, 'import matplotlib.pyplot as plt\n'), ((9888, 9909), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (9896, 9909), True, 'import matplotlib.pyplot as plt\n'), ((9929, 9940), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9938, 9940), True, 'import matplotlib.pyplot as plt\n'), ((10487, 10497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10495, 10497), True, 'import matplotlib.pyplot as plt\n'), ((10524, 10545), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (10532, 10545), True, 'import matplotlib.pyplot as plt\n'), ((10565, 10576), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10574, 10576), True, 'import matplotlib.pyplot as plt\n'), ((8592, 8618), 'keras.layers.Dropout', 'Dropout', (['self.dropout_rate'], {}), '(self.dropout_rate)\n', (8599, 8618), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((8819, 8839), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8837, 8839), False, 'from keras.layers import BatchNormalization, Flatten\n')] |
#coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import shutil
import numpy as np
import PIL.Image as Image
import tensorflow as tf
import pandas as pd
import retrain as retrain
from count_ops import load_graph
import time
import scipy.io as sio
sys.path.append("/home/deepl/PHICOMM/FoodAI/FoodAi/tensorflow/tensorflow_models/models/research/PHICOMM/slim")
from nets import nets_factory
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
    """Prepare one image tensor for evaluation.

    The image is converted to float32 in [0, 1] (if needed), optionally
    center-cropped to ``central_fraction`` of its area, optionally resized
    to ``height`` x ``width`` with bilinear interpolation, and finally
    rescaled to [-1, 1].

    Args:
      image: 3-D image Tensor. Non-float32 inputs are converted assuming
        their full positive integer range (see tf.image.convert_image_dtype).
      height: integer target height (resize skipped if falsy).
      width: integer target width (resize skipped if falsy).
      central_fraction: Optional float, fraction of the image to keep.
      scope: unused, kept for interface compatibility.

    Returns:
      3-D float Tensor of the prepared image.
    """
    img = image
    if img.dtype != tf.float32:
        img = tf.image.convert_image_dtype(img, dtype=tf.float32)
    # Keep only the central region (87.5% of the area by default).
    if central_fraction:
        img = tf.image.central_crop(img, central_fraction=central_fraction)
    if height and width:
        # resize_bilinear needs a batch dimension; add it and strip it again
        batched = tf.expand_dims(img, 0)
        batched = tf.image.resize_bilinear(batched, [height, width],
                                           align_corners=False)
        img = tf.squeeze(batched, [0])
    # Map [0, 1] -> [-1, 1]
    img = tf.subtract(img, 0.5)
    img = tf.multiply(img, 2.0)
    return img
def read_tensor_from_jpg_image_file(input_height=299, input_width=299,
                                    input_mean=0, input_std=255):
    """Build a graph snippet that decodes and preprocesses one JPEG file.

    Creates a string placeholder named ``"fnamejpg"``; feeding a file path to
    it yields the decoded 3-channel image, preprocessed by
    ``preprocess_for_eval`` (center crop, resize, rescale to [-1, 1]).

    Args:
      input_height: target image height for preprocessing.
      input_width: target image width for preprocessing.
      input_mean: unused, kept for backward compatibility of the signature.
      input_std: unused, kept for backward compatibility of the signature.

    Returns:
      The preprocessed-image Tensor (evaluate it with
      ``feed_dict={"fnamejpg:0": path}``).
    """
    input_name = "file_reader"
    # Placeholder for the file name so one graph can read many files.
    file_name_placeholder = tf.placeholder("string", name="fnamejpg")
    file_reader = tf.read_file(file_name_placeholder, input_name)
    # JPEG-only decoding (the PNG/GIF/BMP branches of the original helper
    # were dead code here and have been removed).
    image_reader = tf.image.decode_jpeg(file_reader, channels=3,
                                        name='jpeg_reader')
    normalized = preprocess_for_eval(image_reader, input_height, input_width)
    return normalized
def extract():
    """Evaluate a MobileNetV2 checkpoint on the CIFAR-10 test TFRecord.

    Enumerates test images on disk (only to size the batch loop), streams
    the actual pixel data and labels from a hard-coded TFRecord file, runs
    them through a restored MobileNetV2 checkpoint in batches of 128, and
    finally computes the accuracy with ``retrain.add_evaluation_step``.
    Prints progress and the final accuracy; aborts early if a ``./data``
    directory already exists.

    NOTE(review): all dataset/checkpoint paths are hard-coded below, and the
    trailing ``file_num % batch_size`` images are silently dropped because
    the loop runs ``file_num // batch_size`` full batches only.
    """
    #sess=tf.Session()
    # load the graph and the parameter variables first
    # for op in graph.get_operations():
    #     print(str(op.name))
    # var = tf.global_variables()  # list them all
    # for i in var:
    #     print(i)
    # Names of the graph's input and output ops.
    input_layer= "input"
    #nput_layer = "MobilenetV2/input"
    output_layer= "MobilenetV2/Predictions/Reshape_1"
    #output_layer= "MobilenetV2/Logits/output"
    #graph = load_graph('./frozen_pb/frozen_0-0-0.pb')
    #with graph.as_default() as g:
    #    image_buffer_input = g.get_tensor_by_name('input:0')
    #    final_tensor = g.get_tensor_by_name('MobilenetV2/Logits/AvgPool:0')
    #image_dir = '/home/xxxx/PHICOMM/ai-share/dataset/imagenet/raw-data/imagenet-data/validation'
    image_dir="/home/deepl/PHICOMM/dataset/cifar10_tf/cifar-10/test"
    testing_percentage = 100
    validation_percentage = 0
    category='testing'
    # Enumerate the test images on disk.
    image_lists = retrain.create_image_lists(
        image_dir, testing_percentage,
        validation_percentage)
    class_count = len(image_lists.keys())
    print(class_count)
    total_start = time.time()
    ground_truths = []
    filenames = []
    all_files_df = pd.DataFrame(columns=['image_name', 'ground_truth',"predecit_label"])
    # Collect file names only; labels are taken from the TFRecord below.
    # NOTE(review): assumes the on-disk file list and the TFRecord contain
    # the same images — verify before trusting the reported accuracy.
    for label_index, label_name in enumerate(image_lists.keys()):
        for image_index, image_name in enumerate(image_lists[label_name][category]):
            image_name = retrain.get_image_path(
                image_lists, label_name, image_index, image_dir, category)
            # ground_truth = np.zeros([1, class_count], dtype=np.float32)
            # ground_truth[0, label_index] = 1.0
            # ground_truths.append(ground_truth)
            filenames.append(image_name)
            # ground_truth_argmax= np.argmax(ground_truth,axis =1)
            # ground_truth_argmax = np.squeeze(ground_truth_argmax)
            #all_files_df=all_files_df.append([{'image_name':image_name, 'ground_truth':ground_truth_argmax}],ignore_index=True)
    #all_files_df.to_csv("ground_truth1.csv")
    #print(filenames)
    # Refuse to run over a previous feature dump.
    if os.path.exists("./data"):
        print("data is exist, please delete it!")
        exit()
    #shutil.rmtree("./data")
    #os.makedirs("./data")
    #sio.savemat('./data/truth.mat',{"truth": ground_truths})
    # NOTE(review): 'cf', 'c' and the initial 'i'/'start' appear unused.
    cf = 0.875
    predictions = []
    i = 0
    start = time.time()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    # Stream images/labels straight from the TFRecord file.
    record_iterator = tf.python_io.tf_record_iterator(path='/home/deepl/PHICOMM/dataset/cifar10_tf/cifar10_test.tfrecord')
    c =0
    # for string_iterator in record_iterator:
    #     c += 1
    #     example = tf.train.Example()
    #     example.ParseFromString(string_iterator)
    #     height = example.features.feature['image/height'].int64_list.value[0]
    #     width = example.features.feature['image/width'].int64_list.value[0]
    #     png_string = example.features.feature['image/encoded'].bytes_list.value[0]
    #     label = example.features.feature['image/class/label'].int64_list.value[0]
    example = tf.train.Example()
    with tf.Session(config=config) as sess:
    #with tf.Session(graph=graph) as sess:
        # Build MobileNetV2 and restore the trained checkpoint.
        network_fn = nets_factory.get_network_fn(
            "mobilenet_v2",
            num_classes=10,
            is_training=False)
        image_size = 224
        placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                     shape=[128, image_size,
                                            image_size, 3])
        logits, _ = network_fn(placeholder)
        graph = tf.get_default_graph()
        saver = tf.train.Saver()
        #raph_def = graph.as_graph_def()
        #aver = tf.train.import_meta_graph(graph_def)
        #saver = tf.train.import_meta_graph('./mobilenetv2_on_cifar10_check_point/0/model_0/model.ckpt-20000.meta')
        saver.restore(sess,'./mobilenetv2_on_cifar10_check_point/0/model_0/model.ckpt-20000')
        # Initalize the variables
        #sess.run(tf.global_variables_initializer())
        output_operation = graph.get_operation_by_name(output_layer);
        input_operation = graph.get_operation_by_name(input_layer);
        read_tensor_from_jpg_image_file_op = read_tensor_from_jpg_image_file(
            input_height=224,
            input_width=224)
        file_num = len(filenames)
        batch_size = 128
        # Only full batches are processed; the remainder is dropped.
        count = file_num // batch_size
        print("need %d batch, every batch is %d"%(count,batch_size))
        # Decode/preprocess pipeline for the encoded image strings from the
        # TFRecord (decode_png also handles the stored encoding here).
        image_placeholder = tf.placeholder(dtype=tf.string)
        decoded_img = tf.image.decode_png(image_placeholder, channels=3)
        normalized = preprocess_for_eval(decoded_img, 224, 224)
        for i in range(count):
            print("this is %d batch"%i)
            print("jpg order get from %d to %d"%(batch_size*i,batch_size*i+batch_size-1))
            # Assemble one batch of preprocessed images and one-hot labels.
            for j in range(batch_size):
                example.ParseFromString(next(record_iterator))
                png_string = example.features.feature['image/encoded'].bytes_list.value[0]
                label_tf_name = example.features.feature['image/class/label'].int64_list.value[0]
                ground_truth = np.zeros([1, 10], dtype=np.float32)
                ground_truth[0, label_tf_name] = 1.0
                ground_truths.append(ground_truth)
                if j == 0:
                    # t_batch = sess.run(read_tensor_from_jpg_image_file_op,feed_dict={"fnamejpg:0": filenames[batch_size*i+j]})
                    t_batch = sess.run(normalized,feed_dict={image_placeholder: png_string})
                    t_batch = np.expand_dims(t_batch,axis = 0)
                else:
                    t = sess.run(normalized,feed_dict={image_placeholder: png_string})
                    t = np.expand_dims(t,axis = 0)
                    t_batch = np.concatenate((t_batch,t), axis=0)
            print(t_batch.shape)
            # feed_dict={image_buffer_input: t}
            # ft = final_tensor.eval(feed_dict, sess)
            # pre = sess.run(output_operation.outputs[0],
            # {input_operation.outputs[0]: t_batch})
            pre = sess.run(logits,
                         {input_operation.outputs[0]: t_batch})
            print(pre.shape)
            predictions.extend(pre)
            #i = i + 1
            #print(i)
    predictions = np.array(predictions)
    #print(predictions.shape)
    #predictions = np.squeeze(predictions)
    ground_truths = np.array(ground_truths)
    ground_truths = np.squeeze(ground_truths)
    print(predictions.shape)
    print(ground_truths.shape)
    # Second session: compute accuracy over all collected predictions.
    with tf.Session(config=config) as sess:
    # with tf.Session(graph=graph) as sess:
        ground_truth_input = tf.placeholder(
            tf.float32, [None, 10], name='GroundTruthInput')
        fts = tf.placeholder(tf.float32, [None, 10], name='fts')
        accuracy, _ = retrain.add_evaluation_step(fts, ground_truth_input)
        feed_dict={fts: predictions, ground_truth_input: ground_truths}
        #accuracies.append(accuracy.eval(feed_dict, sess))
        ret = accuracy.eval(feed_dict, sess)
    # for index, row in all_files_df.iterrows():
    #     row['predecit_label'] = np.squeeze(np.argmax(predictions[index,:],axis=0))
    # all_files_df.to_csv("ground_truth.csv")
    print('Ensemble Accuracy: %g' % ret)
    stop = time.time()
    #print(str((stop-start)/len(ftg))+' seconds.')
    #sio.savemat('./data/feature.mat',{"feature": ftg})
    total_stop = time.time()
    print("total time is "+str((total_stop-total_start))+' seconds.')
if __name__ == "__main__":
    # Silence TensorFlow C++ logging ('2' = show errors only).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    # Run the full feature-extraction / evaluation pipeline defined above.
    extract()
| [
"tensorflow.image.convert_image_dtype",
"tensorflow.multiply",
"numpy.array",
"sys.path.append",
"os.path.exists",
"retrain.get_image_path",
"tensorflow.train.Example",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.placeholder",
"tensorflow.image.resize_bilinear",
"tensorflow.Session",
... | [((364, 484), 'sys.path.append', 'sys.path.append', (['"""/home/deepl/PHICOMM/FoodAI/FoodAi/tensorflow/tensorflow_models/models/research/PHICOMM/slim"""'], {}), "(\n '/home/deepl/PHICOMM/FoodAI/FoodAi/tensorflow/tensorflow_models/models/research/PHICOMM/slim'\n )\n", (379, 484), False, 'import sys\n'), ((1970, 1993), 'tensorflow.subtract', 'tf.subtract', (['image', '(0.5)'], {}), '(image, 0.5)\n', (1981, 1993), True, 'import tensorflow as tf\n'), ((2004, 2027), 'tensorflow.multiply', 'tf.multiply', (['image', '(2.0)'], {}), '(image, 2.0)\n', (2015, 2027), True, 'import tensorflow as tf\n'), ((2281, 2322), 'tensorflow.placeholder', 'tf.placeholder', (['"""string"""'], {'name': '"""fnamejpg"""'}), "('string', name='fnamejpg')\n", (2295, 2322), True, 'import tensorflow as tf\n'), ((2340, 2387), 'tensorflow.read_file', 'tf.read_file', (['file_name_placeholder', 'input_name'], {}), '(file_name_placeholder, input_name)\n', (2352, 2387), True, 'import tensorflow as tf\n'), ((2982, 3047), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['file_reader'], {'channels': '(3)', 'name': '"""jpeg_reader"""'}), "(file_reader, channels=3, name='jpeg_reader')\n", (3002, 3047), True, 'import tensorflow as tf\n'), ((4121, 4206), 'retrain.create_image_lists', 'retrain.create_image_lists', (['image_dir', 'testing_percentage', 'validation_percentage'], {}), '(image_dir, testing_percentage, validation_percentage\n )\n', (4147, 4206), True, 'import retrain as retrain\n'), ((4303, 4314), 'time.time', 'time.time', ([], {}), '()\n', (4312, 4314), False, 'import time\n'), ((4378, 4448), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['image_name', 'ground_truth', 'predecit_label']"}), "(columns=['image_name', 'ground_truth', 'predecit_label'])\n", (4390, 4448), True, 'import pandas as pd\n'), ((5237, 5261), 'os.path.exists', 'os.path.exists', (['"""./data"""'], {}), "('./data')\n", (5251, 5261), False, 'import os\n'), ((5511, 5522), 'time.time', 'time.time', ([], {}), 
'()\n', (5520, 5522), False, 'import time\n'), ((5539, 5555), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5553, 5555), True, 'import tensorflow as tf\n'), ((5620, 5725), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', ([], {'path': '"""/home/deepl/PHICOMM/dataset/cifar10_tf/cifar10_test.tfrecord"""'}), "(path=\n '/home/deepl/PHICOMM/dataset/cifar10_tf/cifar10_test.tfrecord')\n", (5651, 5725), True, 'import tensorflow as tf\n'), ((6216, 6234), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (6232, 6234), True, 'import tensorflow as tf\n'), ((9496, 9517), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (9504, 9517), True, 'import numpy as np\n'), ((9611, 9634), 'numpy.array', 'np.array', (['ground_truths'], {}), '(ground_truths)\n', (9619, 9634), True, 'import numpy as np\n'), ((9655, 9680), 'numpy.squeeze', 'np.squeeze', (['ground_truths'], {}), '(ground_truths)\n', (9665, 9680), True, 'import numpy as np\n'), ((10484, 10495), 'time.time', 'time.time', ([], {}), '()\n', (10493, 10495), False, 'import time\n'), ((10620, 10631), 'time.time', 'time.time', ([], {}), '()\n', (10629, 10631), False, 'import time\n'), ((1428, 1481), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (1456, 1481), True, 'import tensorflow as tf\n'), ((1619, 1682), 'tensorflow.image.central_crop', 'tf.image.central_crop', (['image'], {'central_fraction': 'central_fraction'}), '(image, central_fraction=central_fraction)\n', (1640, 1682), True, 'import tensorflow as tf\n'), ((1779, 1803), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1793, 1803), True, 'import tensorflow as tf\n'), ((1816, 1885), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', '[height, width]'], {'align_corners': '(False)'}), '(image, [height, width], align_corners=False)\n', (1840, 
1885), True, 'import tensorflow as tf\n'), ((1937, 1959), 'tensorflow.squeeze', 'tf.squeeze', (['image', '[0]'], {}), '(image, [0])\n', (1947, 1959), True, 'import tensorflow as tf\n'), ((6246, 6271), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (6256, 6271), True, 'import tensorflow as tf\n'), ((6345, 6423), 'nets.nets_factory.get_network_fn', 'nets_factory.get_network_fn', (['"""mobilenet_v2"""'], {'num_classes': '(10)', 'is_training': '(False)'}), "('mobilenet_v2', num_classes=10, is_training=False)\n", (6372, 6423), False, 'from nets import nets_factory\n'), ((6508, 6598), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""input"""', 'dtype': 'tf.float32', 'shape': '[128, image_size, image_size, 3]'}), "(name='input', dtype=tf.float32, shape=[128, image_size,\n image_size, 3])\n", (6522, 6598), True, 'import tensorflow as tf\n'), ((6736, 6758), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (6756, 6758), True, 'import tensorflow as tf\n'), ((6775, 6791), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6789, 6791), True, 'import tensorflow as tf\n'), ((7687, 7718), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (7701, 7718), True, 'import tensorflow as tf\n'), ((7741, 7791), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image_placeholder'], {'channels': '(3)'}), '(image_placeholder, channels=3)\n', (7760, 7791), True, 'import tensorflow as tf\n'), ((9752, 9777), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9762, 9777), True, 'import tensorflow as tf\n'), ((9859, 9922), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {'name': '"""GroundTruthInput"""'}), "(tf.float32, [None, 10], name='GroundTruthInput')\n", (9873, 9922), True, 'import tensorflow as tf\n'), ((9950, 10000), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], 
{'name': '"""fts"""'}), "(tf.float32, [None, 10], name='fts')\n", (9964, 10000), True, 'import tensorflow as tf\n'), ((10023, 10075), 'retrain.add_evaluation_step', 'retrain.add_evaluation_step', (['fts', 'ground_truth_input'], {}), '(fts, ground_truth_input)\n', (10050, 10075), True, 'import retrain as retrain\n'), ((4619, 4704), 'retrain.get_image_path', 'retrain.get_image_path', (['image_lists', 'label_name', 'image_index', 'image_dir', 'category'], {}), '(image_lists, label_name, image_index, image_dir,\n category)\n', (4641, 4704), True, 'import retrain as retrain\n'), ((8340, 8375), 'numpy.zeros', 'np.zeros', (['[1, 10]'], {'dtype': 'np.float32'}), '([1, 10], dtype=np.float32)\n', (8348, 8375), True, 'import numpy as np\n'), ((8757, 8788), 'numpy.expand_dims', 'np.expand_dims', (['t_batch'], {'axis': '(0)'}), '(t_batch, axis=0)\n', (8771, 8788), True, 'import numpy as np\n'), ((8924, 8949), 'numpy.expand_dims', 'np.expand_dims', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (8938, 8949), True, 'import numpy as np\n'), ((8981, 9017), 'numpy.concatenate', 'np.concatenate', (['(t_batch, t)'], {'axis': '(0)'}), '((t_batch, t), axis=0)\n', (8995, 9017), True, 'import numpy as np\n')] |
r"""
The modified gamma distribution PSD
===================================
The form of the modified gamma distribution (MGD) used here is as follows:
.. math::
\frac{N(X)}{dX} = N \frac{\nu}{\Gamma(1 + \alpha)}\lambda^{\nu(1 + \alpha)}
D^{\nu(1 + \alpha) - 1} \cdot \exp \{-(\lambda D)^\nu\}.
The distribution is described by four parameters:
1. The intercept parameter :math:`N`
2. The slope parameter :math:`\lambda`
3. The shape parameter :math:`\alpha`
4. The parameter :math:`\nu`
"""
import numpy as np
import scipy as sp
from scipy.special import gamma
from artssat import dimensions as dim
from artssat.scattering.psd.arts.arts_psd import ArtsPSD
from artssat.scattering.psd.data.psd_data import PSDData, D_eq
class ModifiedGamma(ArtsPSD):
    r"""
    The :class:`ModifiedGamma` class describes the size distribution of
    scattering particles in an atmosphere using the four parameters of the
    particle size distribution.
    """
    properties = [("intercept_parameter", (dim.p, dim.lat, dim.lon), np.ndarray),
                  ("alpha", (dim.p, dim.lat, dim.lon), np.ndarray),
                  ("lmbd", (dim.p, dim.lat, dim.lon), np.ndarray),
                  ("nu", (dim.p, dim.lat, dim.lon), np.ndarray)]

    def __init__(self,
                 size_parameter,
                 intercept_parameter = None,
                 alpha = None,
                 lmbd = None,
                 nu = None):
        r"""
        Create instance of the modified gamma distribution with given parameters.

        If any of the parameters is neither provided and nor explicitly set
        afterwards, it will be requested from the data provider. However, most
        operations on PSDs will require the values to be set and can thus first
        be performed when the object has access to the data.

        Parameters:
            size_parameter(SizeParameter): The SizeParameter instance describing
                the size parameter that should be used for the PSD.
            intercept_parameter(numpy.float or ndarray): The intercept parameter
                :math:`N`
            alpha(numpy.float or ndarray): The shape parameter :math:`\alpha`.
                Must be broadcastable into the shape of N.
            lmbd(numpy.float or ndarray): The slope parameter :math:`\lambda`.
                Must be broadcastable into the shape of N.
            nu(numpy.float or ndarray): The :math:`\nu` parameter. Must be
                broadcastable into the shape of N.
        """
        if intercept_parameter is not None:
            self.intercept_parameter = intercept_parameter
            shape = self.intercept_parameter.shape

            # np.broadcast_to raises ValueError on incompatible shapes; only
            # that error is translated into the user-facing message.
            if alpha is not None:
                try:
                    self.alpha = np.broadcast_to(alpha, shape)
                except ValueError as error:
                    raise Exception("Could not broadcast alpha parameter to shape "
                                    "of intercept parameter.") from error

            if lmbd is not None:
                try:
                    self.lmbd = np.broadcast_to(lmbd, shape)
                except ValueError as error:
                    raise Exception("Could not broadcast lambda parameter to shape "
                                    " of intercept parameter.") from error

            if nu is not None:
                try:
                    self.nu = np.broadcast_to(nu, shape)
                except ValueError as error:
                    raise Exception("Could not broadcast nu parameter to shape "
                                    " of N parameter.") from error

        super().__init__(size_parameter)

    def _get_parameters(self):
        """
        Checks if parameters of the PSD are available and tries to broadcast
        them to the shape of the intercept parameter.

        Returns:
            :code:`tuple(n, alpha, lmbd, nu)` containing the four parameters of
            the PSD.

        Raises:
            An exception if any of the MGD parameters is not set or cannot be
            broadcasted.
        """
        n = self.intercept_parameter
        if n is None:
            raise Exception("The intercept parameter needs to be set to use"
                            " this function.")
        shape = n.shape

        # Lambda parameter
        lmbd = self.lmbd
        if lmbd is None:
            raise Exception("The lambda parameter needs to be set to use "
                            "this function")
        try:
            lmbd = np.broadcast_to(lmbd, shape)
        except ValueError as error:
            raise Exception("Could not broadcast lambda parameter to the shape"
                            "of the provided intercept parameter N.") from error

        # Alpha parameter
        alpha = self.alpha
        if alpha is None:
            raise Exception("The alpha parameter needs to be set to use "
                            "this function.")
        try:
            alpha = np.broadcast_to(alpha, shape)
        except ValueError as error:
            raise Exception("Could not broadcast alpha parameter to the shape"
                            "of the provided intercept parameter N.") from error

        # Nu parameter
        nu = self.nu
        if nu is None:
            raise Exception("The nu parameter needs to be set to use this"
                            "function.")
        try:
            nu = np.broadcast_to(nu, shape)
        except ValueError as error:
            raise Exception("Could not broadcast nu parameter to the shape"
                            "of the provided intercept parameter N.") from error

        return n, lmbd, alpha, nu

    @property
    def moment_names(self):
        r"""
        The free parameters of the PSD.
        """
        return []

    def get_moment(self, p, reference_size_parameter = None):
        r"""
        Computes the :math:`p` th moment :math:`M(p)` of the PSD using

        .. math::
            M(p) = \frac{N}{\lambda} \frac{\Gamma (1 + \alpha + p / \nu )}
            {\Gamma({1 + \alpha})}.

        Parameters:
            p(np.float): Which moment of the PSD to compute.

        Raises:
            Exception: If any of the parameters of the PSD is not set.
        """
        if reference_size_parameter is not None:
            # Convert the moment order into the reference size parameter
            # using the mass-size relation coefficients of both parameters.
            a1 = self.size_parameter.a
            b1 = self.size_parameter.b
            a2 = reference_size_parameter.a
            b2 = reference_size_parameter.b
            c = (a1 / a2) ** (p / b2)
            p = p * b1 / b2
        else:
            c = 1.0

        n, lmbd, alpha, nu = self._get_parameters()
        m = n / lmbd ** p
        m *= gamma(1 + alpha + p / nu)
        m /= gamma(1 + alpha)
        return c * m

    def get_mass_density(self):
        r"""
        Computes the mass density :math: `\rho_m` for the given bulk elements
        using

        .. math::
            \rho_m = a \cdot M(b).

        where :math:`a` and :math:`b` are the coefficients of the mass-size
        relation of the size parameter.

        Returns:
            :code:`numpy.ndarray` containing the mass density corresponding
            to each volume element described by the PSD.
        """
        a = self.size_parameter.a
        b = self.size_parameter.b
        return a * self.get_moment(b)

    @property
    def pnd_call_agenda(self):
        r"""
        ARTS agenda that contains the call to the WSM that computes this PSD.
        """
        # Scalar parameters are passed to psdMgd; NaN signals "not fixed".
        n0 = np.nan
        if self.intercept_parameter is not None \
           and self.intercept_parameter.size == 1:
            n0 = self.intercept_parameter[0]

        # Fix: the original read the non-existent attribute `self.mu`; the
        # shape parameter of this class is stored as `alpha`.  The ARTS
        # psdMgd WSM calls the shape parameter `mu` -- TODO confirm mapping.
        mu = np.nan
        if self.alpha is not None \
           and self.alpha.size == 1:
            mu = self.alpha[0]

        lmbd = np.nan
        if self.lmbd is not None \
           and self.lmbd.size == 1:
            lmbd = self.lmbd[0]

        nu = np.nan
        if self.nu is not None \
           and self.nu.size == 1:
            nu = self.nu[0]

        # NOTE(review): `arts_agenda` is not imported in this module; it must
        # come from the runtime environment -- confirm the import.
        @arts_agenda
        def pnd_call(ws):
            ws.psdMgd(n0 = n0,
                      mu = mu,
                      la = lmbd,   # Fix: was the undefined name `lambd`.
                      gam = nu,
                      t_min = self.t_min,
                      t_max = self.t_max)
        return pnd_call

    def evaluate(self, x):
        r"""
        Computes the values of this modified gamma distribution evaluated at
        the given size grid :code:`x`.

        Parameters:
            x(numpy.array): Array containing the values of the size parameter
                at which to evaluate the PSD.

        Returns:
            :class:`PSDData` object containing the numeric PSD data obtained
            by evaluating this PSD at the given values of the size parameter.
        """
        n, lmbd, alpha, nu = self._get_parameters()
        shape = n.shape

        # Append a trailing axis so the parameters broadcast against the
        # size grid x.
        n = n.reshape(shape + (1,))
        lmbd = lmbd.reshape(shape + (1,))
        alpha = alpha.reshape(shape + (1,))
        nu = nu.reshape(shape + (1,))

        # Fix: removed leftover debug print of the parameters.
        y = n * nu / gamma(1 + alpha)
        y *= lmbd ** (nu * (1.0 + alpha))
        y = y * x ** (nu * (1.0 + alpha) - 1) \
            * np.exp(- (lmbd * x) ** nu)

        return PSDData(x, y, self.size_parameter)
| [
"numpy.exp",
"artssat.scattering.psd.data.psd_data.PSDData",
"scipy.special.gamma",
"numpy.broadcast_to"
] | [((6439, 6464), 'scipy.special.gamma', 'gamma', (['(1 + alpha + p / nu)'], {}), '(1 + alpha + p / nu)\n', (6444, 6464), False, 'from scipy.special import gamma\n'), ((6478, 6494), 'scipy.special.gamma', 'gamma', (['(1 + alpha)'], {}), '(1 + alpha)\n', (6483, 6494), False, 'from scipy.special import gamma\n'), ((9014, 9048), 'artssat.scattering.psd.data.psd_data.PSDData', 'PSDData', (['x', 'y', 'self.size_parameter'], {}), '(x, y, self.size_parameter)\n', (9021, 9048), False, 'from artssat.scattering.psd.data.psd_data import PSDData, D_eq\n'), ((4383, 4411), 'numpy.broadcast_to', 'np.broadcast_to', (['lmbd', 'shape'], {}), '(lmbd, shape)\n', (4398, 4411), True, 'import numpy as np\n'), ((4811, 4840), 'numpy.broadcast_to', 'np.broadcast_to', (['alpha', 'shape'], {}), '(alpha, shape)\n', (4826, 4840), True, 'import numpy as np\n'), ((5221, 5247), 'numpy.broadcast_to', 'np.broadcast_to', (['nu', 'shape'], {}), '(nu, shape)\n', (5236, 5247), True, 'import numpy as np\n'), ((8850, 8866), 'scipy.special.gamma', 'gamma', (['(1 + alpha)'], {}), '(1 + alpha)\n', (8855, 8866), False, 'from scipy.special import gamma\n'), ((8972, 8997), 'numpy.exp', 'np.exp', (['(-(lmbd * x) ** nu)'], {}), '(-(lmbd * x) ** nu)\n', (8978, 8997), True, 'import numpy as np\n'), ((2774, 2803), 'numpy.broadcast_to', 'np.broadcast_to', (['alpha', 'shape'], {}), '(alpha, shape)\n', (2789, 2803), True, 'import numpy as np\n'), ((3038, 3066), 'numpy.broadcast_to', 'np.broadcast_to', (['lmbd', 'shape'], {}), '(lmbd, shape)\n', (3053, 3066), True, 'import numpy as np\n'), ((3299, 3325), 'numpy.broadcast_to', 'np.broadcast_to', (['nu', 'shape'], {}), '(nu, shape)\n', (3314, 3325), True, 'import numpy as np\n')] |
import numpy as np
# The robot's 4x5 world: a grid of 'o' (open) and 'b' (blocked) characters.
world = np.array([
    ['o', 'b', 'o', 'o', 'b'],
    ['o', 'o', 'b', 'o', 'o'],
    ['b', 'o', 'o', 'b', 'o'],
    ['b', 'o', 'o', 'o', 'o'],
])

# Sensor measurement: the color under the robot, then the one to its right.
measurement = ['b', 'o']
# This function takes in the world and the sensor measurement and returns
# the indices of the likely robot locations, based on matching the
# measurement with the color patterns in the world.
def find_match(world, measurement):
    """Return all [row, col] indices where *measurement* matches the world.

    Generalized: *measurement* may be any length >= 1.  A location [i, j]
    matches when world[i][j], world[i][j+1], ... equal measurement[0],
    measurement[1], ... reading to the right along the row.  For the
    original two-element measurement the result is unchanged.

    Parameters:
        world (numpy.ndarray): 2D array of single-character strings.
        measurement (sequence): consecutive sensor readings, left to right.

    Returns:
        list[list[int]]: every [row, col] position where the pattern matches.
    """
    possible_locations = []
    n_rows, n_cols = world.shape
    m = len(measurement)
    for i in range(n_rows):
        # Only start columns that leave room for the whole pattern.
        for j in range(n_cols - m + 1):
            if all(world[i][j + k] == measurement[k] for k in range(m)):
                possible_locations.append([i, j])
    return possible_locations
# This line runs the function and stores the output - do not delete
# `locations` holds every [row, col] index whose pattern matches the measurement.
locations = find_match(world, measurement)
| [
"numpy.array"
] | [((74, 196), 'numpy.array', 'np.array', (["[['o', 'b', 'o', 'o', 'b'], ['o', 'o', 'b', 'o', 'o'], ['b', 'o', 'o', 'b',\n 'o'], ['b', 'o', 'o', 'o', 'o']]"], {}), "([['o', 'b', 'o', 'o', 'b'], ['o', 'o', 'b', 'o', 'o'], ['b', 'o',\n 'o', 'b', 'o'], ['b', 'o', 'o', 'o', 'o']])\n", (82, 196), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 14:55:02 2021
@author: <NAME>
Copyright 2021 <NAME>
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
EXAMPLE_DESCRIPTIVE_NAME = 'Tune conductivities to fit clinical LAT map'
EXAMPLE_AUTHOR = '<NAME> <<EMAIL>>'
import os
import sys
import vtk
from datetime import date
from carputils import settings
from carputils import tools
import numpy as np
from carputils.carpio import igb
from scipy.spatial import cKDTree
import csv
import random
from vtk.numpy_interface import dataset_adapter as dsa
import Methods_converge_to_lat
from sklearn.metrics import mean_squared_error
vtk_version = vtk.vtkVersion.GetVTKSourceVersion().split()[-1].split('.')[0]
def parser():
    """Build the command-line parser for the LAT-fitting simulation.

    Extends the standard carputils parser with conductivity, threshold,
    mesh and pacing options.

    Returns:
        The configured carputils argument parser.
    """
    # Generate the standard command line parser
    parser = tools.standard_parser()
    # Add arguments
    parser.add_argument('--giL', type=float, default=0.86, help='intracellular longitudinal conductivity to fit CV=0.714 m/s with dx=0.3 mm and dt=20')
    parser.add_argument('--geL', type=float, default=3.10, help='extracellular longitudinal conductivity to fit CV=0.714 m/s with dx=0.3 mm and dt=20')
    parser.add_argument('--model',
                        type=str,
                        default='COURTEMANCHE',
                        help='input ionic model')
    parser.add_argument('--low_vol_thr',
                        type=float,
                        default=0.5,
                        help='bipolar voltage threshold to define low voltage region')
    parser.add_argument('--low_CV_thr',
                        type=float,
                        default=300,
                        help='CV threshold to define low CV region in mm/s')
    parser.add_argument('--LaAT',
                        type=float,
                        default=134,
                        help='wanted last activation')
    # NOTE(review): help string duplicated from --LaAT; presumably this is
    # the point id of the latest activation -- confirm.
    parser.add_argument('--max_LAT_id',
                        type=int,
                        default=0,
                        help='wanted last activation')
    # NOTE(review): help string duplicated from --low_vol_thr; presumably
    # this is the fibrosis fraction used in the mesh name -- confirm.
    parser.add_argument('--fib_perc',
                        type=float,
                        default=0.3,
                        help='bipolar voltage threshold to define low voltage region')
    parser.add_argument('--tol',
                        type=float, default=1,
                        help='tolerance to optimize RMSE in [ms]')
    # NOTE(review): help string 'meshname' looks copied from --mesh -- confirm.
    parser.add_argument('--max_LAT_pt',
                        type=str, default='max',
                        help='meshname')
    parser.add_argument('--step',
                        type=float,
                        default=20,
                        help='LAT band steps in ms')
    # NOTE(review): help string duplicated from --step -- confirm intent.
    parser.add_argument('--thr',
                        type=int,
                        default=4,
                        help='LAT band steps in ms')
    parser.add_argument('--mesh',
                        type=str, default='',
                        help='meshname')
    parser.add_argument('--fibrotic_tissue',
                        type=int,
                        default=1,
                        help='set 1 for mesh with fibrotic tissue, 0 otherwise')
    # argparse converts a string default through `type`, so the parsed
    # value of --M_lump is the integer 1.
    parser.add_argument('--M_lump',
                        type=int,
                        default='1',
                        help='set 1 for mass lumping, 0 otherwise')
    parser.add_argument('--meth',
                        type=int,
                        default=0,
                        help='0 only low voltage, 1 scale vec, 2 low CV as 0.3 m/s')
    #----------------------------------------------------------------------------------------------------------------------------------------
    parser.add_argument('--dt',
                        type=float, default=20.0,
                        help='[microsec]')
    parser.add_argument('--bcl',
                        type=float, default=500.0,
                        help='initial basic cycle lenght in [ms]')
    parser.add_argument('--beats-single-cell',
                        type = int,
                        default = 20,
                        help='Beats to prepace at single cell')
    parser.add_argument('--prebeats',
                        type = int,
                        default = 4,
                        help='Number of beats to prepace the tissue')
    #----------------------------------------------------------------------------------------------------------------------------------------
    return parser
def jobID(args):
    """Build the job/output directory path encoding the run parameters.

    Parameters:
        args: parsed command-line arguments (see :func:`parser`).

    Returns:
        str: absolute path used as the simulation job ID.
    """
    # Fix: removed the unused local `today = date.today()`; the date never
    # appeared in the generated ID.
    ID = '/pfs/work7/workspace/scratch/yx5140-result-0/converge_band_{}_prebeats_{}_bcl_{}_fib_{}_max_LAT_pt_{}_voltage_{}_CV_{}_meth_{}_fib_p_{}_step_{}_thr_{}'.format(args.mesh,
        args.prebeats, args.bcl, args.fibrotic_tissue, args.max_LAT_pt, args.low_vol_thr, args.low_CV_thr, args.meth, args.fib_perc, args.step, args.thr)
    return ID
def single_cell_initialization(args,job, steady_state_dir, to_do):
    """Pre-pace single cells with carp's bench and collect init-state files.

    Each regional parameter set of the ionic model is paced for 20 s at a
    basic cycle length of 1000 ms and the resulting state vector is written
    to *steady_state_dir*.  If ``args.fibrotic_tissue == 1`` an additional
    fibrotic parameter set is stabilized as well.

    Parameters:
        args: parsed command-line arguments (uses model, dt, fibrotic_tissue).
        job: carputils job object used to run the bench commands.
        steady_state_dir (str): directory holding the '.sv' init-state files.
        to_do: if truthy, actually run bench; otherwise only the option list
            pointing at previously computed files is assembled.

    Returns:
        list: '-imp_region[i].im_sv_init' option pairs for all regions.
    """
    # Regional conductance scalings of the ionic model, one entry per region.
    g_CaL_reg = [0.45, 0.7515, 0.7515, 0.3015, 0.3015, 0.4770, 0.4770, 0.45, 0.3375]
    g_K1_reg = [2, 2, 2, 2, 2, 2, 2, 2, 1.34]
    blf_g_Kur_reg = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    g_to_reg = [0.35, 0.35, 0.35, 0.35, 0.35, 0.2380, 0.2380, 0.35, 0.2625]
    g_Ks_reg = [2, 2, 2, 2, 2, 2, 2, 2, 3.74]
    maxI_pCa_reg = [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5]
    maxI_NaCa_reg = [1.6, 1.6, 1.6, 1.6, 1.6, 1.6, 1.6, 1.6, 1.6]
    g_Kr_reg = [1, 1, 1, 1.53, 2.44, 1, 1.6, 1.6, 2.4]
    n_regions = len(g_CaL_reg)
    # 20 s of pacing at bcl 1000 ms (20 stimuli), all times in ms.
    duration = 20*1000
    for k in range(n_regions):
        init_file = steady_state_dir + '/init_values_stab_bcl_{}_reg_{}.sv'.format(1000, k)
        cmd = [settings.execs.BENCH,
                '--imp', args.model,
                '--imp-par', 'g_CaL*{},g_K1*{},blf_i_Kur*{},g_to*{},g_Ks*{},maxI_pCa*{},maxI_NaCa*{},g_Kr*{}'.format(g_CaL_reg[k],g_K1_reg[k],blf_g_Kur_reg[k],g_to_reg[k],g_Ks_reg[k],maxI_pCa_reg[k],maxI_NaCa_reg[k],g_Kr_reg[k]),
                '--bcl', 1000,
                '--dt-out', duration,
                '--stim-curr', 9.5,
                '--stim-dur', 2,
                '--numstim', 20,
                '--duration', duration,
                '--stim-start', 0,
                '--dt', args.dt/1000,
                '--fout=' + '{}/tVI_stabilization'.format(job.ID),
                '-S', duration,
                '-F', init_file,
                '--no-trace', 'on']
        if to_do:
            job.bash(cmd)
    if args.fibrotic_tissue == 1:
        # Additional parameter set(s) for fibrotic tissue.
        g_CaL_fib = [0.225]
        g_Na_fib = [0.6]
        blf_g_Kur_fib = [0.5]
        g_to_fib = [0.35]
        g_Ks_fib = [2]
        maxI_pCa_fib = [1.5]
        maxI_NaCa_fib = [1.6]
        n_regions += len(g_CaL_fib)
        for kk in range(len(g_CaL_fib)):
            # NOTE: relies on `k` leaking from the loop above
            # (k == len(g_CaL_reg) - 1 here), so fibrotic regions are
            # numbered after the regular ones.
            init_file = steady_state_dir + '/init_values_stab_bcl_{}_reg_{}.sv'.format(1000, k+1+kk)
            cmd = [settings.execs.BENCH,
                    '--imp', args.model,
                    '--imp-par', 'g_CaL*{},g_Na*{},blf_i_Kur*{},g_to*{},g_Ks*{},maxI_pCa*{},maxI_NaCa*{}'.format(g_CaL_fib[kk],g_Na_fib[kk],blf_g_Kur_fib[kk],g_to_fib[kk],g_Ks_fib[kk],maxI_pCa_fib[kk],maxI_NaCa_fib[kk]),
                    '--bcl', 1000,
                    '--dt-out', duration,
                    '--stim-curr', 9.5,
                    '--stim-dur', 2,
                    '--numstim', 20,
                    '--duration', duration,
                    '--stim-start', 0,
                    '--dt', args.dt/1000,
                    '--fout=' + '{}/tVI_stabilization'.format(job.ID),
                    '-S', duration,
                    '-F', init_file,
                    '--no-trace', 'on']
            if to_do:
                job.bash(cmd)
    # Assemble the carp options pointing every imp region at its state file.
    tissue_init = []
    for k in range(n_regions):
        init_file = steady_state_dir + '/init_values_stab_bcl_{}_reg_{}.sv'.format(1000, k)
        tissue_init += ['-imp_region[{}].im_sv_init'.format(k), init_file]
    return tissue_init
def remove_trash2(simid):
    """Delete leftover 'init_*' and 'Trace_*' files from a simulation directory."""
    for entry in os.listdir(simid):
        if not entry.startswith(("init_", "Trace_")):
            continue
        path = os.path.join(simid, entry)
        if os.path.isfile(path):
            os.remove(path)
def tagregopt(reg, field, val):
    """Return the carp option pair '-tagreg[<reg>].<field>', value."""
    return ['-tagreg[{}].{}'.format(reg, field), val]
def tri_centroid(nodes, element):
    """Return the centroid [x, y, z] of the triangle spanned by the three
    node indices in *element*, looked up in the node coordinate array."""
    i0, i1, i2 = element[0], element[1], element[2]
    # Average each coordinate axis over the three vertices.
    return [(nodes[i0, d] + nodes[i1, d] + nodes[i2, d]) / 3 for d in range(3)]
@tools.carpexample(parser, jobID)
def run(args, job):
# Polyfit of the CVs
# p = np.poly1d([0.67278584, 0.17556362, 0.01718574])
meshname = '/home/kit/ibt/yx5140/meshes/Jadidi/{}/LA_bilayer_with_fiber'.format(args.mesh)
meshfold = '/home/kit/ibt/yx5140/meshes/Jadidi/{}'.format(args.mesh)
meshbasename = meshname.split('/')[-1]
steady_state_dir = '/home/kit/ibt/yx5140/cell_state'
try:
os.makedirs(steady_state_dir)
except OSError:
print ("Creation of the directory %s failed" % steady_state_dir)
else:
print ("Successfully created the directory %s " % steady_state_dir)
if not os.path.isfile(steady_state_dir + '/init_values_stab_bcl_{}_reg_{}.sv'.format(1000, 0)):
tissue_init = single_cell_initialization(args,job,steady_state_dir, 1)
else:
tissue_init = single_cell_initialization(args,job,steady_state_dir, 0)
simid = job.ID
try:
os.makedirs(simid)
except OSError:
print ("Creation of the directory %s failed" % simid)
else:
print ("Successfully created the directory %s " % simid)
bilayer_n_cells, elements_in_fibrotic_reg, endo, endo_ids, centroids, LAT_map, min_LAT, el_to_clean, el_border, stim_pt, fit_LAT, healthy_endo = Methods_converge_to_lat.low_vol_LAT(args, meshname+'_with_data.vtk')
with open(meshfold+"/clinical_stim_pt.txt","w") as f:
f.write("{} {} {}".format(stim_pt[0],stim_pt[1],stim_pt[2]))
# Set to 1 every LAT <= 1
LAT_map = np.where(LAT_map<=1, 1, LAT_map)
args.LaAT = args.LaAT - min_LAT
# Set to max_LAT every LAT >= max_LAT
LAT_map = np.where(LAT_map>args.LaAT, args.LaAT, LAT_map)
print("Wanted LAT: {}".format(args.LaAT))
print("Max LAT point id: {}".format(args.max_LAT_id))
print(fit_LAT)
# Find all not conductive elements belonging to the fibrotic tissue and not use them in the fitting
tag = {}
elems_not_conductive = np.loadtxt(meshfold+'/elems_slow_conductive.regele', skiprows=1, dtype=int)
endo_etag = vtk.util.numpy_support.vtk_to_numpy(endo.GetCellData().GetArray('elemTag'))
elems_not_conductive = elems_not_conductive[np.where(elems_not_conductive<len(endo_etag))]
endo_etag[elems_not_conductive] = 103
# Save endocardium mesh in carp format
meshNew = dsa.WrapDataObject(endo)
meshNew.CellData.append(endo_etag, "elemTag")
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName(meshfold+"/LA_endo_with_fiber_30.vtk")
writer.SetInputData(meshNew.VTKObject)
writer.SetFileTypeToBinary()
writer.Write()
pts = vtk.util.numpy_support.vtk_to_numpy(meshNew.VTKObject.GetPoints().GetData())
with open(meshfold+"/LA_endo_with_fiber_30.pts","w") as f:
f.write("{}\n".format(len(pts)))
for i in range(len(pts)):
f.write("{} {} {}\n".format(pts[i][0], pts[i][1], pts[i][2]))
with open(meshfold+"/LA_endo_with_fiber_30.elem","w") as f:
f.write("{}\n".format(meshNew.VTKObject.GetNumberOfCells()))
for i in range(meshNew.VTKObject.GetNumberOfCells()):
cell = meshNew.VTKObject.GetCell(i)
f.write("Tr {} {} {} {}\n".format(cell.GetPointIds().GetId(0), cell.GetPointIds().GetId(1), cell.GetPointIds().GetId(2), endo_etag[i]))
el_epi = vtk.util.numpy_support.vtk_to_numpy(meshNew.VTKObject.GetCellData().GetArray('fiber'))
sheet_epi = vtk.util.numpy_support.vtk_to_numpy(meshNew.VTKObject.GetCellData().GetArray('sheet'))
with open(meshfold+"/LA_endo_with_fiber_30.lon","w") as f:
f.write("2\n")
for i in range(len(el_epi)):
f.write("{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(el_epi[i][0], el_epi[i][1], el_epi[i][2], sheet_epi[i][0], sheet_epi[i][1], sheet_epi[i][2]))
meshname = meshfold +'/{}'.format(meshbasename)+'_{}'.format(args.fib_perc)
meshname_e = meshfold+"/LA_endo_with_fiber_30"
new_endo = Methods_converge_to_lat.smart_reader(meshname_e+'.vtk')
cellid = vtk.vtkIdFilter()
cellid.CellIdsOn()
cellid.SetInputData(new_endo)
cellid.PointIdsOn()
cellid.FieldDataOn()
if int(vtk_version) >= 9:
cellid.SetPointIdsArrayName('Global_ids')
cellid.SetCellIdsArrayName('Global_ids')
else:
cellid.SetIdsArrayName('Global_ids')
cellid.Update()
new_endo = cellid.GetOutput()
with open('element_tag.csv') as f:
reader = csv.DictReader(f)
for row in reader:
tag[row['name']] = int(row['tag'])
reg_0 = [tag['sinus_node'], 54, 65, 57, 67] # SN, ICV, CS
reg_1 = [1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 17, 51, 52, 55, 56, 58, 61, 62, 63, 66, 68, 84, 86, 88]
reg_2 = tag['crista_terminalis'] # CT
reg_3 = tag['pectinate_muscle'] # PM
reg_4 = [tag['bachmann_bundel_left'], tag['bachmann_bundel_right'], tag['bachmann_bundel_internal']] # BB
if args.fibrotic_tissue == 1:
reg_1 = [1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 17, 51, 52, 55, 56, 58, 61, 62, 63, 66, 68, 84, 86, 88, 101, 102]
lat = ['-num_LATs', 1,
'-lats[0].ID', 'ACTs',
'-lats[0].all', 0,
'-lats[0].measurand', 0,
'-lats[0].mode', 0,
'-lats[0].threshold', -50]
slow_CV = np.ones((len(endo_ids),))
slow_CV_old = np.ones((len(endo_ids),))
f = open(simid + '/low_CV.dat','w')
for i in slow_CV:
f.write("{:.4f}\n".format(i))
f.close()
f = open(simid + '/low_CV_old.dat','w')
for i in slow_CV:
f.write("{:.4f}\n".format(i))
f.close()
final_diff = []
old_cells = np.array([],dtype=int)
lats_to_fit = np.array([])
active_cells_old = np.array([],dtype=int)
active_cells_band = np.array([],dtype=int)
for l in range(len(fit_LAT)):
RMSE = 500
err = RMSE
it = 1
while RMSE>args.tol:
cmd = tools.carp_cmd('stimulation.par')
tissue_0 = ['-num_gregions', 6,
'-gregion[0].num_IDs', len(reg_0),
'-gregion[0].g_il', args.giL,
'-gregion[0].g_it', args.giL,
'-gregion[0].g_in', args.giL,
'-gregion[0].g_el', args.geL,
'-gregion[0].g_et ', args.geL,
'-gregion[0].g_en ', args.geL]
tissue_1 = ['-gregion[1].num_IDs ', len(reg_1),
'-gregion[1].g_il ', args.giL,
'-gregion[1].g_it ', args.giL/(3.75*0.62),
'-gregion[1].g_in ', args.giL/(3.75*0.62),
'-gregion[1].g_el ', args.geL,
'-gregion[1].g_et ', args.geL/(3.75*0.62),
'-gregion[1].g_en ', args.geL/(3.75*0.62)]
if args.fibrotic_tissue == 1:
tissue_0 = ['-num_gregions', 8,
'-gregion[0].num_IDs', len(reg_0),
'-gregion[0].g_il', args.giL,
'-gregion[0].g_it', args.giL,
'-gregion[0].g_in', args.giL,
'-gregion[0].g_el', args.geL,
'-gregion[0].g_et ', args.geL,
'-gregion[0].g_en ', args.geL]
tissue_1 = ['-gregion[1].num_IDs ', len(reg_1),
'-gregion[1].g_il ', args.giL,
'-gregion[1].g_it ', args.giL/(3.75*0.62),
'-gregion[1].g_in ', args.giL/(3.75*0.62),
'-gregion[1].g_el ', args.geL,
'-gregion[1].g_et ', args.geL/(3.75*0.62),
'-gregion[1].g_en ', args.geL/(3.75*0.62)]
for i in range(len(reg_0)):
tissue_0 += ['-gregion[0].ID[' + str(i) + ']', reg_0[i],]
for i in range(len(reg_1)):
tissue_1 += ['-gregion[1].ID[' + str(i) + ']', reg_1[i],]
CT_gi = args.giL*2
CT_ge = args.geL*2
tissue_2 = ['-gregion[2].num_IDs ', 1,
'-gregion[2].ID ', reg_2,
'-gregion[2].g_il ', CT_gi,
'-gregion[2].g_it ', CT_gi/(6.562*0.62),
'-gregion[2].g_in ', CT_gi/(6.562*0.62),
'-gregion[2].g_el ', CT_ge,
'-gregion[2].g_et ', CT_ge/(6.562*0.62),
'-gregion[2].g_en ', CT_ge/(6.562*0.62)]
PM_gi = args.giL*2
PM_ge = args.geL*2
tissue_3 = ['-gregion[3].num_IDs ', 1,
'-gregion[3].ID ', reg_3,
'-gregion[3].g_il ', PM_gi,
'-gregion[3].g_it ', PM_gi/(10.52*0.62),
'-gregion[3].g_in ', PM_gi/(10.52*0.62),
'-gregion[3].g_el ', PM_ge,
'-gregion[3].g_et ', PM_ge/(10.52*0.62),
'-gregion[3].g_en ', PM_ge/(10.52*0.62)]
BB_gi = args.giL*3
BB_ge = args.geL*3
tissue_4 = ['-gregion[4].num_IDs ', 3,
'-gregion[4].g_il ', BB_gi,
'-gregion[4].g_it ', BB_gi/(9*0.62),
'-gregion[4].g_in ', BB_gi/(9*0.62),
'-gregion[4].g_el ', BB_ge,
'-gregion[4].g_et ', BB_ge/(9*0.62),
'-gregion[4].g_en ', BB_ge/(9*0.62)]
for i in range(len(reg_4)):
tissue_4 += ['-gregion[4].ID[' + str(i) + ']', reg_4[i],]
sigma_L = 2.1
bilayer = ['-gregion[5].num_IDs ', 1,
'-gregion[5].ID ', 100,
'-gregion[5].g_il ', sigma_L,
'-gregion[5].g_it ', sigma_L,
'-gregion[5].g_in ', sigma_L,
'-gregion[5].g_el ', sigma_L,
'-gregion[5].g_et ', sigma_L,
'-gregion[5].g_en ', sigma_L]
if it == 1:
g_scale = ['-ge_scale_vec', simid+'/conductivity_map_old.dat',
'-gi_scale_vec', simid+'/conductivity_map_old.dat']
else:
g_scale = ['-ge_scale_vec', simid+'/conductivity_map.dat',
'-gi_scale_vec', simid+'/conductivity_map.dat']
# Set different tissue properties
cmd += tissue_0
cmd += tissue_1
cmd += tissue_2
cmd += tissue_3
cmd += tissue_4 + bilayer + g_scale
if args.fibrotic_tissue == 1:
fib_reg = [103]
sigma = 0.000001
fibrotic_tissue = ['-gregion[6].num_IDs ', 1, #tianbao
'-gregion[6].ID ', 103,
'-gregion[6].g_il ', sigma,
'-gregion[6].g_it ', sigma,
'-gregion[6].g_in ', sigma,
'-gregion[6].g_el ', sigma,
'-gregion[6].g_et ', sigma,
'-gregion[6].g_en ', sigma]
cmd += fibrotic_tissue
writestatef = 'state'
tsav_state = fit_LAT[l]
# Setting the stimulus at the sinus node
prepace = ['-num_stim', 1,
'-write_statef', writestatef,
'-num_tsav', 1,
'-tsav[0]', tsav_state,
'-stimulus[0].stimtype', 0,
'-stimulus[0].strength', 30.0,
'-stimulus[0].duration', 2.0,
'-stimulus[0].npls', 1,
'-stimulus[0].ctr_def', 1,
'-stimulus[0].x0', stim_pt[0],
'-stimulus[0].xd', 3000,
'-stimulus[0].y0', stim_pt[1],
'-stimulus[0].yd', 3000,
'-stimulus[0].z0', stim_pt[2],
'-stimulus[0].zd', 3000]
cmd += lat
cmd += tissue_init + prepace
cmd += ['-simID', simid,
'-dt', 20,
'-spacedt', 1,
'-mass_lumping', args.M_lump,
'-timedt', 10,
'-tend', tsav_state+0.1,
'-meshname', meshname_e]
#Run simulation
remove_trash2(simid)
job.carp(cmd)
# Read simulated LAT map
lats = np.loadtxt(simid + '/init_acts_ACTs-thresh.dat')
meshNew = dsa.WrapDataObject(new_endo)
# Convert point to cell data
meshNew.PointData.append(lats, "lat_s")
pt_cell = vtk.vtkPointDataToCellData()
pt_cell.SetInputData(meshNew.VTKObject)
pt_cell.AddPointDataArray("lat_s")
pt_cell.PassPointDataOn()
pt_cell.CategoricalDataOff()
pt_cell.ProcessAllArraysOff()
pt_cell.Update()
model = pt_cell.GetOutput()
meshNew = dsa.WrapDataObject(model)
# Extract all not fibrotic tissue (103 is not conductive)
healthy_endo = Methods_converge_to_lat.vtk_thr(model,1,"CELLS","elemTag",102)
# Extract all cells which are activated
active = Methods_converge_to_lat.vtk_thr(healthy_endo,0,"POINTS","lat_s",0)
active_cells = vtk.util.numpy_support.vtk_to_numpy(active.GetCellData().GetArray('Global_ids')).astype(int)
print("active_cells: {}".format(len(active_cells)))
act_cls_old = np.zeros((model.GetNumberOfCells(),))
act_cls = np.zeros((model.GetNumberOfCells(),))
meshNew.CellData.append(act_cls, "act_cls")
meshNew.CellData.append(act_cls_old, "act_cls_old")
active_cells_old = np.array(active_cells_band, dtype=int)
# Remove from fitting all the cells which were fitted in the previous step
active_cells_band = np.setdiff1d(active_cells, old_cells)
act_cls_old[active_cells_old] = 1
act_cls[active_cells] = 1
lats_to_fit_old = np.array(lats_to_fit)
lats_to_fit = vtk.util.numpy_support.vtk_to_numpy(model.GetCellData().GetArray('lat_s'))
if len(lats_to_fit_old)>0:
meshNew.CellData.append(lats_to_fit_old, "LATs_old")
meshNew.CellData.append(LAT_map, "LAT_to_clean")
# Find all active areas (border = 2 and core = 1) marked as wrong annotation, we give to the core the mean of the active border
active_to_interpolate = []
active_border = []
idss = np.zeros((model.GetNumberOfCells(),))
l_idss = np.zeros((model.GetNumberOfCells(),))
for k in range(len(el_to_clean)):
idss[el_to_clean[k]] = 1
idss[el_border[k]] = 2
current_active_to_interp = np.setdiff1d(np.intersect1d(el_to_clean[k],active_cells_band),old_cells)
if len(current_active_to_interp>0):
active_to_interpolate.append(current_active_to_interp)
active_border.append(np.setdiff1d(np.intersect1d(el_border[k], active_cells_band),old_cells))
l_idss[current_active_to_interp] = 1
l_idss[np.setdiff1d(np.intersect1d(el_border[k], active_cells_band),old_cells)] = 2
meshNew.CellData.append(idss, "idss")
meshNew.CellData.append(l_idss, "l_idss")
last_ACT = np.mean(lats_to_fit[active_cells_band])
print("ACT to fit: {}".format(fit_LAT[l]))
print("last ACT: {}".format(last_ACT))
print("old_cells: {}".format(len(old_cells)))
# Compute RMSE between simulated and clinical LAT excluding elements to clean (marked as wrong annotation)
if len(lats_to_fit[active_cells_band])>0:
if len(active_border)>0:
print("Active border")
current_active_to_interp = np.array([], dtype=int)
for k in range(len(active_to_interpolate)):
current_active_to_interp = np.union1d(current_active_to_interp, active_to_interpolate[k])
active_cleaned_cells = np.setdiff1d(active_cells_band, current_active_to_interp)
RMSE = mean_squared_error(LAT_map[active_cleaned_cells], lats_to_fit[active_cleaned_cells], squared=False)
else:
RMSE = mean_squared_error(LAT_map[active_cells_band], lats_to_fit[active_cells_band], squared=False)
print("RMSE: ",RMSE)
print("err: ", err)
if RMSE>args.tol and RMSE + args.tol*0.25 < err: # Stopping criteria: RMSE< tol or new RMSE + 0.25*tol > old RMSE
meshNew.CellData.append(slow_CV_old, "slow_CV_old")
slow_CV_old[:] = slow_CV[:]
active_cells_old_old = np.array(active_cells_old, dtype=int)
if len(active_border)>0: # Elements to clean
# For each area to clean, give to the active core the mean of conductivity of the active border
slow_CV[active_cleaned_cells] = slow_CV[active_cleaned_cells]*((lats_to_fit[active_cleaned_cells]/(LAT_map[active_cleaned_cells]))**2)
for k in range(len(active_to_interpolate)):
if len(active_border[k])>0:
slow_CV[active_to_interpolate[k]] = np.mean(slow_CV[active_border[k]])
else: # No elements to clean
# sigma_new = sigma_old*(lat_simulated/lat_clinical)^2 for sigma = CV^2 see https://opencarp.org/documentation/examples/02_ep_tissue/03a_study_prep_tunecv
slow_CV[active_cells_band] = slow_CV[active_cells_band]*((lats_to_fit[active_cells_band]/(LAT_map[active_cells_band]))**2)
slow_CV = np.where(slow_CV>3.5, 3.5, slow_CV) # Set an upper bound in CV of 2.15 m/s
slow_CV = np.where(slow_CV<0.15, 0.15, slow_CV) # Set a lower bound in CV of 0.35 m/s
meshNew.CellData.append(slow_CV, "slow_CV")
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName(job.ID+"/endo_cleaned_{}.vtk".format(l))
writer.SetInputData(meshNew.VTKObject)
writer.SetFileTypeToBinary()
writer.Write()
LAT_diff = RMSE
os.rename(simid + '/low_CV.dat',simid + '/low_CV_old.dat')
f = open(simid + '/low_CV.dat','w')
for i in slow_CV:
f.write("{:.4f}\n".format(i))
f.close()
it +=1
else:
old_cells = np.union1d(old_cells, active_cells_old_old)
slow_CV[:] = slow_CV_old[:]
LATs_diff = np.zeros((model.GetNumberOfCells(),))
LATs_diff[old_cells] = lats_to_fit_old[old_cells]-LAT_map[old_cells]
meshNew.CellData.append(slow_CV, "slow_CV")
meshNew.CellData.append(LATs_diff, "LATs_diff")
meshNew.CellData.append(slow_CV_old, "slow_CV_old")
final_diff.append(LAT_diff)
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName(job.ID+"/endo_cleaned_{}.vtk".format(l))
writer.SetInputData(meshNew.VTKObject)
writer.SetFileTypeToBinary()
writer.Write()
break
err = RMSE
cmd = tools.carp_cmd('stimulation.par')
g_scale = ['-ge_scale_vec', simid+'/low_CV_old.dat',
'-gi_scale_vec', simid+'/low_CV_old.dat']
# Set different tissue properties
cmd += tissue_0
cmd += tissue_1
cmd += tissue_2
cmd += tissue_3
cmd += tissue_4 + bilayer + g_scale
cmd += fibrotic_tissue
cmd += lat
# Setting the stimulus at the sinus node
prepace = ['-num_stim', 1,
'-write_statef', writestatef,
'-num_tsav', 1,
'-tsav[0]', tsav_state,
'-stimulus[0].stimtype', 0,
'-stimulus[0].strength', 30.0,
'-stimulus[0].duration', 2.0,
'-stimulus[0].npls', 1,
'-stimulus[0].ctr_def', 1,
'-stimulus[0].x0', stim_pt[0],
'-stimulus[0].xd', 3000,
'-stimulus[0].y0', stim_pt[1],
'-stimulus[0].yd', 3000,
'-stimulus[0].z0', stim_pt[2],
'-stimulus[0].zd', 3000]
cmd += tissue_init + prepace
cmd += ['-simID', simid,
'-dt', 20,
'-spacedt', 1,
'-mass_lumping', args.M_lump,
'-timedt', 10,
'-num_tsav', 1,
'-tsav[0]', tsav_state,
'-tend', tsav_state+2*args.step+0.1,
'-meshname', meshname_e]
#Run simulation
remove_trash2(simid)
job.carp(cmd)
model_cleaned = Methods_converge_to_lat.vtk_thr(meshNew.VTKObject, 2, "CELLS", "idss", 0,0)
cleaned_ids = vtk.util.numpy_support.vtk_to_numpy(model_cleaned.GetPointData().GetArray('Global_ids')).astype(int)
lats = np.loadtxt(simid + '/init_acts_ACTs-thresh.dat')
lats_to_fit = vtk.util.numpy_support.vtk_to_numpy(model.GetPointData().GetArray('lat')) - min_LAT
RMSE = mean_squared_error(lats[cleaned_ids], lats_to_fit[cleaned_ids], squared=False)
final_diff.append(RMSE)
print(RMSE)
print("Final last ACT: {}".format(last_ACT))
print("Final giL: {}".format(args.giL))
print("Final geL: {}".format(args.geL))
f = open(job.ID + '/err.dat','w')
for i in final_diff:
f.write("{:.4f}\n".format(i))
f.close()
if os.path.exists('RMSE_patients.txt'):
append_write = 'a' # append if already exists
else:
append_write = 'w' # make a new file if not
f=open('RMSE_patients.txt', append_write)
f.write("{} {} {} {:.2f}\n".format(args.mesh, args.step, args.thr, RMSE))
f.close()
slow_CV = np.loadtxt(simid+'/low_CV_old.dat')
slow_CV_bil = np.ones((bilayer_n_cells,))
slow_CV_bil[endo_ids] = slow_CV
slow_CV_bil[endo_ids+len(endo_ids)] = slow_CV
f = open(meshfold + '/low_CV_3_{}_{}.dat'.format(args.step,args.thr),'w')
for i in slow_CV_bil:
f.write("{:.4f}\n".format(i))
f.close()
meshNew = dsa.WrapDataObject(new_endo)
meshNew.PointData.append(lats, "lat_s")
pt_cell = vtk.vtkPointDataToCellData()
pt_cell.SetInputData(meshNew.VTKObject)
pt_cell.AddPointDataArray("lat_s")
pt_cell.PassPointDataOn()
pt_cell.CategoricalDataOff()
pt_cell.ProcessAllArraysOff()
pt_cell.Update()
meshNew.CellData.append(LAT_map, "LAT_to_clean")
LATs_diff = vtk.util.numpy_support.vtk_to_numpy(pt_cell.GetOutput().GetCellData().GetArray('lat_s'))-LAT_map
meshNew.CellData.append(slow_CV, "slow_CV")
meshNew.CellData.append(LATs_diff, "LATs_diff")
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName(job.ID+"/endo_final.vtk".format(l))
writer.SetInputData(meshNew.VTKObject)
writer.SetFileTypeToBinary()
writer.Write()
if __name__ == '__main__':
run()
| [
"Methods_converge_to_lat.smart_reader",
"csv.DictReader",
"carputils.tools.carp_cmd",
"numpy.union1d",
"Methods_converge_to_lat.vtk_thr",
"carputils.tools.standard_parser",
"numpy.array",
"vtk.vtkIdFilter",
"vtk.vtkVersion.GetVTKSourceVersion",
"os.path.exists",
"numpy.mean",
"os.listdir",
"... | [((9440, 9472), 'carputils.tools.carpexample', 'tools.carpexample', (['parser', 'jobID'], {}), '(parser, jobID)\n', (9457, 9472), False, 'from carputils import tools\n'), ((1518, 1541), 'carputils.tools.standard_parser', 'tools.standard_parser', ([], {}), '()\n', (1539, 1541), False, 'from carputils import tools\n'), ((5194, 5206), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5204, 5206), False, 'from datetime import date\n'), ((8793, 8810), 'os.listdir', 'os.listdir', (['simid'], {}), '(simid)\n', (8803, 8810), False, 'import os\n'), ((10722, 10792), 'Methods_converge_to_lat.low_vol_LAT', 'Methods_converge_to_lat.low_vol_LAT', (['args', "(meshname + '_with_data.vtk')"], {}), "(args, meshname + '_with_data.vtk')\n", (10757, 10792), False, 'import Methods_converge_to_lat\n'), ((10968, 11002), 'numpy.where', 'np.where', (['(LAT_map <= 1)', '(1)', 'LAT_map'], {}), '(LAT_map <= 1, 1, LAT_map)\n', (10976, 11002), True, 'import numpy as np\n'), ((11095, 11144), 'numpy.where', 'np.where', (['(LAT_map > args.LaAT)', 'args.LaAT', 'LAT_map'], {}), '(LAT_map > args.LaAT, args.LaAT, LAT_map)\n', (11103, 11144), True, 'import numpy as np\n'), ((11417, 11494), 'numpy.loadtxt', 'np.loadtxt', (["(meshfold + '/elems_slow_conductive.regele')"], {'skiprows': '(1)', 'dtype': 'int'}), "(meshfold + '/elems_slow_conductive.regele', skiprows=1, dtype=int)\n", (11427, 11494), True, 'import numpy as np\n'), ((11784, 11808), 'vtk.numpy_interface.dataset_adapter.WrapDataObject', 'dsa.WrapDataObject', (['endo'], {}), '(endo)\n', (11802, 11808), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((11873, 11904), 'vtk.vtkUnstructuredGridWriter', 'vtk.vtkUnstructuredGridWriter', ([], {}), '()\n', (11902, 11904), False, 'import vtk\n'), ((13430, 13487), 'Methods_converge_to_lat.smart_reader', 'Methods_converge_to_lat.smart_reader', (["(meshname_e + '.vtk')"], {}), "(meshname_e + '.vtk')\n", (13466, 13487), False, 'import Methods_converge_to_lat\n'), ((13499, 
13516), 'vtk.vtkIdFilter', 'vtk.vtkIdFilter', ([], {}), '()\n', (13514, 13516), False, 'import vtk\n'), ((15153, 15176), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (15161, 15176), True, 'import numpy as np\n'), ((15194, 15206), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15202, 15206), True, 'import numpy as np\n'), ((15231, 15254), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (15239, 15254), True, 'import numpy as np\n'), ((15278, 15301), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (15286, 15301), True, 'import numpy as np\n'), ((29408, 29441), 'carputils.tools.carp_cmd', 'tools.carp_cmd', (['"""stimulation.par"""'], {}), "('stimulation.par')\n", (29422, 29441), False, 'from carputils import tools\n'), ((30895, 30971), 'Methods_converge_to_lat.vtk_thr', 'Methods_converge_to_lat.vtk_thr', (['meshNew.VTKObject', '(2)', '"""CELLS"""', '"""idss"""', '(0)', '(0)'], {}), "(meshNew.VTKObject, 2, 'CELLS', 'idss', 0, 0)\n", (30926, 30971), False, 'import Methods_converge_to_lat\n'), ((31103, 31151), 'numpy.loadtxt', 'np.loadtxt', (["(simid + '/init_acts_ACTs-thresh.dat')"], {}), "(simid + '/init_acts_ACTs-thresh.dat')\n", (31113, 31151), True, 'import numpy as np\n'), ((31267, 31345), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['lats[cleaned_ids]', 'lats_to_fit[cleaned_ids]'], {'squared': '(False)'}), '(lats[cleaned_ids], lats_to_fit[cleaned_ids], squared=False)\n', (31285, 31345), False, 'from sklearn.metrics import mean_squared_error\n'), ((31653, 31688), 'os.path.exists', 'os.path.exists', (['"""RMSE_patients.txt"""'], {}), "('RMSE_patients.txt')\n", (31667, 31688), False, 'import os\n'), ((31959, 31996), 'numpy.loadtxt', 'np.loadtxt', (["(simid + '/low_CV_old.dat')"], {}), "(simid + '/low_CV_old.dat')\n", (31969, 31996), True, 'import numpy as np\n'), ((32013, 32040), 'numpy.ones', 'np.ones', (['(bilayer_n_cells,)'], {}), '((bilayer_n_cells,))\n', 
(32020, 32040), True, 'import numpy as np\n'), ((32299, 32327), 'vtk.numpy_interface.dataset_adapter.WrapDataObject', 'dsa.WrapDataObject', (['new_endo'], {}), '(new_endo)\n', (32317, 32327), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((32386, 32414), 'vtk.vtkPointDataToCellData', 'vtk.vtkPointDataToCellData', ([], {}), '()\n', (32412, 32414), False, 'import vtk\n'), ((32897, 32928), 'vtk.vtkUnstructuredGridWriter', 'vtk.vtkUnstructuredGridWriter', ([], {}), '()\n', (32926, 32928), False, 'import vtk\n'), ((9876, 9905), 'os.makedirs', 'os.makedirs', (['steady_state_dir'], {}), '(steady_state_dir)\n', (9887, 9905), False, 'import os\n'), ((10396, 10414), 'os.makedirs', 'os.makedirs', (['simid'], {}), '(simid)\n', (10407, 10414), False, 'import os\n'), ((13918, 13935), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (13932, 13935), False, 'import csv\n'), ((15440, 15473), 'carputils.tools.carp_cmd', 'tools.carp_cmd', (['"""stimulation.par"""'], {}), "('stimulation.par')\n", (15454, 15473), False, 'from carputils import tools\n'), ((22225, 22273), 'numpy.loadtxt', 'np.loadtxt', (["(simid + '/init_acts_ACTs-thresh.dat')"], {}), "(simid + '/init_acts_ACTs-thresh.dat')\n", (22235, 22273), True, 'import numpy as np\n'), ((22296, 22324), 'vtk.numpy_interface.dataset_adapter.WrapDataObject', 'dsa.WrapDataObject', (['new_endo'], {}), '(new_endo)\n', (22314, 22324), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((22440, 22468), 'vtk.vtkPointDataToCellData', 'vtk.vtkPointDataToCellData', ([], {}), '()\n', (22466, 22468), False, 'import vtk\n'), ((22789, 22814), 'vtk.numpy_interface.dataset_adapter.WrapDataObject', 'dsa.WrapDataObject', (['model'], {}), '(model)\n', (22807, 22814), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((22912, 22978), 'Methods_converge_to_lat.vtk_thr', 'Methods_converge_to_lat.vtk_thr', (['model', '(1)', '"""CELLS"""', '"""elemTag"""', '(102)'], {}), "(model, 1, 'CELLS', 
'elemTag', 102)\n", (22943, 22978), False, 'import Methods_converge_to_lat\n'), ((23048, 23118), 'Methods_converge_to_lat.vtk_thr', 'Methods_converge_to_lat.vtk_thr', (['healthy_endo', '(0)', '"""POINTS"""', '"""lat_s"""', '(0)'], {}), "(healthy_endo, 0, 'POINTS', 'lat_s', 0)\n", (23079, 23118), False, 'import Methods_converge_to_lat\n'), ((23575, 23613), 'numpy.array', 'np.array', (['active_cells_band'], {'dtype': 'int'}), '(active_cells_band, dtype=int)\n', (23583, 23613), True, 'import numpy as np\n'), ((23734, 23771), 'numpy.setdiff1d', 'np.setdiff1d', (['active_cells', 'old_cells'], {}), '(active_cells, old_cells)\n', (23746, 23771), True, 'import numpy as np\n'), ((23887, 23908), 'numpy.array', 'np.array', (['lats_to_fit'], {}), '(lats_to_fit)\n', (23895, 23908), True, 'import numpy as np\n'), ((25316, 25355), 'numpy.mean', 'np.mean', (['lats_to_fit[active_cells_band]'], {}), '(lats_to_fit[active_cells_band])\n', (25323, 25355), True, 'import numpy as np\n'), ((8918, 8940), 'os.path.join', 'os.path.join', (['simid', 'f'], {}), '(simid, f)\n', (8930, 8940), False, 'import os\n'), ((26752, 26789), 'numpy.array', 'np.array', (['active_cells_old'], {'dtype': 'int'}), '(active_cells_old, dtype=int)\n', (26760, 26789), True, 'import numpy as np\n'), ((27744, 27781), 'numpy.where', 'np.where', (['(slow_CV > 3.5)', '(3.5)', 'slow_CV'], {}), '(slow_CV > 3.5, 3.5, slow_CV)\n', (27752, 27781), True, 'import numpy as np\n'), ((27847, 27886), 'numpy.where', 'np.where', (['(slow_CV < 0.15)', '(0.15)', 'slow_CV'], {}), '(slow_CV < 0.15, 0.15, slow_CV)\n', (27855, 27886), True, 'import numpy as np\n'), ((28026, 28057), 'vtk.vtkUnstructuredGridWriter', 'vtk.vtkUnstructuredGridWriter', ([], {}), '()\n', (28055, 28057), False, 'import vtk\n'), ((28313, 28372), 'os.rename', 'os.rename', (["(simid + '/low_CV.dat')", "(simid + '/low_CV_old.dat')"], {}), "(simid + '/low_CV.dat', simid + '/low_CV_old.dat')\n", (28322, 28372), False, 'import os\n'), ((28603, 28646), 'numpy.union1d', 
'np.union1d', (['old_cells', 'active_cells_old_old'], {}), '(old_cells, active_cells_old_old)\n', (28613, 28646), True, 'import numpy as np\n'), ((29103, 29134), 'vtk.vtkUnstructuredGridWriter', 'vtk.vtkUnstructuredGridWriter', ([], {}), '()\n', (29132, 29134), False, 'import vtk\n'), ((8976, 8998), 'os.path.join', 'os.path.join', (['simid', 'f'], {}), '(simid, f)\n', (8988, 8998), False, 'import os\n'), ((24713, 24762), 'numpy.intersect1d', 'np.intersect1d', (['el_to_clean[k]', 'active_cells_band'], {}), '(el_to_clean[k], active_cells_band)\n', (24727, 24762), True, 'import numpy as np\n'), ((25827, 25850), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (25835, 25850), True, 'import numpy as np\n'), ((26072, 26129), 'numpy.setdiff1d', 'np.setdiff1d', (['active_cells_band', 'current_active_to_interp'], {}), '(active_cells_band, current_active_to_interp)\n', (26084, 26129), True, 'import numpy as np\n'), ((26157, 26261), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['LAT_map[active_cleaned_cells]', 'lats_to_fit[active_cleaned_cells]'], {'squared': '(False)'}), '(LAT_map[active_cleaned_cells], lats_to_fit[\n active_cleaned_cells], squared=False)\n', (26175, 26261), False, 'from sklearn.metrics import mean_squared_error\n'), ((26306, 26404), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['LAT_map[active_cells_band]', 'lats_to_fit[active_cells_band]'], {'squared': '(False)'}), '(LAT_map[active_cells_band], lats_to_fit[\n active_cells_band], squared=False)\n', (26324, 26404), False, 'from sklearn.metrics import mean_squared_error\n'), ((1379, 1415), 'vtk.vtkVersion.GetVTKSourceVersion', 'vtk.vtkVersion.GetVTKSourceVersion', ([], {}), '()\n', (1413, 1415), False, 'import vtk\n'), ((25966, 26028), 'numpy.union1d', 'np.union1d', (['current_active_to_interp', 'active_to_interpolate[k]'], {}), '(current_active_to_interp, active_to_interpolate[k])\n', (25976, 26028), True, 'import numpy as np\n'), ((24954, 25001), 
'numpy.intersect1d', 'np.intersect1d', (['el_border[k]', 'active_cells_band'], {}), '(el_border[k], active_cells_band)\n', (24968, 25001), True, 'import numpy as np\n'), ((25111, 25158), 'numpy.intersect1d', 'np.intersect1d', (['el_border[k]', 'active_cells_band'], {}), '(el_border[k], active_cells_band)\n', (25125, 25158), True, 'import numpy as np\n'), ((27303, 27337), 'numpy.mean', 'np.mean', (['slow_CV[active_border[k]]'], {}), '(slow_CV[active_border[k]])\n', (27310, 27337), True, 'import numpy as np\n')] |
from __future__ import print_function
import importlib, inspect, os, sys
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
import h2o
from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates sklearn pipelines using either a mix of sklearn+H2O components,
or only H2O components.
Then, it feeds them with H2O frames (more efficient and ensures compatibility with old API.)
or with numpy arrays to provide the simplest approach for users wanting to use H2O like any sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
max_models = 3
scores = {}
def _get_data(format='numpy', n_classes=2):
generator = make_classification if n_classes > 0 else make_regression
params = dict(n_samples=100, n_features=5, n_informative=n_classes or 2, random_state=seed)
if generator is make_classification:
params.update(n_classes=n_classes)
X, y = generator(**params)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
if format == 'h2o':
for k, v in data.__dict__.items():
setattr(data, k, h2o.H2OFrame(v))
return data
def test_binomial_classification_with_h2o_frames():
pipeline = make_pipeline(H2OAutoMLClassifier(seed=seed))
pipeline.set_params(
h2oautomlclassifier__max_models=max_models,
h2oautomlclassifier__nfolds=3
)
pipeline.named_steps.h2oautomlclassifier.exclude_algos = ['XGBoost']
data = _get_data(format='h2o', n_classes=2)
assert isinstance(data.X_train, h2o.H2OFrame)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlclassifier.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, h2o.H2OFrame)
assert preds.dim == [len(data.X_test), 1]
probs = pipeline.predict_proba(data.X_test)
assert probs.dim == [len(data.X_test), 2]
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_multinomial_classification_with_numpy_frames():
pipeline = make_pipeline(H2OAutoMLClassifier(seed=seed, init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlclassifier__max_models=max_models,
h2oautomlclassifier__nfolds=3
)
pipeline.named_steps.h2oautomlclassifier.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlclassifier.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = pipeline.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_regression_with_numpy_frames():
pipeline = make_pipeline(H2OAutoMLRegressor(seed=seed, init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlregressor__max_models=max_models,
h2oautomlregressor__nfolds=3
)
pipeline.named_steps.h2oautomlregressor.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=0)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlregressor.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_generic_estimator_for_classification():
pipeline = make_pipeline(H2OAutoMLEstimator(estimator_type='classifier', seed=seed,
init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlestimator__max_models=max_models,
h2oautomlestimator__nfolds=3
)
pipeline.named_steps.h2oautomlestimator.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlestimator.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = pipeline.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_generic_estimator_for_regression():
pipeline = make_pipeline(H2OAutoMLEstimator(estimator_type='regressor', seed=seed,
init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlestimator__max_models=max_models,
h2oautomlestimator__nfolds=3
)
pipeline.named_steps.h2oautomlestimator.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=0)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlestimator.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
pyunit_utils.run_tests([
test_binomial_classification_with_h2o_frames,
test_multinomial_classification_with_numpy_frames,
test_regression_with_numpy_frames,
test_generic_estimator_for_classification,
test_generic_estimator_for_regression,
])
| [
"sklearn.metrics.accuracy_score",
"h2o.sklearn.H2OAutoMLClassifier",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.sum",
"h2o.sklearn.H2OAutoMLRegressor",
"h2o.H2OFrame",
"tests.Namespace",
"sklearn.metrics.r2_score",
"h2o.sklearn.H2OAutoMLEstimator",
"tests.pyunit_utils.run... | [((7000, 7254), 'tests.pyunit_utils.run_tests', 'pyunit_utils.run_tests', (['[test_binomial_classification_with_h2o_frames,\n test_multinomial_classification_with_numpy_frames,\n test_regression_with_numpy_frames,\n test_generic_estimator_for_classification,\n test_generic_estimator_for_regression]'], {}), '([test_binomial_classification_with_h2o_frames,\n test_multinomial_classification_with_numpy_frames,\n test_regression_with_numpy_frames,\n test_generic_estimator_for_classification,\n test_generic_estimator_for_regression])\n', (7022, 7254), False, 'from tests import pyunit_utils, Namespace as ns\n'), ((483, 507), 'os.path.join', 'os.path.join', (['""".."""', '""".."""'], {}), "('..', '..')\n", (495, 507), False, 'import importlib, inspect, os, sys\n'), ((1371, 1412), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': 'seed'}), '(X, y, random_state=seed)\n', (1387, 1412), False, 'from sklearn.model_selection import train_test_split\n'), ((1424, 1490), 'tests.Namespace', 'ns', ([], {'X_train': 'X_train', 'X_test': 'X_test', 'y_train': 'y_train', 'y_test': 'y_test'}), '(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n', (1426, 1490), True, 'from tests import pyunit_utils, Namespace as ns\n'), ((3702, 3736), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['data.y_test', 'preds'], {}), '(data.y_test, preds)\n', (3716, 3736), False, 'from sklearn.metrics import accuracy_score, r2_score\n'), ((4642, 4670), 'sklearn.metrics.r2_score', 'r2_score', (['data.y_test', 'preds'], {}), '(data.y_test, preds)\n', (4650, 4670), False, 'from sklearn.metrics import accuracy_score, r2_score\n'), ((5855, 5889), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['data.y_test', 'preds'], {}), '(data.y_test, preds)\n', (5869, 5889), False, 'from sklearn.metrics import accuracy_score, r2_score\n'), ((6876, 6904), 'sklearn.metrics.r2_score', 'r2_score', (['data.y_test', 'preds'], {}), 
'(data.y_test, preds)\n', (6884, 6904), False, 'from sklearn.metrics import accuracy_score, r2_score\n'), ((1703, 1733), 'h2o.sklearn.H2OAutoMLClassifier', 'H2OAutoMLClassifier', ([], {'seed': 'seed'}), '(seed=seed)\n', (1722, 1733), False, 'from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor\n'), ((2763, 2836), 'h2o.sklearn.H2OAutoMLClassifier', 'H2OAutoMLClassifier', ([], {'seed': 'seed', 'init_connection_args': 'init_connection_args'}), '(seed=seed, init_connection_args=init_connection_args)\n', (2782, 2836), False, 'from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor\n'), ((3522, 3543), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (3528, 3543), True, 'import numpy as np\n'), ((3901, 3973), 'h2o.sklearn.H2OAutoMLRegressor', 'H2OAutoMLRegressor', ([], {'seed': 'seed', 'init_connection_args': 'init_connection_args'}), '(seed=seed, init_connection_args=init_connection_args)\n', (3919, 3973), False, 'from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor\n'), ((4843, 4948), 'h2o.sklearn.H2OAutoMLEstimator', 'H2OAutoMLEstimator', ([], {'estimator_type': '"""classifier"""', 'seed': 'seed', 'init_connection_args': 'init_connection_args'}), "(estimator_type='classifier', seed=seed,\n init_connection_args=init_connection_args)\n", (4861, 4948), False, 'from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor\n'), ((5675, 5696), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (5681, 5696), True, 'import numpy as np\n'), ((6059, 6163), 'h2o.sklearn.H2OAutoMLEstimator', 'H2OAutoMLEstimator', ([], {'estimator_type': '"""regressor"""', 'seed': 'seed', 'init_connection_args': 'init_connection_args'}), "(estimator_type='regressor', seed=seed,\n init_connection_args=init_connection_args)\n", (6077, 6163), False, 'from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor\n'), 
((1587, 1602), 'h2o.H2OFrame', 'h2o.H2OFrame', (['v'], {}), '(v)\n', (1599, 1602), False, 'import h2o\n')] |
import numpy as np
import pandas as pd
import attr
from tqdm.auto import tqdm
from threading import Thread
from labAPI import Parameter
import matplotlib.pyplot as plt
@attr.s
class Sampler:
''' A base class for parameter space exploration and optimization.
Arguments:
experiment (callable): a function or method which measures the
objective function at the current point in
the parameter space. Takes no positional arguments.
An instance of the labAPI.Parameter class can also
be passed.
parameters (dict): instances of the Parameter class to use
during optimization. Defaults to empty, and Parameters
can be added by calling Algorithm.add_parameter().
bounds (dict): tuple bounds indexed by Parameter names. Defaults to empty,
and is set when adding Parameters.
points (dict): optional points for each Parameter to override default
point generation in optimizers, e.g. sampling locations
for a grid search or initial population in a genetic algorithm.
sign (int): choose between maximization (+1) and minimization (-1)
X (2d array): coordinates sampled during optimization. Defaults to empty,
but previous results can be passed (along with results into
the y argument) to speed up some optimizers.
y (1d array): objective function evaluations. Defaults to empty.
threaded (bool): if True, run optimization in a separate thread.
show_progress (bool): whether to display a progress bar during
optimization. Adds <1 ms overhead per iteration.
record_data (bool): whether to store X, y observations. Adds <1 ms overhead per iteration.
display (bool): whether to display optimization status using ipywidgets.
Only works when running in a Jupyter environment.
continuous (bool): whether to quit after convergence/specified number of iterations
or continue running.
'''
experiment = attr.ib(default=None)
parameters = attr.ib(factory=dict)
bounds = attr.ib(factory=dict)
points = attr.ib(factory=dict) # optional overrides to search points
sign = attr.ib(default=1, converter=np.sign)
X = attr.ib(factory=lambda: np.atleast_2d([]))
y = attr.ib(factory=lambda: np.array([]))
threaded = attr.ib(default=False)
show_progress = attr.ib(default=True)
continuous = attr.ib(default=False)
def add_parameter(self, parameter, bounds):
''' Adds a parameter.
Arguments:
parameter (parametric.Parameter)
bounds (tuple): a (min, max) pair defining the limits of the
optimization.
points (array-like): a list of points to override sampling behavior
in the algorithm.
'''
self.parameters[parameter.name] = parameter
self.bounds[parameter.name] = bounds
return self
def best(self):
''' Returns the coordinates of the objective function minimum as determined by the algorithm '''
return self.X[np.argmin(self.y)]
def check_bounds(self, point):
''' Checks that the point is within the specified bounds '''
i = 0
for name, parameter in self.parameters.items():
bounds = self.bounds[name]
if not bounds[0] <= point[i] <= bounds[1]:
raise ValueError(f'The optimizer requested a point outside the valid bounds for parameter {parameter.name} and will now terminate.')
i += 1
def actuate(self, point):
''' Actuate to specified point '''
self.check_bounds(point)
for i, (name, parameter) in enumerate(self.parameters.items()):
parameter.set(point[i])
def measure(self, point):
''' Actuate to specified point and measure result '''
if self.experiment is None:
raise ValueError('No experiment has been assigned to this optimizer!')
self.actuate(point)
if isinstance(self.experiment, Parameter):
result = self.experiment.get()
else:
result = self.experiment()
if len(self.X[0]) == 0:
self.X = np.atleast_2d(point)
else:
self.X = np.append(self.X, np.atleast_2d(point), axis=0)
self.y = np.append(self.y, result)
return -self.sign*result
    @property
    def dataset(self):
        ''' If the optimizer is set to data_format = 'numpy', this converts acquired
            data into a pandas.DataFrame.
        '''
        # NOTE(review): the objective column is named after experiment.__name__,
        # but plot() looks the column up via experiment.name -- these differ when
        # the experiment is a plain function (only __name__) vs. a labAPI
        # Parameter (only name); confirm which attribute is intended.
        df = pd.DataFrame(self.X, columns = list(self.parameters.keys()))
        df[self.experiment.__name__] = self.y
        return df
def iterate(self, lst):
''' Functions similarly to the built-in list() generator, e.g.
for x in [1, 2, 3]:
print(x)
prints 1, 2, and 3.
If self.show_progress==True, returns a tqdm generator for displaying
a progress bar.
'''
if self.show_progress:
yield from tqdm(lst)
else:
yield from list(lst)
def run(self):
if self.threaded:
Thread(target=self._run).start()
else:
self._run()
    def plot(self, x):
        ''' A 1D plot of the objective function vs. an independent variable '''
        # NOTE(review): this reads the column self.experiment.name, while the
        # dataset property writes the column as self.experiment.__name__; these
        # differ for plain-function experiments -- confirm the intended attribute.
        data = self.dataset.sort_values(x)
        plt.plot(data[x], data[self.experiment.name])
        plt.xlabel(x)
        plt.ylabel(self.experiment.name)
    def plot_history(self):
        ''' A 1D plot of the objective function vs. the iteration number '''
        plt.figure(dpi=300)
        plt.plot(self.y, '.')  # individual observations
        plt.xlabel('Iteration')
        plt.ylabel(self.experiment.name)
plt.plot(np.minimum.accumulate(self.y), '-', color='#13476c') | [
"numpy.atleast_2d",
"matplotlib.pyplot.ylabel",
"numpy.minimum.accumulate",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.figure",
"tqdm.auto.tqdm",
"numpy.argmin",
"threading.Thread",
"attr.ib"
] | [((2374, 2395), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (2381, 2395), False, 'import attr\n'), ((2413, 2434), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (2420, 2434), False, 'import attr\n'), ((2448, 2469), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (2455, 2469), False, 'import attr\n'), ((2483, 2504), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (2490, 2504), False, 'import attr\n'), ((2561, 2598), 'attr.ib', 'attr.ib', ([], {'default': '(1)', 'converter': 'np.sign'}), '(default=1, converter=np.sign)\n', (2568, 2598), False, 'import attr\n'), ((2712, 2734), 'attr.ib', 'attr.ib', ([], {'default': '(False)'}), '(default=False)\n', (2719, 2734), False, 'import attr\n'), ((2755, 2776), 'attr.ib', 'attr.ib', ([], {'default': '(True)'}), '(default=True)\n', (2762, 2776), False, 'import attr\n'), ((2794, 2816), 'attr.ib', 'attr.ib', ([], {'default': '(False)'}), '(default=False)\n', (2801, 2816), False, 'import attr\n'), ((4750, 4775), 'numpy.append', 'np.append', (['self.y', 'result'], {}), '(self.y, result)\n', (4759, 4775), True, 'import numpy as np\n'), ((5841, 5886), 'matplotlib.pyplot.plot', 'plt.plot', (['data[x]', 'data[self.experiment.name]'], {}), '(data[x], data[self.experiment.name])\n', (5849, 5886), True, 'import matplotlib.pyplot as plt\n'), ((5895, 5908), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (5905, 5908), True, 'import matplotlib.pyplot as plt\n'), ((5917, 5949), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.experiment.name'], {}), '(self.experiment.name)\n', (5927, 5949), True, 'import matplotlib.pyplot as plt\n'), ((6064, 6083), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(300)'}), '(dpi=300)\n', (6074, 6083), True, 'import matplotlib.pyplot as plt\n'), ((6092, 6113), 'matplotlib.pyplot.plot', 'plt.plot', (['self.y', '"""."""'], {}), "(self.y, '.')\n", (6100, 6113), True, 'import matplotlib.pyplot as 
plt\n'), ((6122, 6145), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (6132, 6145), True, 'import matplotlib.pyplot as plt\n'), ((6154, 6186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.experiment.name'], {}), '(self.experiment.name)\n', (6164, 6186), True, 'import matplotlib.pyplot as plt\n'), ((3509, 3526), 'numpy.argmin', 'np.argmin', (['self.y'], {}), '(self.y)\n', (3518, 3526), True, 'import numpy as np\n'), ((4629, 4649), 'numpy.atleast_2d', 'np.atleast_2d', (['point'], {}), '(point)\n', (4642, 4649), True, 'import numpy as np\n'), ((6204, 6233), 'numpy.minimum.accumulate', 'np.minimum.accumulate', (['self.y'], {}), '(self.y)\n', (6225, 6233), True, 'import numpy as np\n'), ((2631, 2648), 'numpy.atleast_2d', 'np.atleast_2d', (['[]'], {}), '([])\n', (2644, 2648), True, 'import numpy as np\n'), ((2682, 2694), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2690, 2694), True, 'import numpy as np\n'), ((4703, 4723), 'numpy.atleast_2d', 'np.atleast_2d', (['point'], {}), '(point)\n', (4716, 4723), True, 'import numpy as np\n'), ((5500, 5509), 'tqdm.auto.tqdm', 'tqdm', (['lst'], {}), '(lst)\n', (5504, 5509), False, 'from tqdm.auto import tqdm\n'), ((5615, 5639), 'threading.Thread', 'Thread', ([], {'target': 'self._run'}), '(target=self._run)\n', (5621, 5639), False, 'from threading import Thread\n')] |
import numpy as np
from core.tresbases import bases_ent
# Demo: build entangled bases in a dim-dimensional space and print one of them.
dim = 4
# First-component amplitudes for each of the three bases (all 1/sqrt(2)).
v_a = np.array([1/np.sqrt(2), 1/np.sqrt(2), 1/np.sqrt(2)])
# Complementary amplitudes so that v_a**2 + v_b**2 == 1 element-wise.
v_b = np.sqrt(1 - v_a**2)
# Relative phases for each basis: 0, pi/2, pi/4.
v_fase = np.array([0, np.pi/2, np.pi/4 ])
# bases_ent is project-local; presumably returns a reference basis and a stack
# of bases indexed along the last axis -- TODO confirm against core.tresbases.
base_0, d_bases = bases_ent(dim, v_a, v_b, v_fase)
print(d_bases[:, :, 2])
| [
"numpy.array",
"numpy.sqrt",
"core.tresbases.bases_ent"
] | [((130, 151), 'numpy.sqrt', 'np.sqrt', (['(1 - v_a ** 2)'], {}), '(1 - v_a ** 2)\n', (137, 151), True, 'import numpy as np\n'), ((159, 194), 'numpy.array', 'np.array', (['[0, np.pi / 2, np.pi / 4]'], {}), '([0, np.pi / 2, np.pi / 4])\n', (167, 194), True, 'import numpy as np\n'), ((210, 242), 'core.tresbases.bases_ent', 'bases_ent', (['dim', 'v_a', 'v_b', 'v_fase'], {}), '(dim, v_a, v_b, v_fase)\n', (219, 242), False, 'from core.tresbases import bases_ent\n'), ((83, 93), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (90, 93), True, 'import numpy as np\n'), ((97, 107), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (104, 107), True, 'import numpy as np\n'), ((111, 121), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (118, 121), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import cv2
import copy
import numpy as np
import tensorflow as tf
from utils.curve import points_to_heatmap_rectangle_68pt
from six.moves import xrange
from six.moves import urllib
from datagen import DataGenerator
from datagen import ensure_dir
from FAB import FAB
# ---- model / dataset constants ----
MOMENTUM = 0.9
POINTS_NUM = 68  # number of facial landmarks
IMAGE_SIZE = 256
PIC_CHANNEL = 3
num_input_imgs = 3  # consecutive frames fed to the network
NUM_CLASSES = POINTS_NUM*2  # one (x, y) pair per landmark
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
structure_predictor_net_channel = 64
# ---- command-line flags ----
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('structure_predictor_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('video_deblur_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('resnet_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('end_2_end_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('end_2_end_test_dir', '', """Directory where to write test logs.""")
tf.app.flags.DEFINE_string('data_dir', '', """Directory where the dataset stores.""")
tf.app.flags.DEFINE_string('img_list', '', """Directory where the img_list stores.""")
tf.app.flags.DEFINE_float('learning_rate', 0.0, "learning rate.")
tf.app.flags.DEFINE_integer('batch_size', 1, "batch size")
tf.app.flags.DEFINE_boolean('resume_structure_predictor', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('resume_resnet', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('resume_video_deblur', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('resume_all', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('minimal_summaries', False, """Produce fewer summaries to save HD space.""")
tf.app.flags.DEFINE_boolean('use_bn', False, """Use batch normalization. Otherwise use biases.""")
def resume(sess, do_resume, ckpt_path, key_word):
    """Restore variables whose names contain *key_word* from the latest
    checkpoint in *ckpt_path*.

    Args:
        sess: active tf.Session to restore into.
        do_resume (bool): if False, this is a no-op.
        ckpt_path (str): directory searched for the latest checkpoint.
        key_word (str): substring selecting which global variables to restore;
            an empty string selects all variables.
    """
    if not do_resume:
        return
    structure_predictor_latest = tf.train.latest_checkpoint(ckpt_path)
    if not structure_predictor_latest:
        # Bug fix: the original printed this warning but then still called
        # saver.restore(sess, None), which crashes. Bail out instead.
        print ("\n No checkpoint to continue from in ", ckpt_path, '\n')
        return
    var = tf.global_variables()
    structure_predictor_var_to_restore = [val for val in var if key_word in val.name]
    saver_structure_predictor = tf.train.Saver(structure_predictor_var_to_restore)
    saver_structure_predictor.restore(sess, structure_predictor_latest)
def test(resnet_model, is_training, F, H, F_curr, H_curr, input_images_blur,
        input_images_boundary, next_boundary_gt, labels, data_dir, img_list,
        dropout_ratio):
    """Evaluate the FAB model frame-by-frame on a 300VW-style test set.

    Restores weights according to the resume flags, then runs the recurrent
    graph: the hidden states (F, H) and the boundary heatmap rebuilt from the
    landmarks predicted for frame t are fed back as inputs for frame t+1.
    Predicted and ground-truth landmarks are written to pre.txt / gt.txt and a
    visualization image is saved for every step.

    Args:
        resnet_model: FAB model instance providing the `logits` tensor.
        is_training: bool placeholder (unused here; kept for interface parity).
        F, H: placeholders for the recurrent feature / hidden states.
        F_curr, H_curr: tensors producing the next recurrent states.
        input_images_blur: placeholder for the stacked blurred input frames.
        input_images_boundary: placeholder for the two previous boundary maps.
        next_boundary_gt: placeholder for the ground-truth boundary heatmap.
        labels: placeholder for the ground-truth landmark coordinates.
        data_dir, img_list: dataset location and image-list file.
        dropout_ratio: float placeholder (fed with 1.0 during testing).
    """
    global_step = tf.get_variable('global_step', [],
                                initializer=tf.constant_initializer(0),
                                trainable=False)
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    val_save_root = os.path.join(FLAGS.end_2_end_test_dir,'visualization')
    ################################ resume part #################################
    # resume weights
    resume(sess, FLAGS.resume_structure_predictor, FLAGS.structure_predictor_train_dir, 'voxel_flow_model_')
    resume(sess, FLAGS.resume_video_deblur, FLAGS.video_deblur_train_dir, 'video_deblur_model_')
    resume(sess, FLAGS.resume_resnet, FLAGS.resnet_train_dir, 'resnet_model_')
    resume(sess, FLAGS.resume_all, FLAGS.end_2_end_train_dir, '')
    ##############################################################################
    gt_file_path = os.path.join(FLAGS.end_2_end_test_dir,'gt.txt')
    pre_file_path = os.path.join(FLAGS.end_2_end_test_dir,'pre.txt')
    ensure_dir(gt_file_path)
    ensure_dir(pre_file_path)
    gt_file = open(gt_file_path,'w')
    pre_file = open(pre_file_path,'w')
    dataset = DataGenerator(data_dir,img_list)
    dataset._create_train_table()
    dataset._create_sets_for_300VW()
    test_gen = dataset._aux_generator(batch_size = FLAGS.batch_size, num_input_imgs = num_input_imgs,
                                      NUM_CLASSES = POINTS_NUM*2, sample_set='test')
    test_break_flag = False
    for x in xrange(len(dataset.train_table)-2):
        step = sess.run(global_step)
        if not test_break_flag:
            test_line_num, frame_name, input_boundaries, boundary_gt_test, input_images_blur_generated, landmark_gt_test, names, test_break_flag = next(test_gen)
        # Start of a new clip: reset the recurrent state to zeros and use the
        # dataset-provided boundary maps instead of the previous prediction.
        if (frame_name == '2.jpg') or test_line_num <= 3:
            input_images_boundary_init = copy.deepcopy(input_boundaries)
            F_init = np.zeros([FLAGS.batch_size, IMAGE_SIZE//2,
                              IMAGE_SIZE//2, structure_predictor_net_channel//2], dtype=np.float32)
            H_init = np.zeros([1, FLAGS.batch_size, IMAGE_SIZE//2,
                              IMAGE_SIZE//2, structure_predictor_net_channel], dtype=np.float32)
            feed_dict={
                input_images_boundary:input_images_boundary_init,
                input_images_blur:input_images_blur_generated,
                F:F_init,
                H:H_init,
                labels:landmark_gt_test,
                next_boundary_gt:boundary_gt_test,
                dropout_ratio:1.0
            }
        else:
            # Mid-clip: rebuild the boundary heatmap from the landmarks predicted
            # on the previous frame (`o` comes from the previous loop iteration).
            output_points = o[0]
            output_points = np.reshape(output_points,(POINTS_NUM,2))
            boundary_from_points = points_to_heatmap_rectangle_68pt(output_points)
            boundary_from_points = np.expand_dims(boundary_from_points,axis=0)
            boundary_from_points = np.expand_dims(boundary_from_points,axis=3)
            input_images_boundary_init = np.concatenate([input_images_boundary_init[:,:,:,1:2],
                                                            boundary_from_points], axis=3)
            feed_dict={
                input_images_boundary:input_images_boundary_init,
                input_images_blur:input_images_blur_generated,
                F:o[-2],
                H:o[-1],
                labels:landmark_gt_test,
                next_boundary_gt:boundary_gt_test,
                dropout_ratio:1.0
            }
        i = [resnet_model.logits, F_curr, H_curr]
        o = sess.run(i, feed_dict=feed_dict)
        pres = o[0]
        # Dump predictions (rescaled to pixel coordinates), one line per sample.
        for batch_num,pre in enumerate(pres):
            for v in pre:
                pre_file.write(str(v*255.0)+' ')
            if len(names) > 1:
                pre_file.write(names[-1])
            else:
                pre_file.write(names[batch_num])
            pre_file.write('\n')
        for batch_num,g in enumerate(landmark_gt_test):
            for v in g:
                gt_file.write(str(v*255.0)+' ')
            if len(names) > 1:
                gt_file.write(names[-1])
            else:
                gt_file.write(names[batch_num])
            gt_file.write('\n')
        # Visualization: draw the predicted landmarks on the first input frame.
        img = input_images_blur_generated[0,:,:,0:3]*255
        points = o[0][0]*255
        for point_num in range(int(points.shape[0]/2)):
            cv2.circle(img,(int(round(points[point_num*2])),int(round(points[point_num*2+1]))),1,(55,225,155),2)
        val_save_path = os.path.join(val_save_root,str(step)+'.jpg')
        ensure_dir(val_save_path)
        cv2.imwrite(val_save_path,img)
        # NOTE(review): this rebinds the Python name `global_step` to a new add
        # op every iteration (the graph grows each step and the tf variable is
        # never assigned) -- confirm whether step numbering is intended this way.
        global_step = global_step + 1
    print('Test done!')
def main(argv=None):
    """Build the FAB graph and its placeholders, then run the test loop."""
    resnet_model = FAB()
    is_training = tf.placeholder('bool', [], name='is_training')
    # Two previous boundary heatmaps stacked along the channel axis.
    input_images_boundary = tf.placeholder(tf.float32,shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 2))
    # num_input_imgs consecutive blurred RGB frames stacked along channels.
    input_images_blur = tf.placeholder(tf.float32,shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, PIC_CHANNEL*3))
    next_boundary_gt = tf.placeholder(tf.float32,shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 1))
    labels = tf.placeholder(tf.float32,shape=(FLAGS.batch_size,NUM_CLASSES))
    dropout_ratio = tf.placeholder(tf.float32)
    # Recurrent feature / hidden-state placeholders fed back between frames.
    F = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_SIZE//2, IMAGE_SIZE//2, structure_predictor_net_channel//2])
    H = tf.placeholder(tf.float32, [1, FLAGS.batch_size, IMAGE_SIZE//2, IMAGE_SIZE//2, structure_predictor_net_channel])
    F_curr, H_curr= \
        resnet_model.FAB_inference(input_images_boundary, input_images_blur, F, H, FLAGS.batch_size,
                                   net_channel=structure_predictor_net_channel, num_classes=136, num_blocks=[2, 2, 2, 2],
                                   use_bias=(not FLAGS.use_bn), bottleneck=True, dropout_ratio=1.0)
    test(resnet_model, is_training, F, H, F_curr, H_curr, input_images_blur,
        input_images_boundary, next_boundary_gt, labels, FLAGS.data_dir, FLAGS.img_list,
        dropout_ratio)
if __name__ == '__main__':
tf.app.run()
| [
"utils.curve.points_to_heatmap_rectangle_68pt",
"copy.deepcopy",
"tensorflow.app.run",
"numpy.reshape",
"datagen.ensure_dir",
"tensorflow.placeholder",
"numpy.concatenate",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.ConfigProto",
"FAB.FAB",
"tensorflow.initialize_all_variables",
"tenso... | [((670, 784), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""structure_predictor_train_dir"""', '""""""', '"""Directory where to write train_checkpoints."""'], {}), "('structure_predictor_train_dir', '',\n 'Directory where to write train_checkpoints.')\n", (696, 784), True, 'import tensorflow as tf\n'), ((785, 892), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""video_deblur_train_dir"""', '""""""', '"""Directory where to write train_checkpoints."""'], {}), "('video_deblur_train_dir', '',\n 'Directory where to write train_checkpoints.')\n", (811, 892), True, 'import tensorflow as tf\n'), ((893, 994), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""resnet_train_dir"""', '""""""', '"""Directory where to write train_checkpoints."""'], {}), "('resnet_train_dir', '',\n 'Directory where to write train_checkpoints.')\n", (919, 994), True, 'import tensorflow as tf\n'), ((995, 1099), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""end_2_end_train_dir"""', '""""""', '"""Directory where to write train_checkpoints."""'], {}), "('end_2_end_train_dir', '',\n 'Directory where to write train_checkpoints.')\n", (1021, 1099), True, 'import tensorflow as tf\n'), ((1100, 1195), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""end_2_end_test_dir"""', '""""""', '"""Directory where to write test logs."""'], {}), "('end_2_end_test_dir', '',\n 'Directory where to write test logs.')\n", (1126, 1195), True, 'import tensorflow as tf\n'), ((1196, 1281), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '""""""', '"""Directory where the dataset stores."""'], {}), "('data_dir', '',\n 'Directory where the dataset stores.')\n", (1222, 1281), True, 'import tensorflow as tf\n'), ((1282, 1368), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""img_list"""', '""""""', '"""Directory where the img_list 
stores."""'], {}), "('img_list', '',\n 'Directory where the img_list stores.')\n", (1308, 1368), True, 'import tensorflow as tf\n'), ((1370, 1435), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.0)', '"""learning rate."""'], {}), "('learning_rate', 0.0, 'learning rate.')\n", (1395, 1435), True, 'import tensorflow as tf\n'), ((1436, 1494), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(1)', '"""batch size"""'], {}), "('batch_size', 1, 'batch size')\n", (1463, 1494), True, 'import tensorflow as tf\n'), ((1495, 1598), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""resume_structure_predictor"""', '(False)', '"""Resume from latest saved state."""'], {}), "('resume_structure_predictor', False,\n 'Resume from latest saved state.')\n", (1522, 1598), True, 'import tensorflow as tf\n'), ((1599, 1689), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""resume_resnet"""', '(False)', '"""Resume from latest saved state."""'], {}), "('resume_resnet', False,\n 'Resume from latest saved state.')\n", (1626, 1689), True, 'import tensorflow as tf\n'), ((1690, 1786), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""resume_video_deblur"""', '(False)', '"""Resume from latest saved state."""'], {}), "('resume_video_deblur', False,\n 'Resume from latest saved state.')\n", (1717, 1786), True, 'import tensorflow as tf\n'), ((1787, 1874), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""resume_all"""', '(False)', '"""Resume from latest saved state."""'], {}), "('resume_all', False,\n 'Resume from latest saved state.')\n", (1814, 1874), True, 'import tensorflow as tf\n'), ((1875, 1979), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""minimal_summaries"""', '(False)', '"""Produce fewer summaries to save HD space."""'], {}), "('minimal_summaries', False,\n 'Produce 
fewer summaries to save HD space.')\n", (1902, 1979), True, 'import tensorflow as tf\n'), ((1980, 2078), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""use_bn"""', '(False)', '"""Use batch normalization. Otherwise use biases."""'], {}), "('use_bn', False,\n 'Use batch normalization. Otherwise use biases.')\n", (2007, 2078), True, 'import tensorflow as tf\n'), ((2140, 2161), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2159, 2161), True, 'import tensorflow as tf\n'), ((2999, 3028), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (3026, 3028), True, 'import tensorflow as tf\n'), ((3141, 3196), 'os.path.join', 'os.path.join', (['FLAGS.end_2_end_test_dir', '"""visualization"""'], {}), "(FLAGS.end_2_end_test_dir, 'visualization')\n", (3153, 3196), False, 'import os\n'), ((3757, 3805), 'os.path.join', 'os.path.join', (['FLAGS.end_2_end_test_dir', '"""gt.txt"""'], {}), "(FLAGS.end_2_end_test_dir, 'gt.txt')\n", (3769, 3805), False, 'import os\n'), ((3825, 3874), 'os.path.join', 'os.path.join', (['FLAGS.end_2_end_test_dir', '"""pre.txt"""'], {}), "(FLAGS.end_2_end_test_dir, 'pre.txt')\n", (3837, 3874), False, 'import os\n'), ((3878, 3902), 'datagen.ensure_dir', 'ensure_dir', (['gt_file_path'], {}), '(gt_file_path)\n', (3888, 3902), False, 'from datagen import ensure_dir\n'), ((3907, 3932), 'datagen.ensure_dir', 'ensure_dir', (['pre_file_path'], {}), '(pre_file_path)\n', (3917, 3932), False, 'from datagen import ensure_dir\n'), ((4024, 4057), 'datagen.DataGenerator', 'DataGenerator', (['data_dir', 'img_list'], {}), '(data_dir, img_list)\n', (4037, 4057), False, 'from datagen import DataGenerator\n'), ((7607, 7612), 'FAB.FAB', 'FAB', ([], {}), '()\n', (7610, 7612), False, 'from FAB import FAB\n'), ((7632, 7678), 'tensorflow.placeholder', 'tf.placeholder', (['"""bool"""', '[]'], {'name': '"""is_training"""'}), "('bool', [], name='is_training')\n", (7646, 7678), True, 'import 
tensorflow as tf\n'), ((7707, 7786), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 2)'}), '(tf.float32, shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 2))\n', (7721, 7786), True, 'import tensorflow as tf\n'), ((7810, 7907), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, PIC_CHANNEL * 3)'}), '(tf.float32, shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE,\n PIC_CHANNEL * 3))\n', (7824, 7907), True, 'import tensorflow as tf\n'), ((7924, 8003), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 1)'}), '(tf.float32, shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 1))\n', (7938, 8003), True, 'import tensorflow as tf\n'), ((8016, 8081), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(FLAGS.batch_size, NUM_CLASSES)'}), '(tf.float32, shape=(FLAGS.batch_size, NUM_CLASSES))\n', (8030, 8081), True, 'import tensorflow as tf\n'), ((8100, 8126), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (8114, 8126), True, 'import tensorflow as tf\n'), ((8135, 8257), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2, \n structure_predictor_net_channel // 2]'], {}), '(tf.float32, [FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE //\n 2, structure_predictor_net_channel // 2])\n', (8149, 8257), True, 'import tensorflow as tf\n'), ((8256, 8377), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2,\n structure_predictor_net_channel]'], {}), '(tf.float32, [1, FLAGS.batch_size, IMAGE_SIZE // 2, \n IMAGE_SIZE // 2, structure_predictor_net_channel])\n', (8270, 8377), True, 'import tensorflow as tf\n'), ((8938, 8950), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (8948, 8950), True, 'import tensorflow as tf\n'), 
((2217, 2254), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_path'], {}), '(ckpt_path)\n', (2243, 2254), True, 'import tensorflow as tf\n'), ((2502, 2552), 'tensorflow.train.Saver', 'tf.train.Saver', (['structure_predictor_var_to_restore'], {}), '(structure_predictor_var_to_restore)\n', (2516, 2552), True, 'import tensorflow as tf\n'), ((7437, 7462), 'datagen.ensure_dir', 'ensure_dir', (['val_save_path'], {}), '(val_save_path)\n', (7447, 7462), False, 'from datagen import ensure_dir\n'), ((7471, 7502), 'cv2.imwrite', 'cv2.imwrite', (['val_save_path', 'img'], {}), '(val_save_path, img)\n', (7482, 7502), False, 'import cv2\n'), ((2908, 2934), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (2931, 2934), True, 'import tensorflow as tf\n'), ((3058, 3100), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)'}), '(log_device_placement=False)\n', (3072, 3100), True, 'import tensorflow as tf\n'), ((4728, 4759), 'copy.deepcopy', 'copy.deepcopy', (['input_boundaries'], {}), '(input_boundaries)\n', (4741, 4759), False, 'import copy\n'), ((4781, 4904), 'numpy.zeros', 'np.zeros', (['[FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2, \n structure_predictor_net_channel // 2]'], {'dtype': 'np.float32'}), '([FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2, \n structure_predictor_net_channel // 2], dtype=np.float32)\n', (4789, 4904), True, 'import numpy as np\n'), ((4947, 5067), 'numpy.zeros', 'np.zeros', (['[1, FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2,\n structure_predictor_net_channel]'], {'dtype': 'np.float32'}), '([1, FLAGS.batch_size, IMAGE_SIZE // 2, IMAGE_SIZE // 2,\n structure_predictor_net_channel], dtype=np.float32)\n', (4955, 5067), True, 'import numpy as np\n'), ((5548, 5590), 'numpy.reshape', 'np.reshape', (['output_points', '(POINTS_NUM, 2)'], {}), '(output_points, (POINTS_NUM, 2))\n', (5558, 5590), True, 'import numpy as np\n'), ((5624, 5671), 
'utils.curve.points_to_heatmap_rectangle_68pt', 'points_to_heatmap_rectangle_68pt', (['output_points'], {}), '(output_points)\n', (5656, 5671), False, 'from utils.curve import points_to_heatmap_rectangle_68pt\n'), ((5707, 5751), 'numpy.expand_dims', 'np.expand_dims', (['boundary_from_points'], {'axis': '(0)'}), '(boundary_from_points, axis=0)\n', (5721, 5751), True, 'import numpy as np\n'), ((5786, 5830), 'numpy.expand_dims', 'np.expand_dims', (['boundary_from_points'], {'axis': '(3)'}), '(boundary_from_points, axis=3)\n', (5800, 5830), True, 'import numpy as np\n'), ((5872, 5964), 'numpy.concatenate', 'np.concatenate', (['[input_images_boundary_init[:, :, :, 1:2], boundary_from_points]'], {'axis': '(3)'}), '([input_images_boundary_init[:, :, :, 1:2],\n boundary_from_points], axis=3)\n', (5886, 5964), True, 'import numpy as np\n')] |
# @Time : 2020/11/22
# @Author : <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/11/24, 2021/1/9
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>
# UPDATE:
# @Time : 2021/11/5
# @Author : <NAME>
# @Email : <EMAIL>
import os
from abc import ABC, abstractmethod
import numpy as np
import random
import nltk
import torch
from fuzzywuzzy.process import extractOne
from loguru import logger
from nltk import word_tokenize
from torch import optim
from transformers import AdamW, Adafactor
from crslab.config import SAVE_PATH
from crslab.evaluator import get_evaluator
from crslab.evaluator.metrics.base import AverageMetric
from crslab.model import get_model
from crslab.system.utils import lr_scheduler
from crslab.system.utils.functions import compute_grad_norm
optim_class = {}
# Register every optimizer class exported by torch.optim (public, CamelCase
# names only), plus the transformers optimizers, keyed by class name.
optim_class.update({k: v for k, v in optim.__dict__.items() if not k.startswith('__') and k[0].isupper()})
optim_class.update({'AdamW': AdamW, 'Adafactor': Adafactor})
# Same registry trick for the LR schedulers shipped in crslab.system.utils.
lr_scheduler_class = {k: v for k, v in lr_scheduler.__dict__.items() if not k.startswith('__') and k[0].isupper()}
# Tokenizer names that presumably need transformers-style handling downstream
# (usage is not visible in this chunk -- confirm against subclasses).
transformers_tokenizer = ('bert', 'gpt2')
class BaseSystem(ABC):
"""Base class for all system"""
    def __init__(self, opt, train_dataloader, valid_dataloader, test_dataloader, vocab, side_data, restore_system=False,
                 interact=False, debug=False, tensorboard=False):
        """
        Args:
            opt (dict): Indicating the hyper parameters.
            train_dataloader (BaseDataLoader): Indicating the train dataloader of corresponding dataset.
            valid_dataloader (BaseDataLoader): Indicating the valid dataloader of corresponding dataset.
            test_dataloader (BaseDataLoader): Indicating the test dataloader of corresponding dataset.
            vocab (dict): Indicating the vocabulary.
            side_data (dict): Indicating the side data.
            restore_system (bool, optional): Indicating if we store system after training. Defaults to False.
            interact (bool, optional): Indicating if we interact with system. Defaults to False.
            debug (bool, optional): Indicating if we train in debug mode. Defaults to False.
            tensorboard (bool, optional) Indicating if we monitor the training performance in tensorboard. Defaults to False.
        """
        self.opt = opt
        # gpu == [-1] means CPU; otherwise use CUDA.
        # NOTE(review): the single-gpu and multi-gpu branches are identical here
        # (both select plain 'cuda') -- confirm whether multi-gpu placement is
        # handled elsewhere.
        if opt["gpu"] == [-1]:
            self.device = torch.device('cpu')
        elif len(opt["gpu"]) == 1:
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cuda')
        # seed: make python / numpy / torch (incl. all CUDA devices) deterministic
        if 'seed' in opt:
            seed = int(opt['seed'])
            random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            logger.info(f'[Set seed] {seed}')
        # data: in debug mode, train on the (smaller) validation split
        if debug:
            self.train_dataloader = valid_dataloader
            self.valid_dataloader = valid_dataloader
            self.test_dataloader = test_dataloader
        else:
            self.train_dataloader = train_dataloader
            self.valid_dataloader = valid_dataloader
            self.test_dataloader = test_dataloader
        self.vocab = vocab
        self.side_data = side_data
        # model: either one end-to-end model, or separate rec/conv/policy models
        if 'model' in opt:
            self.model = get_model(opt, opt['model'], self.device, vocab, side_data).to(self.device)
        else:
            if 'rec_model' in opt:
                self.rec_model = get_model(opt, opt['rec_model'], self.device, vocab['rec'], side_data['rec']).to(
                    self.device)
            if 'conv_model' in opt:
                self.conv_model = get_model(opt, opt['conv_model'], self.device, vocab['conv'], side_data['conv']).to(
                    self.device)
            if 'policy_model' in opt:
                self.policy_model = get_model(opt, opt['policy_model'], self.device, vocab['policy'],
                                              side_data['policy']).to(self.device)
        # checkpoint path under SAVE_PATH, defaulting to '<model_name>.pth'
        model_file_name = opt.get('model_file', f'{opt["model_name"]}.pth')
        self.model_file = os.path.join(SAVE_PATH, model_file_name)
        if restore_system:
            self.restore_model()
        if not interact:
            self.evaluator = get_evaluator(opt.get('evaluator', 'standard'), opt['dataset'], tensorboard)
def init_optim(self, opt, parameters):
self.optim_opt = opt
parameters = list(parameters)
if isinstance(parameters[0], dict):
for i, d in enumerate(parameters):
parameters[i]['params'] = list(d['params'])
# gradient acumulation
self.update_freq = opt.get('update_freq', 1)
self._number_grad_accum = 0
self.gradient_clip = opt.get('gradient_clip', -1)
self.build_optimizer(parameters)
self.build_lr_scheduler()
if isinstance(parameters[0], dict):
self.parameters = []
for d in parameters:
self.parameters.extend(d['params'])
else:
self.parameters = parameters
# early stop
self.need_early_stop = self.optim_opt.get('early_stop', False)
if self.need_early_stop:
logger.debug('[Enable early stop]')
self.reset_early_stop_state()
def build_optimizer(self, parameters):
optimizer_opt = self.optim_opt['optimizer']
optimizer = optimizer_opt.pop('name')
self.optimizer = optim_class[optimizer](parameters, **optimizer_opt)
logger.info(f"[Build optimizer: {optimizer}]")
def build_lr_scheduler(self):
"""
Create the learning rate scheduler, and assign it to self.scheduler. This
scheduler will be updated upon a call to receive_metrics. May also create
self.warmup_scheduler, if appropriate.
:param state_dict states: Possible state_dict provided by model
checkpoint, for restoring LR state
:param bool hard_reset: If true, the LR scheduler should ignore the
state dictionary.
"""
if self.optim_opt.get('lr_scheduler', None):
lr_scheduler_opt = self.optim_opt['lr_scheduler']
lr_scheduler = lr_scheduler_opt.pop('name')
self.scheduler = lr_scheduler_class[lr_scheduler](self.optimizer, **lr_scheduler_opt)
logger.info(f"[Build scheduler {lr_scheduler}]")
def reset_early_stop_state(self):
self.best_valid = None
self.drop_cnt = 0
self.impatience = self.optim_opt.get('impatience', 3)
if self.optim_opt['stop_mode'] == 'max':
self.stop_mode = 1
elif self.optim_opt['stop_mode'] == 'min':
self.stop_mode = -1
else:
raise
logger.debug('[Reset early stop state]')
    @abstractmethod
    def fit(self):
        """Fit the whole system (training loop); implemented by subclasses."""
        pass
    @abstractmethod
    def step(self, batch, stage, mode):
        """Calculate loss and prediction for batch data under a certain stage and mode.

        Args:
            batch (dict or tuple): batch data
            stage (str): recommendation/policy/conversation etc.
            mode (str): train/valid/test
        """
        pass
    def backward(self, loss):
        """Empty grad, backward loss and update params.

        Handles gradient accumulation: when ``update_freq > 1`` the loss is
        scaled by ``1/update_freq`` and the optimizer only steps once every
        ``update_freq`` calls (see ``_zero_grad``/``_update_params``).

        Args:
            loss (torch.Tensor): loss to backpropagate.
        """
        self._zero_grad()
        if self.update_freq > 1:
            self._number_grad_accum = (self._number_grad_accum + 1) % self.update_freq
            loss /= self.update_freq
        # NOTE(review): passing `loss.clone().detach()` as the gradient argument
        # implies a non-scalar loss weighted by its own (detached) values —
        # confirm this weighting is intended rather than a plain `loss.backward()`.
        loss.backward(loss.clone().detach())
        self._update_params()
    def _zero_grad(self):
        """Zero the optimizer's gradients, unless mid gradient-accumulation."""
        if self._number_grad_accum != 0:
            # if we're accumulating gradients, don't actually zero things out yet.
            return
        self.optimizer.zero_grad()
    def _update_params(self):
        """Optionally clip gradients, record their norm, step the optimizer and,
        if present, the LR scheduler.

        Under gradient accumulation the optimizer only steps when a full cycle
        of ``update_freq`` backward calls has completed.
        """
        if self.update_freq > 1:
            # we're doing gradient accumulation: only step once every
            # `update_freq` updates; self._number_grad_accum is advanced in
            # the backward() function.
            if self._number_grad_accum != 0:
                return
        if self.gradient_clip > 0:
            # clip_grad_norm_ returns the total (pre-clip) gradient norm
            grad_norm = torch.nn.utils.clip_grad_norm_(
                self.parameters, self.gradient_clip
            )
            self.evaluator.optim_metrics.add('grad norm', AverageMetric(grad_norm))
            self.evaluator.optim_metrics.add(
                'grad clip ratio',
                AverageMetric(float(grad_norm > self.gradient_clip)),
            )
        else:
            # no clipping configured: just record the norm for monitoring
            grad_norm = compute_grad_norm(self.parameters)
            self.evaluator.optim_metrics.add('grad norm', AverageMetric(grad_norm))
        self.optimizer.step()
        if hasattr(self, 'scheduler'):
            self.scheduler.train_step()
def adjust_lr(self, metric=None):
"""adjust learning rate w/o metric by scheduler
Args:
metric (optional): Defaults to None.
"""
if not hasattr(self, 'scheduler') or self.scheduler is None:
return
self.scheduler.valid_step(metric)
logger.debug('[Adjust learning rate after valid epoch]')
def early_stop(self, metric):
if not self.need_early_stop:
return False
if self.best_valid is None or metric * self.stop_mode > self.best_valid * self.stop_mode:
self.best_valid = metric
self.drop_cnt = 0
logger.info('[Get new best model]')
return False
else:
self.drop_cnt += 1
if self.drop_cnt >= self.impatience:
logger.info('[Early stop]')
return True
def save_model(self):
r"""Store the model parameters."""
state = {}
if hasattr(self, 'model'):
state['model_state_dict'] = self.model.state_dict()
if hasattr(self, 'rec_model'):
state['rec_state_dict'] = self.rec_model.state_dict()
if hasattr(self, 'conv_model'):
state['conv_state_dict'] = self.conv_model.state_dict()
if hasattr(self, 'policy_model'):
state['policy_state_dict'] = self.policy_model.state_dict()
os.makedirs(SAVE_PATH, exist_ok=True)
torch.save(state, self.model_file)
logger.info(f'[Save model into {self.model_file}]')
def restore_model(self):
r"""Store the model parameters."""
if not os.path.exists(self.model_file):
raise ValueError(f'Saved model [{self.model_file}] does not exist')
checkpoint = torch.load(self.model_file, map_location=self.device)
if hasattr(self, 'model'):
self.model.load_state_dict(checkpoint['model_state_dict'])
if hasattr(self, 'rec_model'):
self.rec_model.load_state_dict(checkpoint['rec_state_dict'])
if hasattr(self, 'conv_model'):
self.conv_model.load_state_dict(checkpoint['conv_state_dict'])
if hasattr(self, 'policy_model'):
self.policy_model.load_state_dict(checkpoint['policy_state_dict'])
logger.info(f'[Restore model from {self.model_file}]')
    @abstractmethod
    def interact(self):
        """Run an interactive session with the system; implemented by subclasses."""
        pass
def init_interact(self):
self.finished = False
self.context = {
'rec': {},
'conv': {}
}
for key in self.context:
self.context[key]['context_tokens'] = []
self.context[key]['context_entities'] = []
self.context[key]['context_words'] = []
self.context[key]['context_items'] = []
self.context[key]['user_profile'] = []
self.context[key]['interaction_history'] = []
self.context[key]['entity_set'] = set()
self.context[key]['word_set'] = set()
def update_context(self, stage, token_ids=None, entity_ids=None, item_ids=None, word_ids=None):
if token_ids is not None:
self.context[stage]['context_tokens'].append(token_ids)
if item_ids is not None:
self.context[stage]['context_items'] += item_ids
if entity_ids is not None:
for entity_id in entity_ids:
if entity_id not in self.context[stage]['entity_set']:
self.context[stage]['entity_set'].add(entity_id)
self.context[stage]['context_entities'].append(entity_id)
if word_ids is not None:
for word_id in word_ids:
if word_id not in self.context[stage]['word_set']:
self.context[stage]['word_set'].add(word_id)
self.context[stage]['context_words'].append(word_id)
def get_input(self, language):
print("Enter [EXIT] if you want to quit.")
if language == 'zh':
language = 'chinese'
elif language == 'en':
language = 'english'
else:
raise
text = input(f"Enter Your Message in {language}: ")
if '[EXIT]' in text:
self.finished = True
return text
def tokenize(self, text, tokenizer, path=None):
tokenize_fun = getattr(self, tokenizer + '_tokenize')
if path is not None:
return tokenize_fun(text, path)
else:
return tokenize_fun(text)
def nltk_tokenize(self, text):
nltk.download('punkt')
return word_tokenize(text)
def bert_tokenize(self, text, path):
if not hasattr(self, 'bert_tokenizer'):
from transformers import AutoTokenizer
self.bert_tokenizer = AutoTokenizer.from_pretrained(path)
return self.bert_tokenizer.tokenize(text)
def gpt2_tokenize(self, text, path):
if not hasattr(self, 'gpt2_tokenizer'):
from transformers import AutoTokenizer
self.gpt2_tokenizer = AutoTokenizer.from_pretrained(path)
return self.gpt2_tokenizer.tokenize(text)
def pkuseg_tokenize(self, text):
if not hasattr(self, 'pkuseg_tokenizer'):
import pkuseg
self.pkuseg_tokenizer = pkuseg.pkuseg()
return self.pkuseg_tokenizer.cut(text)
def link(self, tokens, entities):
linked_entities = []
for token in tokens:
entity = extractOne(token, entities, score_cutoff=90)
if entity:
linked_entities.append(entity[0])
return linked_entities
| [
"crslab.system.utils.functions.compute_grad_norm",
"nltk.download",
"torch.nn.utils.clip_grad_norm_",
"crslab.evaluator.metrics.base.AverageMetric",
"fuzzywuzzy.process.extractOne",
"crslab.system.utils.lr_scheduler.__dict__.items",
"transformers.AutoTokenizer.from_pretrained",
"crslab.model.get_model... | [((1005, 1034), 'crslab.system.utils.lr_scheduler.__dict__.items', 'lr_scheduler.__dict__.items', ([], {}), '()\n', (1032, 1034), False, 'from crslab.system.utils import lr_scheduler\n'), ((4146, 4186), 'os.path.join', 'os.path.join', (['SAVE_PATH', 'model_file_name'], {}), '(SAVE_PATH, model_file_name)\n', (4158, 4186), False, 'import os\n'), ((5558, 5604), 'loguru.logger.info', 'logger.info', (['f"""[Build optimizer: {optimizer}]"""'], {}), "(f'[Build optimizer: {optimizer}]')\n", (5569, 5604), False, 'from loguru import logger\n'), ((6792, 6832), 'loguru.logger.debug', 'logger.debug', (['"""[Reset early stop state]"""'], {}), "('[Reset early stop state]')\n", (6804, 6832), False, 'from loguru import logger\n'), ((9174, 9230), 'loguru.logger.debug', 'logger.debug', (['"""[Adjust learning rate after valid epoch]"""'], {}), "('[Adjust learning rate after valid epoch]')\n", (9186, 9230), False, 'from loguru import logger\n'), ((10256, 10293), 'os.makedirs', 'os.makedirs', (['SAVE_PATH'], {'exist_ok': '(True)'}), '(SAVE_PATH, exist_ok=True)\n', (10267, 10293), False, 'import os\n'), ((10302, 10336), 'torch.save', 'torch.save', (['state', 'self.model_file'], {}), '(state, self.model_file)\n', (10312, 10336), False, 'import torch\n'), ((10345, 10396), 'loguru.logger.info', 'logger.info', (['f"""[Save model into {self.model_file}]"""'], {}), "(f'[Save model into {self.model_file}]')\n", (10356, 10396), False, 'from loguru import logger\n'), ((10619, 10672), 'torch.load', 'torch.load', (['self.model_file'], {'map_location': 'self.device'}), '(self.model_file, map_location=self.device)\n', (10629, 10672), False, 'import torch\n'), ((11135, 11189), 'loguru.logger.info', 'logger.info', (['f"""[Restore model from {self.model_file}]"""'], {}), "(f'[Restore model from {self.model_file}]')\n", (11146, 11189), False, 'from loguru import logger\n'), ((13384, 13406), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (13397, 
13406), False, 'import nltk\n'), ((13422, 13441), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (13435, 13441), False, 'from nltk import word_tokenize\n'), ((835, 857), 'torch.optim.__dict__.items', 'optim.__dict__.items', ([], {}), '()\n', (855, 857), False, 'from torch import optim\n'), ((2398, 2417), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2410, 2417), False, 'import torch\n'), ((2650, 2667), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2661, 2667), False, 'import random\n'), ((2680, 2700), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2694, 2700), True, 'import numpy as np\n'), ((2713, 2736), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2730, 2736), False, 'import torch\n'), ((2749, 2777), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2771, 2777), False, 'import torch\n'), ((2790, 2822), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2816, 2822), False, 'import torch\n'), ((2835, 2868), 'loguru.logger.info', 'logger.info', (['f"""[Set seed] {seed}"""'], {}), "(f'[Set seed] {seed}')\n", (2846, 2868), False, 'from loguru import logger\n'), ((5253, 5288), 'loguru.logger.debug', 'logger.debug', (['"""[Enable early stop]"""'], {}), "('[Enable early stop]')\n", (5265, 5288), False, 'from loguru import logger\n'), ((6382, 6430), 'loguru.logger.info', 'logger.info', (['f"""[Build scheduler {lr_scheduler}]"""'], {}), "(f'[Build scheduler {lr_scheduler}]')\n", (6393, 6430), False, 'from loguru import logger\n'), ((8250, 8317), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (['self.parameters', 'self.gradient_clip'], {}), '(self.parameters, self.gradient_clip)\n', (8280, 8317), False, 'import torch\n'), ((8635, 8669), 'crslab.system.utils.functions.compute_grad_norm', 'compute_grad_norm', (['self.parameters'], {}), '(self.parameters)\n', (8652, 8669), False, 'from 
crslab.system.utils.functions import compute_grad_norm\n'), ((9505, 9540), 'loguru.logger.info', 'logger.info', (['"""[Get new best model]"""'], {}), "('[Get new best model]')\n", (9516, 9540), False, 'from loguru import logger\n'), ((10485, 10516), 'os.path.exists', 'os.path.exists', (['self.model_file'], {}), '(self.model_file)\n', (10499, 10516), False, 'import os\n'), ((13617, 13652), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['path'], {}), '(path)\n', (13646, 13652), False, 'from transformers import AutoTokenizer\n'), ((13878, 13913), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['path'], {}), '(path)\n', (13907, 13913), False, 'from transformers import AutoTokenizer\n'), ((14114, 14129), 'pkuseg.pkuseg', 'pkuseg.pkuseg', ([], {}), '()\n', (14127, 14129), False, 'import pkuseg\n'), ((14295, 14339), 'fuzzywuzzy.process.extractOne', 'extractOne', (['token', 'entities'], {'score_cutoff': '(90)'}), '(token, entities, score_cutoff=90)\n', (14305, 14339), False, 'from fuzzywuzzy.process import extractOne\n'), ((2479, 2499), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2491, 2499), False, 'import torch\n'), ((2540, 2560), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2552, 2560), False, 'import torch\n'), ((8406, 8430), 'crslab.evaluator.metrics.base.AverageMetric', 'AverageMetric', (['grad_norm'], {}), '(grad_norm)\n', (8419, 8430), False, 'from crslab.evaluator.metrics.base import AverageMetric\n'), ((8728, 8752), 'crslab.evaluator.metrics.base.AverageMetric', 'AverageMetric', (['grad_norm'], {}), '(grad_norm)\n', (8741, 8752), False, 'from crslab.evaluator.metrics.base import AverageMetric\n'), ((9676, 9703), 'loguru.logger.info', 'logger.info', (['"""[Early stop]"""'], {}), "('[Early stop]')\n", (9687, 9703), False, 'from loguru import logger\n'), ((3360, 3419), 'crslab.model.get_model', 'get_model', (['opt', "opt['model']", 'self.device', 
'vocab', 'side_data'], {}), "(opt, opt['model'], self.device, vocab, side_data)\n", (3369, 3419), False, 'from crslab.model import get_model\n'), ((3518, 3595), 'crslab.model.get_model', 'get_model', (['opt', "opt['rec_model']", 'self.device', "vocab['rec']", "side_data['rec']"], {}), "(opt, opt['rec_model'], self.device, vocab['rec'], side_data['rec'])\n", (3527, 3595), False, 'from crslab.model import get_model\n'), ((3703, 3788), 'crslab.model.get_model', 'get_model', (['opt', "opt['conv_model']", 'self.device', "vocab['conv']", "side_data['conv']"], {}), "(opt, opt['conv_model'], self.device, vocab['conv'], side_data['conv']\n )\n", (3712, 3788), False, 'from crslab.model import get_model\n'), ((3895, 3986), 'crslab.model.get_model', 'get_model', (['opt', "opt['policy_model']", 'self.device', "vocab['policy']", "side_data['policy']"], {}), "(opt, opt['policy_model'], self.device, vocab['policy'], side_data\n ['policy'])\n", (3904, 3986), False, 'from crslab.model import get_model\n')] |
"""Rebuild the captcha CNN, load trained weights, and decode the images in ./verify."""
import matplotlib.pyplot as plt
from keras.preprocessing import image
import numpy as np
import keras
import os
from PIL import Image
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Model


def _conv_block(x, filters):
    """Two 3x3 convs (first 'same'-padded), batch-norm, 2x2 max-pool, 30% dropout."""
    x = Conv2D(filters=filters, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = Conv2D(filters=filters, kernel_size=(3, 3), activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    return Dropout(0.3)(x)


print("Creating CNN model...")
# 34 symbols: digits plus uppercase letters without I and O, lowercased.
LETTERSTR = "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ".lower()
print(LETTERSTR)

# 50x200 RGB captcha -> three conv blocks -> one extra conv stage -> 6 softmax heads.
in1 = Input((50, 200, 3))
out = in1
for n_filters in (32, 64, 128):
    out = _conv_block(out, n_filters)
out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Flatten()(out)
out = Dropout(0.3)(out)
# One 34-way classifier head per captcha character position.
out = [Dense(34, name=f'digit{d}', activation='softmax')(out) for d in range(1, 7)]
model = Model(inputs=in1, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.load_weights("saved_model/last_model.h5")

# Load every verification image as a (50, 200, 3) float array scaled to [0, 1].
verify = "./verify/"
name = []
temp = []
for image_path in os.listdir(verify):
    img = image.load_img(verify + image_path, target_size=(50, 200))
    name.append(image_path)
    temp.append(np.asarray(img) / 255.0)
test_data = np.stack(temp)
prediction = model.predict(test_data)
print(name)

# Decode: `prediction` is a list of 6 arrays, one per character position,
# each of shape (n_images, 34).
# BUGFIX: the decoding loop was hard-coded to range(20); use the actual
# number of loaded images instead so any ./verify size works.
answer = []
for i in range(len(test_data)):
    answer.append("".join(LETTERSTR[np.argmax(prediction[char][i])] for char in range(6)))
print(answer)
"keras.layers.Conv2D",
"os.listdir",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"numpy.asarray",
"numpy.argmax",
"numpy.stack",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.Dense",
"keras.layers.BatchNormalization",
"keras.layers.Dropout",
"keras.preprocessing.image.lo... | [((397, 416), 'keras.layers.Input', 'Input', (['(50, 200, 3)'], {}), '((50, 200, 3))\n', (402, 416), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1732, 1762), 'keras.models.Model', 'Model', ([], {'inputs': 'in1', 'outputs': 'out'}), '(inputs=in1, outputs=out)\n', (1737, 1762), False, 'from keras.models import Model\n'), ((2105, 2123), 'os.listdir', 'os.listdir', (['verify'], {}), '(verify)\n', (2115, 2123), False, 'import os\n'), ((2410, 2424), 'numpy.stack', 'np.stack', (['temp'], {}), '(temp)\n', (2418, 2424), True, 'import numpy as np\n'), ((433, 506), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), padding='same', activation='relu')\n", (439, 506), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((518, 575), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), activation='relu')\n", (524, 575), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((587, 607), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (605, 607), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((619, 649), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (631, 649), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((661, 673), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (668, 673), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((685, 758), 'keras.layers.Conv2D', 'Conv2D', 
([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), padding='same', activation='relu')\n", (691, 758), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((770, 827), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (776, 827), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((839, 859), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (857, 859), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((871, 901), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (883, 901), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((913, 925), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (920, 925), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((937, 1011), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), padding='same', activation='relu')\n", (943, 1011), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1023, 1081), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), activation='relu')\n", (1029, 1081), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1093, 1113), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), 
'()\n', (1111, 1113), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1125, 1155), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1137, 1155), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1167, 1179), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1174, 1179), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1191, 1249), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), activation='relu')\n", (1197, 1249), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1261, 1281), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1279, 1281), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1293, 1323), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1305, 1323), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1335, 1344), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1342, 1344), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1356, 1368), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1363, 1368), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((2135, 2193), 'keras.preprocessing.image.load_img', 'image.load_img', (['(verify + image_path)'], {'target_size': '(50, 200)'}), '(verify + image_path, target_size=(50, 200))\n', (2149, 2193), False, 'from keras.preprocessing import image\n'), ((1381, 1427), 
'keras.layers.Dense', 'Dense', (['(34)'], {'name': '"""digit1"""', 'activation': '"""softmax"""'}), "(34, name='digit1', activation='softmax')\n", (1386, 1427), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1439, 1485), 'keras.layers.Dense', 'Dense', (['(34)'], {'name': '"""digit2"""', 'activation': '"""softmax"""'}), "(34, name='digit2', activation='softmax')\n", (1444, 1485), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1497, 1543), 'keras.layers.Dense', 'Dense', (['(34)'], {'name': '"""digit3"""', 'activation': '"""softmax"""'}), "(34, name='digit3', activation='softmax')\n", (1502, 1543), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1555, 1601), 'keras.layers.Dense', 'Dense', (['(34)'], {'name': '"""digit4"""', 'activation': '"""softmax"""'}), "(34, name='digit4', activation='softmax')\n", (1560, 1601), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1613, 1659), 'keras.layers.Dense', 'Dense', (['(34)'], {'name': '"""digit5"""', 'activation': '"""softmax"""'}), "(34, name='digit5', activation='softmax')\n", (1618, 1659), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((1671, 1717), 'keras.layers.Dense', 'Dense', (['(34)'], {'name': '"""digit6"""', 'activation': '"""softmax"""'}), "(34, name='digit6', activation='softmax')\n", (1676, 1717), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization\n'), ((2293, 2308), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2303, 2308), True, 'import numpy as np\n'), ((2602, 2632), 'numpy.argmax', 'np.argmax', (['prediction[char][i]'], {}), '(prediction[char][i])\n', (2611, 2632), True, 'import numpy as np\n')] |
"""transpose of a matrix"""
import numpy
import csv
import pandas
# Read a CSV, keep a 2x2 slice, threshold each value (>3 -> 1, else -1),
# and write the result back out. Paths are hard-coded to a Windows desktop.
dir_input=r'C:/Users/user/Desktop/input_data.csv'
dir_output=r'C:/Users/user/Desktop/output_data.csv'
df = pandas.read_csv(f'{dir_input}',encoding='UTF-8', header=None) # header=None: treat every row as data
df = df.values # convert to numpy.ndarray so 2-D slicing below works
df = df[1:3,1:3]# snip: rows 1-2, columns 1-2
df = numpy.where(df > 3, 1, -1)# elementwise threshold
df = pandas.DataFrame(df)
df.to_csv(f'{dir_output}',index=False,header=None)
"""transpose of a matrix"""
def matrix_transposed():
    """Transpose a fixed 3x3 matrix, print both versions, and write data.csv."""
    print(__doc__)  # module docstring
    import numpy as np
    import csv
    rows = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    print(rows)  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    transposed = np.array(rows).T.tolist()
    print(transposed)  # [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
    with open('data.csv', 'w') as out_file:
        csv.writer(out_file, lineterminator='\n').writerows(transposed)
"pandas.read_csv",
"numpy.where",
"csv.writer",
"numpy.array",
"pandas.DataFrame"
] | [((179, 241), 'pandas.read_csv', 'pandas.read_csv', (['f"""{dir_input}"""'], {'encoding': '"""UTF-8"""', 'header': 'None'}), "(f'{dir_input}', encoding='UTF-8', header=None)\n", (194, 241), False, 'import pandas\n'), ((334, 360), 'numpy.where', 'numpy.where', (['(df > 3)', '(1)', '(-1)'], {}), '(df > 3, 1, -1)\n', (345, 360), False, 'import numpy\n'), ((370, 390), 'pandas.DataFrame', 'pandas.DataFrame', (['df'], {}), '(df)\n', (386, 390), False, 'import pandas\n'), ((811, 848), 'csv.writer', 'csv.writer', (['file'], {'lineterminator': '"""\n"""'}), "(file, lineterminator='\\n')\n", (821, 848), False, 'import csv\n'), ((680, 691), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (688, 691), True, 'import numpy as np\n')] |
import os
from os import listdir
from tqdm import tqdm
import json
from collections import Counter
import itertools
import random
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import tensorflow as tf
import tensorflow_hub as hub
import networkx as nx
def get_papers_dict(areas:list, year_start:int, year_end:int, path_2_unpacked:str = './unpacked') -> dict:
    """
    Return dictionary of papers in form id: [properties].

    Scans every file under *path_2_unpacked* (one JSON record per line) and
    keeps papers that match at least one requested field of study, fall inside
    [year_start, year_end], and have non-empty citations/doi/abstract/title/
    journal metadata.
    """
    papers_dict = {}
    files = [f for f in listdir(path_2_unpacked)]
    areas = set(areas)
    for j in tqdm(range(len(files))):
        with open(path_2_unpacked + '/' + files[j]) as f:
            lines = f.readlines()
            # NOTE(review): cs_papers_local is filled but never used outside
            # this loop — likely leftover debugging state.
            cs_papers_local = []
            for i in (range(len(lines))):
                paper = json.loads(lines[i])
                # records with a missing/zero year are skipped outright
                if not paper["year"]:
                    continue
                # NOTE(review): assumes paper["fieldsOfStudy"] is always a list —
                # a None value would raise TypeError here; confirm against the dump.
                if len(set(paper["fieldsOfStudy"]).intersection(areas)) > 0 \
                and paper["year"] >= year_start \
                and paper["year"] <= year_end \
                and len(paper["inCitations"]) > 0 \
                and len(paper["outCitations"]) > 0 \
                and len(paper["doi"]) > 0 \
                and len(paper["paperAbstract"]) > 0 \
                and len(paper["title"]) > 0 \
                and len(paper["journalName"]) > 0:
                    papers_dict[paper["id"]] = paper
                    cs_papers_local.append(paper)
    return papers_dict
def get_edge_list(papers_dict: dict) -> list:
    """Collect citation edges between papers that are both in *papers_dict*.

    Args:
        papers_dict: paper_id -> paper record holding an 'outCitations' id list.

    Returns:
        list of ``[citing_paper_id, cited_paper_id]`` pairs.
    """
    edge_list = []
    for paper_id in tqdm(papers_dict):
        # iterate the citation ids directly instead of the original index loop
        for cited_id in papers_dict[paper_id]['outCitations']:
            if cited_id in papers_dict:
                edge_list.append([paper_id, cited_id])
    return edge_list
def get_data(papers_dict: dict, edge_list: list, dataset_name: str) -> list:
    """Build author/paper feature tables and edge lists, dumping each to CSV
    under ``processed_data/<dataset_name>_*.csv``.

    Args:
        papers_dict: paper_id -> paper record (see ``get_papers_dict``).
        edge_list: paper citation edges ``[citing_id, cited_id]``.
        dataset_name: file-name prefix for the exported CSVs.

    Returns:
        [papers_features, authors_features, authors_papers, authors_edges,
        papers_edges] as pandas DataFrames.
    """
    no_id_counter = 0  # authors with zero or several ids (skipped)
    edge_dict = {}  # (author_1_id, author_2_id) -> co-authored paper ids
    authors_dict = {}  # author_id -> [name, {paper ids}]
    authors_interests = {}  # author_id -> [author_id, {field-of-study names}]
    for paper_id in tqdm(papers_dict):
        paper = papers_dict[paper_id]
        # BUGFIX: removed a dead `itertools.permutations(paper["authors"], 2)`
        # call whose result was discarded.
        areas = paper['fieldsOfStudy']
        ids = []
        for author in paper["authors"]:
            if len(author['ids']) == 1:
                author_id = author['ids'][0]
                ids.append(author_id)
                if author_id in authors_dict:
                    authors_dict[author_id][1].add(paper_id)
                else:
                    authors_dict[author_id] = [author['name'], {paper_id}]
                    authors_interests[author_id] = [author_id, set()]
                authors_interests[author_id][1].update(areas)
            else:
                no_id_counter += 1
        for pair in itertools.combinations(ids, 2):
            edge_dict.setdefault(pair, []).append(paper_id)
    authors_interests_list = list(authors_interests.values())
    df = pd.DataFrame(np.array(authors_interests_list, dtype=object),
                      columns=["author_id", "interests"])
    # replaces the original try/except around os.mkdir
    os.makedirs("processed_data", exist_ok=True)
    papers_df = pd.DataFrame(list(papers_dict.values()))
    papers_features = papers_df.drop(["inCitations", "outCitations"], axis=1)
    papers_features.to_csv("processed_data/" + dataset_name + "_papers_features.csv")
    # BUGFIX: positional `axis` in DataFrame.drop was removed in pandas 2.0
    authors_features = df.drop(columns='interests').join(df.interests.str.join('|').str.get_dummies())
    authors_features.to_csv("processed_data/" + dataset_name + "_authors_features.csv")
    # dtype=object: the per-edge paper-id lists are ragged; modern numpy
    # refuses to build ragged arrays implicitly
    authors_papers = pd.DataFrame(np.array(list(edge_dict.values()), dtype=object),
                                 columns=["papers_ids"])
    authors_papers.to_csv("processed_data/" + dataset_name + "_authors_edges_papers.csv")
    authors_edges = pd.DataFrame(list(edge_dict.keys()), columns=["from", "to"])
    authors_edges.to_csv("processed_data/" + dataset_name + "_authors_edge_list.csv")
    papers_edges = pd.DataFrame(edge_list, columns=["from", "to"])
    papers_edges.to_csv("processed_data/" + dataset_name + "_papers_edge_list.csv")
    return [papers_features, authors_features, authors_papers, authors_edges, papers_edges]
def parse_global_dataset(areas, year_start, year_end, dataset_name: str = "test_dataset") -> list:
    """Parse the raw dump end-to-end: filter papers, build the citation edge
    list, then export all feature/edge tables via ``get_data``."""
    papers = get_papers_dict(areas, year_start, year_end)
    citation_edges = get_edge_list(papers)
    return get_data(papers, citation_edges, dataset_name)
def preprocessing(global_dataset:list, dataset_name:str = "test_dataset") -> list:
    """Turn the raw dataset frames into indexed, vectorized features.

    Steps: map paper ids to row indices, re-index the per-edge paper lists,
    embed abstracts with the Universal Sentence Encoder, compress them to
    32 dims with PCA, and re-index the paper edge list. All intermediate
    tables are written under processed_data/.
    """
    papers_features, authors_features, authors_papers, authors_edges, papers_edges = global_dataset
    # NOTE(review): `authors` is never used below.
    authors = []
    papers_id = papers_features["id"]
    # paper id -> positional row index in papers_features.
    id_to_index_id = {papers_id[i]: i for i in tqdm(range(len(papers_id)))}
    authors_papers_unzipped = authors_papers["papers_ids"]
    # Replace each edge's paper ids with their row indices.
    authors_papers_indexed = [
        [
            id_to_index_id[authors_papers_unzipped[i][j]]
            for j in range(len(authors_papers_unzipped[i]))
        ]
        for i in tqdm(range(len(authors_papers_unzipped)))
    ]
    authors_papers_indexed_str = [
        str(authors_papers_indexed[i]) for i in tqdm(range(len(authors_papers_indexed)))
    ]
    authors_edges_papers_indices = pd.DataFrame(authors_papers_indexed_str, columns=["papers_indices"])
    authors_edges_papers_indices.to_csv(
        "processed_data/" + dataset_name + "_authors_edges_papers_indices.csv"
    )
    # NOTE(review): `df` is computed but never used afterwards; boolean-frame
    # indexing here NaN-masks rather than filters rows -- TODO confirm intent.
    df = papers_features[
        papers_features[
            ["id", "title", "paperAbstract", "year", "journalName", "fieldsOfStudy"]
        ].notna()
    ]
    papers_features_abstracts = list(papers_features["paperAbstract"])
    # Stringify so NaN abstracts become "nan" for the encoder below.
    papers_features_abstracts = [
        str(papers_features_abstracts[i]) for i in range(len(papers_features_abstracts))
    ]
    papers_features["paperAbstract"] = papers_features["paperAbstract"].fillna(
        "No abstract provided"
    )
    # Universal Sentence Encoder: one 512-d embedding per abstract.
    model = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
    vectorized_abstracts = []
    for i in tqdm(range(len(papers_features_abstracts))):
        abstract = papers_features_abstracts[i]
        vectorized_abstracts.append(model([abstract])[0])
    vectorized_abstracts_list = [
        vectorized_abstracts[i].numpy() for i in tqdm(range(len(vectorized_abstracts)))
    ]
    vectorized_abstracts_df = pd.DataFrame(vectorized_abstracts_list)
    print('PCA started its work.')
    # Compress embeddings to 32 principal components.
    pca = PCA(n_components=32)
    pca_result = pca.fit_transform(vectorized_abstracts_df)
    print('PCA ended its work.')
    compressed_paper_features = pd.DataFrame(pca_result)
    compressed_paper_features.to_csv(
        "processed_data/" + dataset_name + "_papers_features_vectorized_compressed_32.csv"
    )
    # Rewrite the paper edge list in-place from ids to row indices.
    papers_edge_list_indexed = papers_edges.values
    for i in tqdm(range(len(papers_edge_list_indexed))):
        pair = papers_edge_list_indexed[i]
        for j in range(len(pair)):
            pair[j] = id_to_index_id[pair[j]]
    papers_edge_list_indexed_np = pd.DataFrame(papers_edge_list_indexed)
    papers_edge_list_indexed_np.to_csv(
        "processed_data/" + dataset_name + "_papers_edge_list_indexed.csv"
    )
    return [authors_edges_papers_indices, compressed_paper_features, papers_edge_list_indexed_np]
def extract_subgraph(global_dataset:list, processed_data:list, subgraph_name:str, nodes_number:int = 1000):
    # Sample an author subgraph of at least `nodes_number` nodes via a
    # depth-limited DFS from random sources, restrict the paper graph to the
    # papers those author edges reference, and write both edge lists plus the
    # author-edge -> paper-indices table under datasets/<subgraph_name>/.
    # NOTE(review): helper name "get_subraph" is a typo for "get_subgraph";
    # kept as-is in this documentation-only pass.
    def get_nx_graph(edge_list):
        # Build a (u, v) -> row-index map and a DiGraph from a 2-column frame.
        aev = edge_list.values
        edge_to_index = {(aev[i][0], aev[i][1]): i for i in tqdm(range(len(aev)))}
        edges_list_t = list(edge_to_index.keys())
        return edge_to_index, nx.DiGraph((x, y) for (x, y) in tqdm(Counter(edges_list_t)))
    def get_subraph(N, source: int, depth_limit: int = 4):
        # Depth-limited DFS preorder from `source`; return the induced subgraph.
        nodes = list(nx.dfs_preorder_nodes(N, source=source, depth_limit=depth_limit))
        H = N.subgraph(nodes)
        return H
    authors_edges_papers, compressed_paper_features, papers_edge_list_indexed_np = processed_data
    papers_features, authors_features, authors_papers, authors_edges, papers_edges = global_dataset
    edge_to_index_A, A = get_nx_graph(authors_edges)
    # NOTE(review): edge_to_index_G is never used below.
    edge_to_index_G, G = get_nx_graph(papers_edge_list_indexed_np)
    try:
        # Parse the stringified "[i, j, ...]" lists back into lists of strings;
        # the bare except tolerates already-parsed input (best-effort).
        authors_edges_papers['papers_indices'] = authors_edges_papers['papers_indices'].apply(lambda x: x.replace('[', '').replace(']', '').split(','))
    except:
        pass
    depth_limit, ready_flag, sub_A = 3, 0, ""
    # Retry random sources until a DFS subgraph reaches nodes_number.
    # NOTE(review): the inner loop reuses `i` (as the depth limit), shadowing
    # the outer loop variable -- TODO confirm this is intended.
    for i in range(depth_limit, 15):
        if ready_flag == 0:
            for i in range(10):
                source = random.choice(list(A.nodes()))
                sub_A = get_subraph(A, source, depth_limit=i)
                if len(sub_A.nodes) >= nodes_number:
                    ready_flag = 1
        else:
            break
    print(len(sub_A.nodes), len(sub_A.edges))
    sub_A_edges = list(sub_A.edges())
    # Paper-index lists attached to each surviving author edge.
    authors_edges_papers_sub = [
        authors_edges_papers["papers_indices"][edge_to_index_A[sub_A_edges[i]]]
        for i in tqdm(range(len(sub_A_edges)))
    ]
    authors_edges_papers_sub_flat = [
        int(item) for subarray in authors_edges_papers_sub for item in subarray
    ]
    unique_papers = list(set(authors_edges_papers_sub_flat))
    # Papers referenced by author edges but absent from the paper graph.
    papers_to_delete_initial = list(set(unique_papers) - set(G.nodes))
    G_sub = G.subgraph(unique_papers)
    # NOTE(review): G_sub_nodes is never used below.
    G_sub_nodes = list(G_sub.nodes())
    papers_out_lcc = papers_to_delete_initial
    collabs_indices_to_delete = []
    # Drop missing papers from every edge's list; remember edges left empty.
    for i in tqdm(range(len(papers_out_lcc))):
        for j in range(len(authors_edges_papers_sub)):
            # if str(1745104) in authors_edges_papers_sub[j]:
            #     jj.append(j)
            if str(papers_out_lcc[i]) in authors_edges_papers_sub[j]:
                del authors_edges_papers_sub[j][
                    authors_edges_papers_sub[j].index(str(papers_out_lcc[i]))
                ]
                if len(authors_edges_papers_sub[j]) == 0:
                    collabs_indices_to_delete.append(j)
    # Mutable copy of the author subgraph so edges can be removed.
    A_sub_clear = nx.DiGraph(sub_A)
    A_sub_clear_edges = list(A_sub_clear.edges())
    # Remove author edges whose paper lists became empty.
    for i in tqdm(range(len(collabs_indices_to_delete))):
        edge = A_sub_clear_edges[collabs_indices_to_delete[i]]
        if edge not in A_sub_clear_edges:
            print("error")
        A_sub_clear.remove_edge(*edge)
    authors_edges_papers_sub_clear = [
        authors_edges_papers_sub[i]
        for i in range(len(authors_edges_papers_sub))
        if len(authors_edges_papers_sub[i]) > 0
    ]
    A_sub_clear_edges_check = list(A_sub_clear.edges())
    authors_edges_papers_sub_2 = [
        authors_edges_papers["papers_indices"][edge_to_index_A[A_sub_clear_edges_check[i]]]
        for i in tqdm(range(len(A_sub_clear_edges_check)))
    ]
    # NOTE(review): the list above is rebuilt below, identically (duplicate).
    authors_edges_papers_sub_2 = [
        authors_edges_papers["papers_indices"][edge_to_index_A[A_sub_clear_edges_check[i]]]
        for i in tqdm(range(len(A_sub_clear_edges_check)))
    ]
    authors_edges_papers_sub_flat_2 = [
        int(item) for subarray in authors_edges_papers_sub_2 for item in subarray
    ]
    # NOTE(review): unique_papers_2 is computed but never used.
    unique_papers_2 = list(set(authors_edges_papers_sub_flat_2))
    G_sub_clear = G_sub
    try:
        os.mkdir('datasets')
    except:
        pass
    try:
        os.mkdir('datasets/' + subgraph_name)
    except:
        pass
    nx.write_edgelist(
        G_sub_clear,
        "datasets/" + subgraph_name + "/" + subgraph_name + "_" + "papers.edgelist",
    )
    nx.write_edgelist(
        A_sub_clear,
        "datasets/" + subgraph_name + "/" + subgraph_name + "_" + "authors.edgelist",
    )
    authors_edges_papers.to_csv(
        "datasets/"
        + subgraph_name
        + "/"
        + subgraph_name
        + "_"
        + "authors_edges_papers_indices.csv"
    ) | [
"json.loads",
"os.listdir",
"sklearn.decomposition.PCA",
"networkx.dfs_preorder_nodes",
"networkx.DiGraph",
"tqdm.tqdm",
"tensorflow_hub.load",
"itertools.combinations",
"networkx.write_edgelist",
"numpy.array",
"collections.Counter",
"os.mkdir",
"pandas.DataFrame",
"itertools.permutations... | [((1511, 1528), 'tqdm.tqdm', 'tqdm', (['papers_dict'], {}), '(papers_dict)\n', (1515, 1528), False, 'from tqdm import tqdm\n'), ((2144, 2161), 'tqdm.tqdm', 'tqdm', (['papers_dict'], {}), '(papers_dict)\n', (2148, 2161), False, 'from tqdm import tqdm\n'), ((4259, 4311), 'pandas.DataFrame', 'pd.DataFrame', (['edge_dict_keys'], {'columns': "['from', 'to']"}), "(edge_dict_keys, columns=['from', 'to'])\n", (4271, 4311), True, 'import pandas as pd\n'), ((4424, 4471), 'pandas.DataFrame', 'pd.DataFrame', (['edge_list'], {'columns': "['from', 'to']"}), "(edge_list, columns=['from', 'to'])\n", (4436, 4471), True, 'import pandas as pd\n'), ((5737, 5805), 'pandas.DataFrame', 'pd.DataFrame', (['authors_papers_indexed_str'], {'columns': "['papers_indices']"}), "(authors_papers_indexed_str, columns=['papers_indices'])\n", (5749, 5805), True, 'import pandas as pd\n'), ((6429, 6494), 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/google/universal-sentence-encoder/4"""'], {}), "('https://tfhub.dev/google/universal-sentence-encoder/4')\n", (6437, 6494), True, 'import tensorflow_hub as hub\n'), ((6849, 6888), 'pandas.DataFrame', 'pd.DataFrame', (['vectorized_abstracts_list'], {}), '(vectorized_abstracts_list)\n', (6861, 6888), True, 'import pandas as pd\n'), ((6935, 6955), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(32)'}), '(n_components=32)\n', (6938, 6955), False, 'from sklearn.decomposition import PCA\n'), ((7086, 7110), 'pandas.DataFrame', 'pd.DataFrame', (['pca_result'], {}), '(pca_result)\n', (7098, 7110), True, 'import pandas as pd\n'), ((7514, 7552), 'pandas.DataFrame', 'pd.DataFrame', (['papers_edge_list_indexed'], {}), '(papers_edge_list_indexed)\n', (7526, 7552), True, 'import pandas as pd\n'), ((10533, 10550), 'networkx.DiGraph', 'nx.DiGraph', (['sub_A'], {}), '(sub_A)\n', (10543, 10550), True, 'import networkx as nx\n'), ((11840, 11951), 'networkx.write_edgelist', 'nx.write_edgelist', (['G_sub_clear', "('datasets/' 
+ subgraph_name + '/' + subgraph_name + '_' + 'papers.edgelist')"], {}), "(G_sub_clear, 'datasets/' + subgraph_name + '/' +\n subgraph_name + '_' + 'papers.edgelist')\n", (11857, 11951), True, 'import networkx as nx\n'), ((11976, 12088), 'networkx.write_edgelist', 'nx.write_edgelist', (['A_sub_clear', "('datasets/' + subgraph_name + '/' + subgraph_name + '_' + 'authors.edgelist')"], {}), "(A_sub_clear, 'datasets/' + subgraph_name + '/' +\n subgraph_name + '_' + 'authors.edgelist')\n", (11993, 12088), True, 'import networkx as nx\n'), ((2209, 2252), 'itertools.permutations', 'itertools.permutations', (["paper['authors']", '(2)'], {}), "(paper['authors'], 2)\n", (2231, 2252), False, 'import itertools\n'), ((3393, 3425), 'numpy.array', 'np.array', (['authors_interests_list'], {}), '(authors_interests_list)\n', (3401, 3425), True, 'import numpy as np\n'), ((3487, 3513), 'os.mkdir', 'os.mkdir', (['"""processed_data"""'], {}), "('processed_data')\n", (3495, 3513), False, 'import os\n'), ((4046, 4072), 'numpy.array', 'np.array', (['edge_dict_values'], {}), '(edge_dict_values)\n', (4054, 4072), True, 'import numpy as np\n'), ((11700, 11720), 'os.mkdir', 'os.mkdir', (['"""datasets"""'], {}), "('datasets')\n", (11708, 11720), False, 'import os\n'), ((11768, 11805), 'os.mkdir', 'os.mkdir', (["('datasets/' + subgraph_name)"], {}), "('datasets/' + subgraph_name)\n", (11776, 11805), False, 'import os\n'), ((517, 541), 'os.listdir', 'listdir', (['path_2_unpacked'], {}), '(path_2_unpacked)\n', (524, 541), False, 'from os import listdir\n'), ((788, 808), 'json.loads', 'json.loads', (['lines[i]'], {}), '(lines[i])\n', (798, 808), False, 'import json\n'), ((3046, 3076), 'itertools.combinations', 'itertools.combinations', (['ids', '(2)'], {}), '(ids, 2)\n', (3068, 3076), False, 'import itertools\n'), ((8262, 8326), 'networkx.dfs_preorder_nodes', 'nx.dfs_preorder_nodes', (['N'], {'source': 'source', 'depth_limit': 'depth_limit'}), '(N, source=source, depth_limit=depth_limit)\n', (8283, 
8326), True, 'import networkx as nx\n'), ((8156, 8177), 'collections.Counter', 'Counter', (['edges_list_t'], {}), '(edges_list_t)\n', (8163, 8177), False, 'from collections import Counter\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 20:45:41 2019
@author: <NAME>

Clustering (K-means elbow method) and classification (polynomial logistic
regression, RBF SVM) exercises on three CSV datasets.

Bug fix: matplotlib.pyplot has no `label` function -- the y-axis label calls
crashed with AttributeError; they now use plt.ylabel.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn import svm
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.metrics import (accuracy_score, precision_score, recall_score)
#import pickle # would let us cache results instead of rerunning slow code
#%% Read data
data = pd.read_csv('../Data/dataset_1_b.csv')
################################### EJ 1 ######################################
#%% Elbow method: inertia for k = 1..10
inercias=np.zeros(10)
for k in np.arange(10)+1:
    modelo_KM=KMeans(n_clusters=k,init='k-means++',random_state=0)
    modelo_KM=modelo_KM.fit(data)
    inercias[k-1]=modelo_KM.inertia_
plt.plot(np.arange(1,11),inercias)
plt.xlabel('Num grupos')
plt.ylabel('Inercias')  # BUG FIX: was plt.label, which does not exist
plt.show()
#%% Final model with k=5 (chosen from the elbow plot)
modelo_KM=KMeans(n_clusters=5,init='k-means++',random_state=0)
modelo_KM=modelo_KM.fit(data)
grupos=modelo_KM.predict(data)
##################################### EJ 2 ####################################
#%% One-hot encode the class column, then standardize all columns
data2 = pd.read_csv('../Data/dataset_2_b.csv')
dummies= pd.get_dummies(data2['Class'])
data2 = data2.drop('Class',axis=1)
data2=data2.join(dummies)
data2 = (data2-np.mean(data2))/data2.std()
#%% Elbow method again on the encoded data
inercias=np.zeros(10)
for k in np.arange(10)+1:
    modelo_KM=KMeans(n_clusters=k,init='k-means++',random_state=0)
    modelo_KM=modelo_KM.fit(data2)
    inercias[k-1]=modelo_KM.inertia_
plt.plot(np.arange(1,11),inercias)
plt.xlabel('Num grupos')
plt.ylabel('Inercias')  # BUG FIX: was plt.label, which does not exist
plt.show()
#%% Final model with k=3
modelo_KM=KMeans(n_clusters=3,init='k-means++',random_state=0)
modelo_KM=modelo_KM.fit(data2)
grupos=modelo_KM.predict(data2)
plt.scatter(data2.V1,data2.V2,c=grupos)
###################################### EJ 3 ###################################
###################################### RL ###################################
#%% Logistic regression on polynomial features
data3 = pd.read_csv('../Data/dataset_4_b.csv',header=None)
#%% Train/test split (last column is the target)
X=data3.iloc[:,:-1]
Y=data3.iloc[:,-1]
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.3,random_state=0)
#%% Degree-2 polynomial expansion + logistic regression
ngrado = 2
poly = PolynomialFeatures(ngrado)
Xa_train = poly.fit_transform(X_train)
Xa_test = poly.fit_transform(X_test)
modelo = linear_model.LogisticRegression()
modelo.fit(Xa_train,Y_train)
Yhat_test = modelo.predict(Xa_test)
#%% Per-class metrics via one-hot columns, averaged over the 3 classes
dummies1 = pd.get_dummies(Y_test)
dummies2 = pd.get_dummies(Yhat_test)
y1=dummies1[0]
y2=dummies1[1]
y3=dummies1[2]
Yhat1= dummies2[0]
Yhat2= dummies2[1]
Yhat3= dummies2[2]
accuracy =(np.mean([accuracy_score(y1,Yhat1),accuracy_score(y2,Yhat2),accuracy_score(y3,Yhat3)]))
precision=(np.mean([precision_score(y1,Yhat1),precision_score(y2,Yhat2),precision_score(y3,Yhat3)]))
recall= (np.mean([recall_score(y1,Yhat1),recall_score(y2,Yhat2),recall_score(y3,Yhat3)]))
plt.bar(['Accu','Prec','Rec'],[accuracy,precision,recall])
print(accuracy,precision,recall)
#%%
###################################### SVM ####################################
#%%
x=data3.iloc[:,:-1]
y=data3.iloc[:,-1]
#%% Support-vector classifier with RBF kernel (fit on the full dataset)
modelo = svm.SVC(kernel= 'rbf')
modelo.fit(x,y)
Yhat=modelo.predict(x) # predictions on the training data itself
#%% Same one-hot, per-class averaged metrics (training-set metrics here)
dummies1 = pd.get_dummies(y)
dummies2 = pd.get_dummies(Yhat)
y1=dummies1[0]
y2=dummies1[1]
y3=dummies1[2]
Yhat1= dummies2[0]
Yhat2= dummies2[1]
Yhat3= dummies2[2]
accuracy =(np.mean([accuracy_score(y1,Yhat1),accuracy_score(y2,Yhat2),accuracy_score(y3,Yhat3)]))
precision=(np.mean([precision_score(y1,Yhat1),precision_score(y2,Yhat2),precision_score(y3,Yhat3)]))
recall= (np.mean([recall_score(y1,Yhat1),recall_score(y2,Yhat2),recall_score(y3,Yhat3)]))
plt.bar(['Accu','Prec','Rec'],[accuracy,precision,recall])
print(accuracy,precision,recall)
#%% Scatter plots of predicted classes over feature pairs
plt.scatter(x.iloc[:,0],x.iloc[:,1],c=Yhat)
plt.show()
plt.scatter(x.iloc[:,0],x.iloc[:,2],c=Yhat)
plt.show()
plt.scatter(x.iloc[:,1],x.iloc[:,2],c=Yhat)
plt.show() | [
"sklearn.cluster.KMeans",
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.preprocessing.PolynomialFeatures",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.xlabel",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_sc... | [((556, 594), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/dataset_1_b.csv"""'], {}), "('../Data/dataset_1_b.csv')\n", (567, 594), True, 'import pandas as pd\n'), ((695, 707), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (703, 707), True, 'import numpy as np\n'), ((921, 945), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Num grupos"""'], {}), "('Num grupos')\n", (931, 945), True, 'import matplotlib.pyplot as plt\n'), ((947, 968), 'matplotlib.pyplot.label', 'plt.label', (['"""Inercias"""'], {}), "('Inercias')\n", (956, 968), True, 'import matplotlib.pyplot as plt\n'), ((970, 980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (978, 980), True, 'import matplotlib.pyplot as plt\n'), ((999, 1053), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(5)', 'init': '"""k-means++"""', 'random_state': '(0)'}), "(n_clusters=5, init='k-means++', random_state=0)\n", (1005, 1053), False, 'from sklearn.cluster import KMeans\n'), ((1217, 1255), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/dataset_2_b.csv"""'], {}), "('../Data/dataset_2_b.csv')\n", (1228, 1255), True, 'import pandas as pd\n'), ((1290, 1320), 'pandas.get_dummies', 'pd.get_dummies', (["data2['Class']"], {}), "(data2['Class'])\n", (1304, 1320), True, 'import pandas as pd\n'), ((1447, 1459), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1455, 1459), True, 'import numpy as np\n'), ((1674, 1698), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Num grupos"""'], {}), "('Num grupos')\n", (1684, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1700, 1721), 'matplotlib.pyplot.label', 'plt.label', (['"""Inercias"""'], {}), "('Inercias')\n", (1709, 1721), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1731, 1733), True, 'import matplotlib.pyplot as plt\n'), ((1754, 1808), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)', 'init': '"""k-means++"""', 'random_state': '(0)'}), 
"(n_clusters=3, init='k-means++', random_state=0)\n", (1760, 1808), False, 'from sklearn.cluster import KMeans\n'), ((1877, 1918), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data2.V1', 'data2.V2'], {'c': 'grupos'}), '(data2.V1, data2.V2, c=grupos)\n', (1888, 1918), True, 'import matplotlib.pyplot as plt\n'), ((2125, 2176), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/dataset_4_b.csv"""'], {'header': 'None'}), "('../Data/dataset_4_b.csv', header=None)\n", (2136, 2176), True, 'import pandas as pd\n'), ((2297, 2350), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, Y, test_size=0.3, random_state=0)\n', (2313, 2350), False, 'from sklearn.model_selection import train_test_split\n'), ((2381, 2407), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['ngrado'], {}), '(ngrado)\n', (2399, 2407), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2496, 2529), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (2527, 2529), False, 'from sklearn import linear_model\n'), ((2616, 2638), 'pandas.get_dummies', 'pd.get_dummies', (['Y_test'], {}), '(Y_test)\n', (2630, 2638), True, 'import pandas as pd\n'), ((2651, 2676), 'pandas.get_dummies', 'pd.get_dummies', (['Yhat_test'], {}), '(Yhat_test)\n', (2665, 2676), True, 'import pandas as pd\n'), ((3082, 3145), 'matplotlib.pyplot.bar', 'plt.bar', (["['Accu', 'Prec', 'Rec']", '[accuracy, precision, recall]'], {}), "(['Accu', 'Prec', 'Rec'], [accuracy, precision, recall])\n", (3089, 3145), True, 'import matplotlib.pyplot as plt\n'), ((3388, 3409), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (3395, 3409), False, 'from sklearn import svm\n'), ((3585, 3602), 'pandas.get_dummies', 'pd.get_dummies', (['y'], {}), '(y)\n', (3599, 3602), True, 'import pandas as pd\n'), ((3615, 3635), 'pandas.get_dummies', 'pd.get_dummies', (['Yhat'], {}), 
'(Yhat)\n', (3629, 3635), True, 'import pandas as pd\n'), ((4130, 4193), 'matplotlib.pyplot.bar', 'plt.bar', (["['Accu', 'Prec', 'Rec']", '[accuracy, precision, recall]'], {}), "(['Accu', 'Prec', 'Rec'], [accuracy, precision, recall])\n", (4137, 4193), True, 'import matplotlib.pyplot as plt\n'), ((4233, 4280), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.iloc[:, 0]', 'x.iloc[:, 1]'], {'c': 'Yhat'}), '(x.iloc[:, 0], x.iloc[:, 1], c=Yhat)\n', (4244, 4280), True, 'import matplotlib.pyplot as plt\n'), ((4278, 4288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4286, 4288), True, 'import matplotlib.pyplot as plt\n'), ((4290, 4337), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.iloc[:, 0]', 'x.iloc[:, 2]'], {'c': 'Yhat'}), '(x.iloc[:, 0], x.iloc[:, 2], c=Yhat)\n', (4301, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4335, 4345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4343, 4345), True, 'import matplotlib.pyplot as plt\n'), ((4347, 4394), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x.iloc[:, 1]', 'x.iloc[:, 2]'], {'c': 'Yhat'}), '(x.iloc[:, 1], x.iloc[:, 2], c=Yhat)\n', (4358, 4394), True, 'import matplotlib.pyplot as plt\n'), ((4392, 4402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4400, 4402), True, 'import matplotlib.pyplot as plt\n'), ((720, 733), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (729, 733), True, 'import numpy as np\n'), ((752, 806), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': '"""k-means++"""', 'random_state': '(0)'}), "(n_clusters=k, init='k-means++', random_state=0)\n", (758, 806), False, 'from sklearn.cluster import KMeans\n'), ((894, 910), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (903, 910), True, 'import numpy as np\n'), ((1472, 1485), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1481, 1485), True, 'import numpy as np\n'), ((1504, 1558), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': 
'"""k-means++"""', 'random_state': '(0)'}), "(n_clusters=k, init='k-means++', random_state=0)\n", (1510, 1558), False, 'from sklearn.cluster import KMeans\n'), ((1647, 1663), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (1656, 1663), True, 'import numpy as np\n'), ((1402, 1416), 'numpy.mean', 'np.mean', (['data2'], {}), '(data2)\n', (1409, 1416), True, 'import numpy as np\n'), ((2808, 2833), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y1', 'Yhat1'], {}), '(y1, Yhat1)\n', (2822, 2833), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((2833, 2858), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y2', 'Yhat2'], {}), '(y2, Yhat2)\n', (2847, 2858), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((2858, 2883), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y3', 'Yhat3'], {}), '(y3, Yhat3)\n', (2872, 2883), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((2907, 2933), 'sklearn.metrics.precision_score', 'precision_score', (['y1', 'Yhat1'], {}), '(y1, Yhat1)\n', (2922, 2933), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((2933, 2959), 'sklearn.metrics.precision_score', 'precision_score', (['y2', 'Yhat2'], {}), '(y2, Yhat2)\n', (2948, 2959), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((2959, 2985), 'sklearn.metrics.precision_score', 'precision_score', (['y3', 'Yhat3'], {}), '(y3, Yhat3)\n', (2974, 2985), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3007, 3030), 'sklearn.metrics.recall_score', 'recall_score', (['y1', 'Yhat1'], {}), '(y1, Yhat1)\n', (3019, 3030), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3030, 3053), 'sklearn.metrics.recall_score', 'recall_score', (['y2', 'Yhat2'], {}), '(y2, Yhat2)\n', (3042, 3053), False, 'from 
sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3053, 3076), 'sklearn.metrics.recall_score', 'recall_score', (['y3', 'Yhat3'], {}), '(y3, Yhat3)\n', (3065, 3076), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3856, 3881), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y1', 'Yhat1'], {}), '(y1, Yhat1)\n', (3870, 3881), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3881, 3906), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y2', 'Yhat2'], {}), '(y2, Yhat2)\n', (3895, 3906), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3906, 3931), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y3', 'Yhat3'], {}), '(y3, Yhat3)\n', (3920, 3931), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3955, 3981), 'sklearn.metrics.precision_score', 'precision_score', (['y1', 'Yhat1'], {}), '(y1, Yhat1)\n', (3970, 3981), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3981, 4007), 'sklearn.metrics.precision_score', 'precision_score', (['y2', 'Yhat2'], {}), '(y2, Yhat2)\n', (3996, 4007), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((4007, 4033), 'sklearn.metrics.precision_score', 'precision_score', (['y3', 'Yhat3'], {}), '(y3, Yhat3)\n', (4022, 4033), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((4055, 4078), 'sklearn.metrics.recall_score', 'recall_score', (['y1', 'Yhat1'], {}), '(y1, Yhat1)\n', (4067, 4078), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((4078, 4101), 'sklearn.metrics.recall_score', 'recall_score', (['y2', 'Yhat2'], {}), '(y2, Yhat2)\n', (4090, 4101), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((4101, 4124), 'sklearn.metrics.recall_score', 
'recall_score', (['y3', 'Yhat3'], {}), '(y3, Yhat3)\n', (4113, 4124), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n')] |
import numpy as np
from environment.base_environment import BaseEnvironment
class L2Environment(BaseEnvironment):
    # Reward shaping based on the Euclidean (L2) distance to the goal.
    def reward(self, state, action):
        # Reward in (0, 1]: 1 at the goal, decaying as 1/(1+d) where d is the
        # straight-line distance between state (x, y) and self.end_pos.
        # `action` is unused. (Trailing "| [" is dataset-column residue.)
        return 1 / (1 + np.sqrt((self.end_pos[0] - state[0]) ** 2 + (self.end_pos[1] - state[1]) ** 2)) | [
"numpy.sqrt"
] | [((177, 255), 'numpy.sqrt', 'np.sqrt', (['((self.end_pos[0] - state[0]) ** 2 + (self.end_pos[1] - state[1]) ** 2)'], {}), '((self.end_pos[0] - state[0]) ** 2 + (self.end_pos[1] - state[1]) ** 2)\n', (184, 255), True, 'import numpy as np\n')] |
import wx, numpy, re
# The recommended way to use wx with mpl is with the WXAgg backend.
import matplotlib
matplotlib.use('WXAgg')
import matplotlib.pyplot as plt
import scipy.spatial.distance
import matplotlib.colors
from matplotlib.text import Text
from matplotlib.patches import Ellipse
from matplotlib.patches import Polygon
from ..reremi.classQuery import SYM, Query
from ..reremi.classRedescription import Redescription
from ..reremi.classSParts import SSetts
from classInterObjects import MaskCreator
import pdb
class DrawerBasis(object):
wait_delay = 300
max_emphlbl = 5
ann_xy = (-10, 15)
info_dets = {"px": 0, "py":0, "dx": 10, "dy":10, "va":"bottom", "ha": "left", "ec": "#111111", "alpha": .6}
ltids_map = {1: "binary", 2: "spectral", 3: "viridis"}
    @classmethod
    def getCMap(tcl, ltid):
        # Map a ltid (1..3, see ltids_map) to a named matplotlib colormap,
        # defaulting to "jet" for unknown ids. (`tcl` is the class object --
        # unconventional spelling of `cls`.)
        return plt.get_cmap(tcl.ltids_map.get(ltid, "jet"))
    def __init__(self, view):
        # Back-reference to the owning view (supplies layout, data handlers,
        # settings, parent access).
        self.view = view
        # Pending delayed-call handle, if any -- presumably used with
        # wait_delay; TODO confirm against subclasses.
        self.call_wait = None
        # Registry of named UI/plot elements; "act_butt" holds active mouse buttons.
        self.elements = {"active_info": False, "act_butt": [1]}
        self.initPlot()
        self.plot_void()
    def initPlot(self):
        # Create the single main axes on the figure.
        self.setAxe(self.getFigure().add_subplot( 111 ))
    def draw(self):
        # Delegate the canvas redraw to the layout handler.
        self.getLayH().draw()
    def setFocus(self):
        # Delegate keyboard-focus handling to the layout handler.
        self.getLayH().setFocus()
def getVecAndDets(self, inter_params=None):
vec = self.getPltDtH().getVec()
vec_dets = self.getPltDtH().getVecDets()
return vec, vec_dets
    def update(self, more=None):
        """Recompute the dot data and redraw the whole plot.

        No-op if the view was killed; falls back to plot_void() when the
        plot is not ready. `more` is accepted for interface compatibility
        but unused here.
        """
        if self.view.wasKilled():
            return
        if self.isReadyPlot():
            self.clearPlot()
            self.makeBackground()
            inter_params = self.getParamsInter()
            vec, vec_dets = self.getVecAndDets(inter_params)
            draw_settings = self.getDrawSettings()
            # Rows hidden by the user; drawn with reduced opacity below.
            selected = self.getPltDtH().getUnvizRows()
            x0, x1, y0, y1 = self.getAxisLims()
            bx, by = (x1-x0)/100.0, (y1-y0)/100.0  # 1% of each axis span
            corners = (x0, x1, y0, y1, bx, by)
            if self.getPltDtH().isSingleVar():
                self.dots_draws, mapper = self.prepareSingleVarDots(vec, vec_dets, draw_settings)
            else:
                self.dots_draws = self.prepareEntitiesDots(vec, vec_dets, draw_settings)
                mapper = None
            if len(selected) > 0:
                # Fade face/edge alpha of hidden rows by the opacity slider.
                selp = inter_params.get("slide_opac", 50)/100.
                self.dots_draws["fc_dots"][numpy.array(list(selected)), -1] *= selp
                self.dots_draws["ec_dots"][numpy.array(list(selected)), -1] *= selp
            draw_indices = numpy.where(self.dots_draws["draw_dots"])[0]
            if self.plotSimple(): ## #### NO PICKER, FASTER PLOTTING.
                self.plotDotsSimple(self.getAxe(), self.dots_draws, draw_indices, draw_settings)
            else:
                self.plotDotsPoly(self.getAxe(), self.dots_draws, draw_indices, draw_settings)
            if mapper is not None:
                # Single-variable mode: add the value histogram; may adjust corners.
                corners = self.plotMapperHist(self.axe, vec, vec_dets, mapper, self.NBBINS, corners, draw_settings)
            self.makeFinish(corners[:4], corners[4:])
            self.updateEmphasize(review=False)
            self.draw()
            self.setFocus()
        else:
            self.plot_void()
### IMPLEMENT
    def isEntitiesPlt(self):
        # Hook: subclasses that plot data entities override to return True.
        return False
    def isRedPlt(self):
        # Hook: subclasses that plot redescriptions override to return True.
        return False
    def inCapture(self, event):
        # True when a mouse/motion event occurred inside the main axes.
        return event.inaxes == self.getAxe()
    def isReadyPlot(self):
        # Hook: whether the drawer has everything it needs to plot.
        return True
    def getAxisLims(self):
        # Hook: (x0, x1, y0, y1) axis limits; unit square by default.
        return (0,1,0,1)
    def drawPoly(self):
        # Hook: whether dots are drawn as polygons (slower, pickable).
        return False
    def plotSimple(self):
        # Simple (fast, no-picker) plotting is the complement of polygon mode.
        return not self.drawPoly()
    def adjust(self):
        # Hook: post-layout adjustment; no-op by default.
        pass
    def makeBackground(self):
        # Hook: draw static background (e.g. basemap); no-op by default.
        pass
    def makeFinish(self, xylims=(0,1,0,1), xybs=(.1,.1)):
        # Final pass after plotting: set the axes limits. `xybs` (margins)
        # is unused here but kept for subclass overrides.
        self.axe.axis([xylims[0], xylims[1], xylims[2], xylims[3]])
    def updateEmphasize(self, review=True):
        # Re-apply the parent's emphasized rows for this view, if this drawer
        # plots entities and has a parent.
        if self.hasParent() and self.isEntitiesPlt():
            lids = self.getParentViewsm().getEmphasizedR(vkey=self.getId())
            self.emphasizeOnOff(turn_on=lids, turn_off=None, review=review)
    # NOTE(review): mutable default arguments (set()) are an anti-pattern,
    # but harmless here since this base implementation never mutates them.
    def emphasizeOnOff(self, turn_on=set(), turn_off=set(), hover=False, review=True):
        # Hook: toggle emphasis of the given row ids; no-op by default.
        pass
def clearPlot(self):
axxs = self.getFigure().get_axes()
for ax in axxs:
ax.cla()
if ax != self.getAxe():
self.getFigure().delaxes(ax)
self.clearHighlighted()
self.clearInfoText()
    def clearHighlighted(self):
        # Hook: remove highlight annotations; no-op by default.
        ### IMPLEMENT
        pass
    def getId(self):
        # Identifier of the owning view.
        return self.view.getId()
    def hasParent(self):
        # Whether the owning view is attached to a parent frame.
        return self.view.hasParent()
    def getParent(self):
        # Parent frame of the owning view.
        return self.view.getParent()
    def getLayH(self):
        # Layout handler (owns figure, toolbar, focus).
        return self.view.getLayH()
    def getPltDtH(self):
        # Plot-data handler (vectors, details, row visibility).
        return self.view.getPltDtH()
    def getParentData(self):
        # Data object held by the parent.
        return self.view.getParentData()
    def getParentViewsm(self):
        # Parent's views manager (tracks emphasized rows across views).
        return self.view.getParentViewsm()
    def getDrawSettDef(self):
        # Default drawing settings from the view.
        return self.view.getDrawSettDef()
    def getDrawSettings(self):
        # Current drawing settings from the view.
        return self.view.getDrawSettings()
    def getVec(self, more=None):
        # Value vector from the plot-data handler; `more` is passed through.
        return self.getPltDtH().getVec(more)
    def getFigure(self):
        # Matplotlib figure owned by the layout handler.
        return self.getLayH().getFigure()
    def getAxe(self):
        # Main matplotlib axes.
        return self.axe
    def setAxe(self, value):
        # Store the main matplotlib axes.
        self.axe = value
    def delElement(self, key):
        # Remove `key` from the elements registry if present.
        if key in self.elements:
            del self.elements[key]
    def setElement(self, key, value):
        # Register/overwrite a named element.
        self.elements[key] = value
    def getElement(self, key):
        # Look up a named element; raises KeyError if absent.
        return self.elements[key]
    def hasElement(self, key):
        # Whether a named element is registered.
        return key in self.elements
    def getSettV(self, key, default=False):
        # Setting/preference lookup, delegated to the view.
        return self.view.getSettV(key, default)
    def makeAdditionalElements(self, panel=None):
        # Hook for subclasses: extra widget boxes to add to `panel`.
        return []
#### SEC: INTERACTIVE ELEMENTS
###########################################
    def prepareInteractive(self, panel=None):
        # Build the interactive widgets, wire canvas callbacks, and set up
        # keyboard actions. Returns the widget boxes for layout.
        self.setElement("inter_elems", {})
        boxes = self.makeAdditionalElements(panel)
        self.interBind()
        self.setElement("ellipse", Ellipse((2, -1), 0.5, 0.5))
        for act, meth in self.getCanvasConnections():
            if act == "MASK":
                # Special token: install a MaskCreator instead of a plain
                # matplotlib event hook.
                self.setElement("mask_creator", MaskCreator(self.getAxe(), None, buttons_t=[], callback_change=self.view.makeMenu))
            else:
                self.getFigure().canvas.mpl_connect(act, meth)
        self.prepareActions()
        self.setKeys()
        return boxes
    def getCanvasConnections(self):
        # Hook: (event_name, handler) pairs for canvas binding; "MASK" gets
        # special treatment in prepareInteractive.
        return []
def interBind(self):
for button in self.getElement("buttons"):
button["element"].Bind(wx.EVT_BUTTON, button["function"])
for name, elem in self.getElement("inter_elems").items():
if re.match("slide", name):
elem.Bind(wx.EVT_SCROLL_THUMBRELEASE, self.OnSlide)
if re.match("choice", name):
elem.Bind(wx.EVT_CHOICE, self.OnChoice)
def getParamsInter(self):
inter_params = {}
for name, elem in self.getElement("inter_elems").items():
if re.match("slide", name):
inter_params[name] = elem.GetValue()
if re.match("choice", name):
inter_params[name] = elem.GetSelection()
return inter_params
    def getInterElements(self):
        # The name -> widget map of interactive elements.
        return self.getElement("inter_elems")
    def OnSlide(self, event):
        # Slider released: redraw with the new parameter values.
        self.update()
    def OnChoice(self, event):
        # Choice changed: redraw with the new parameter values.
        self.update()
#### SEC: ACTIONS
###########################################
    def hasToolbActive(self):
        # Whether a toolbar tool (e.g. zoom/pan) is currently active.
        return self.getLayH().getToolbar().has_active_button()
def getActionsDetails(self):
details = []
for action, dtl in self.actions_map.items():
details.append({"label": "%s[%s]" % (dtl["label"].ljust(30), dtl["key"]),
"legend": dtl["legend"], "active": dtl["active_q"](),
"key": dtl["key"], "order": dtl["order"], "type": dtl["type"]})
if self.hasElement("mask_creator"):
details.extend(self.getElement("mask_creator").getActionsDetails(6))
return details
def prepareActions(self):
self.actions_map = {}
def OnMenuAction(self, event):
if event.GetId() in self.menu_map_act:
self.doActionForKey(self.menu_map_act[event.GetId()])
def OnMenuMCAction(self, event):
if self.hasElement("mask_creator") and event.GetId() in self.menu_map_act:
self.getElement("mask_creator").doActionForKey(self.menu_map_act[event.GetId()])
    def makeActionsMenu(self, frame, menuAct=None):
        """Populate (or create) the edit-actions menu from getActionsDetails().

        "check" actions become checkable items; inactive plain actions are
        greyed out; "mc" actions are routed to the mask creator's handler.
        """
        self.menu_map_act = {}
        if menuAct is None:
            menuAct = wx.Menu()
        for action in sorted(self.getActionsDetails(), key=lambda x:(x["order"],x["key"])):
            ID_ACT = wx.NewId()
            if action["type"] == "check":
                m_act = menuAct.AppendCheckItem(ID_ACT, action["label"], action["legend"])
                frame.Bind(wx.EVT_MENU, self.OnMenuAction, m_act)
                self.menu_map_act[ID_ACT] = action["key"]
                if action["active"]:
                    m_act.Check()
            else:
                m_act = menuAct.Append(ID_ACT, action["label"], action["legend"])
                if action["active"]:
                    if action["type"] == "mc":
                        frame.Bind(wx.EVT_MENU, self.OnMenuMCAction, m_act)
                    else:
                        frame.Bind(wx.EVT_MENU, self.OnMenuAction, m_act)
                    self.menu_map_act[ID_ACT] = action["key"]
                else:
                    menuAct.Enable(ID_ACT, False)
        if menuAct.GetMenuItemCount() == 0:
            self.getParent().appendEmptyMenuEntry(menuAct, "No Actions", "There are no edit actions.")
        return menuAct
################ HANDLING KEY ACTIONS
def setKeys(self, keys=None):
self.keys_map = {}
if keys is None:
for action, details in self.actions_map.items():
details["key"] = action[0]
self.keys_map[details["key"]] = action
else:
for action, details in self.actions_map.items():
details["key"] = None
for key, action in keys.items():
if action in self.actions_map:
self.actions_map[action]["key"] = key
self.keys_map[key] = action
def doActionForKey(self, key):
if self.keys_map.get(key, None in self.actions_map):
act = self.actions_map[self.keys_map[key]]
if act["type"] == "check" or act["active_q"]():
self.actions_map[self.keys_map[key]]["method"](self.actions_map[self.keys_map[key]]["more"])
return True
return False
def key_press_callback(self, event):
self.doActionForKey(event.key)
def mkey_press_callback(self, event):
self.doActionForKey(chr(event.GetKeyCode()).lower())
################ ACTIONS QUERIES
def q_has_poly(self):
return self.hasElement("mask_creator") and self.getElement("mask_creator").q_has_poly()
def q_active_poly(self):
return self.hasElement("mask_creator") and self.getElement("mask_creator").isActive()
def q_active_info(self):
return self.getElement("active_info")
def q_true(self):
return True
def q_not_svar(self):
return not self.getPltDtH().isSingleVar()
def q_has_selected(self):
return len(self.getHighlightedIds()) > 0
################ ACTIONS FUNCTIONS
def do_toggle_info(self, event):
self.setElement("active_info", not self.getElement("active_info"))
def do_toggle_poly(self, event):
self.togglePoly()
    def togglePoly(self):
        """Toggle the polygon (mask) drawing tool and refresh menu/toolbar state.

        Mouse buttons are handed either to the mask creator or back to the
        drawer ("act_butt") — presumably so only one of them reacts to clicks;
        TODO confirm against the MaskCreator implementation.
        """
        if self.hasElement("mask_creator"):
            if self.getElement("mask_creator").isActive():
                self.getElement("mask_creator").setButtons([])
                self.setElement("act_butt", [1])
            else:
                self.getElement("mask_creator").setButtons([1])
                self.setElement("act_butt", [])
            self.view.makeMenu()
            self.getLayH().getToolbar().mouse_move()
def apply_mask(self, path, radius=0.0):
if path is not None and self.getCoords() is not None:
points = numpy.transpose((self.getCoords(0), self.getCoords(1)))
return [i for i,point in enumerate(points) if path.contains_point(point, radius=radius)]
return []
def do_deselect_all(self, more=None):
if self.isEntitiesPlt():
self.sendEmphasize(None)
def do_set_select(self, setp):
if self.isEntitiesPlt():
points = [i for (i,p) in enumerate(self.getVec()) if p in setp]
self.sendEmphasize(points)
def do_select_poly(self, more=None):
if self.isEntitiesPlt():
points = self.apply_mask(self.getElement("mask_creator").get_path())
self.getElement("mask_creator").clear()
if points != set():
self.sendEmphasize(points)
def do_flip_emphasized(self, more=None):
if self.isEntitiesPlt():
self.sendFlipEmphasizedR()
def save_supp_var(self, more=None):
if self.hasParent():
self.getParent().OnSaveSuppAsVar(self.getVec(), "%s" % self.getParentViewsm().getItemId(self.getId()))
def save_sel_var(self, more=None):
if self.hasParent() and self.isEntitiesPlt():
lids = self.getParentViewsm().getEmphasizedR(vkey=self.getId())
self.getParent().OnSaveSelAsVar(lids, "S%s" % self.getParentViewsm().getItemId(self.getId()))
#### SEC: FILL and WAIT PLOTTING
###########################################
    def plot_void(self):
        """Draw the empty placeholder (three grey squares) shown when there is nothing to plot."""
        if self.view.wasKilled():
            return
        self.clearPlot()
        self.axe.plot([r/10.0+0.3 for r in [0,2,4]], [0.5 for r in [0,2,4]], 's', markersize=10, mfc="#DDDDDD", mec="#DDDDDD")
        self.axe.axis([0,1,0,1])
        self.draw()
def init_wait(self):
self.call_wait = wx.CallLater(1, self.plot_wait)
self.cp = 0
    def kill_wait(self):
        """Stop the busy animation and draw the final frame (three black dots).

        NOTE(review): assumes init_wait ran first, otherwise `self.call_wait`
        is unset and this raises AttributeError — confirm the calling order.
        """
        if self.call_wait is not None:
            self.call_wait.Stop()
        if self.view.wasKilled():
            return
        self.clearPlot()
        self.axe.plot([r/10.0+0.3 for r in [1,3]], [0.5, 0.5], 's', markersize=10, mfc="#DDDDDD", mec="#DDDDDD")
        self.axe.plot([r/10.0+0.3 for r in [0,2,4]], [0.5, 0.5, 0.5], 'ks', markersize=10)
        self.axe.axis([0,1,0,1])
        self.draw()
    def plot_wait(self):
        """Draw one frame of the busy animation and schedule the next one.

        A black square cycles over five grey positions; `self.cp` tracks the frame.
        """
        if self.view.wasKilled():
            return
        self.clearPlot()
        self.axe.plot([r/10.0+0.3 for r in range(5)], [0.5 for r in range(5)], 'ks', markersize=10, mfc="#DDDDDD", mec="#DDDDDD")
        self.axe.plot(((self.cp)%5)/10.0+0.3, 0.5, 'ks', markersize=10)
        self.axe.axis([0,1,0,1])
        self.draw()
        self.cp += 1
        self.call_wait.Restart(self.wait_delay)
    def setInfoText(self, text_str):
        """Show `text_str` in the info overlay, creating the annotation on first use.

        Position comes from getInfoDets(): px/py are fractions of the axis
        range, dx/dy an offset in points; subsequent calls just update the text.
        """
        if not self.hasElement("info_text"):
            info_text = {}
            ax = self.getAxe()
            dets = self.getInfoDets()
            xlims = ax.get_xlim()
            lx = xlims[0] + dets["px"]*(xlims[1]-xlims[0])
            ylims = ax.get_ylim()
            ly = ylims[0] + dets["py"]*(ylims[1]-ylims[0])
            info_text["text"] = ax.annotate(text_str, xy=(lx, ly),
                                            xycoords='data', xytext=(dets["dx"], dets["dy"]), textcoords='offset points',
                                            color=dets["ec"], va=dets["va"], ha=dets["ha"], backgroundcolor="#FFFFFF",
                                            bbox=dict(boxstyle="round", facecolor="#FFFFFF", ec=dets["ec"], alpha=dets["alpha"]),
                                            zorder=8, **self.view.getFontProps())
            self.setElement("info_text", info_text)
        else:
            self.getElement("info_text")["text"].set_text(text_str)
        self.draw()
    def clearInfoText(self):
        """Forget the info-text element (does NOT remove the artist — see delInfoText)."""
        self.delElement("info_text")
def delInfoText(self):
if self.hasElement("info_text"):
self.getElement("info_text")["text"].remove()
self.delElement("info_text")
self.draw()
    def addStamp(self, pref=""):
        """Display the current redescription (queries + stats) above the plot.

        On first call the axe is shrunk to 7/8 height to make room; later
        calls only update the text. NOTE(review): the update branch passes
        font kwargs to Text.set_text, which takes only the string — confirm
        this path is actually exercised.
        """
        if not self.getPltDtH().hasQueries():
            return
        if not self.hasElement("red_stamp"):
            old_pos = self.getAxe().get_position()
            # print "PosA", old_pos
            new_pos = [old_pos.x0, old_pos.y0, old_pos.width, 7./8*old_pos.height]
            # # pos2 = [0., 0., 1., 1.0]
            self.getAxe().set_position(new_pos)
            # pos1 = self.axe.get_position()
            # print "PosB", pos1
        qrs = self.getPltDtH().getQueries()
        red = Redescription.fromQueriesPair(qrs, self.getParentData())
        tex_fields = ["LHS:query:", "RHS:query:", ":acc:", ":perc:Exx"]
        headers = ["qL=", "qR=", "J=", "%supp="]
        if self.getParentData().hasLT():
            tex_fields.extend(["acc_ratioTL", "len_I_ratioTA"])
            headers.extend(["J$_{I/O}$=", "supp$_{I/A}$="])
        rr = pref
        tex_str = red.disp(self.getParentData().getNames(), list_fields=tex_fields, with_fname=headers, sep=" ", delim="", nblines=3, style="T") #, rid=rr)
        if not self.hasElement("red_stamp"):
            red_stamp = {"old_pos": old_pos}
            old_pos = self.getAxe().get_position()
            red_stamp["text"] = self.getAxe().text(0.5*self.getAxe().get_xlim()[1], self.getAxe().get_ylim()[1], tex_str, ha="center", va="bottom", **self.view.getFontProps())
            self.setElement("red_stamp", red_stamp)
        else:
            self.getElement("red_stamp")["text"].set_text(tex_str, **self.view.getFontProps())
        self.draw()
def delStamp(self):
if self.hasElement("red_stamp"):
red_stamp = self.getElement("red_stamp")
self.axe.set_position(red_stamp["old_pos"])
red_stamp["text"].remove()
self.delElement("red_stamp")
self.draw()
class DrawerEntitiesTD(DrawerBasis):
    """Two-dimensional drawer plotting individual entities as dots (or polygons)."""
    NBBINS = 20       # default bin count — presumably for plotMapperHist; TODO confirm
    MAP_POLY = False  # default for the "map_poly" setting (draw entities as polygons)
    max_emphlbl = 5   # max number of emphasized entities that still get text labels
    # (key, menu label, support parts) triples for the per-part (de)select actions
    map_select_supp = [("l", "|E"+SSetts.sym_sparts[SSetts.Exo]+"|", [SSetts.Exo]),
                       ("r", "|E"+SSetts.sym_sparts[SSetts.Eox]+"|", [SSetts.Eox]),
                       ("i", "|E"+SSetts.sym_sparts[SSetts.Exx]+"|", [SSetts.Exx]),
                       ("o", "|E"+SSetts.sym_sparts[SSetts.Eoo]+"|", [SSetts.Eoo])]
    def __init__(self, view):
        """Initialize on top of DrawerBasis; per-dot draw data is computed lazily."""
        DrawerBasis.__init__(self, view)
        self.dots_draws = None  # dict of per-dot colors/sizes/zorders, set by prepare*Dots
def isEntitiesPlt(self):
return True
def drawPoly(self):
return self.getPltDtH().hasPolyCoords() & self.getSettV("map_poly", self.MAP_POLY)
def getAxisLims(self):
return self.getPltDtH().getParentCoordsExtrema()
#### SEC: ACTIONS
######################################
    def makeAdditionalElements(self, panel=None):
        """Build the extra controls row: opacity slider and 'Expand' button.

        Registers the widgets as elements and returns the sizer box to add.
        NOTE(review): AddSpacer with a tuple argument is a wxPython-classic
        idiom — newer wxPython expects a single int; confirm the target wx
        version before upgrading.
        """
        if panel is None:
            panel = self.getLayH().getPanel()
        flags = wx.ALIGN_CENTER | wx.ALL # | wx.EXPAND
        buttons = []
        buttons.append({"element": wx.Button(panel, size=(self.getLayH().butt_w,-1), label="Expand"),
                        "function": self.view.OnExpandSimp})
        buttons[-1]["element"].SetFont(wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        inter_elems = {}
        inter_elems["slide_opac"] = wx.Slider(panel, -1, 10, 0, 100, wx.DefaultPosition, (self.getLayH().sld_w, -1), wx.SL_HORIZONTAL)
        ##############################################
        add_boxB = wx.BoxSizer(wx.HORIZONTAL)
        add_boxB.AddSpacer((self.getLayH().getSpacerWn()/2.,-1))
        v_box = wx.BoxSizer(wx.VERTICAL)
        label = wx.StaticText(panel, wx.ID_ANY,u"- opac. disabled +")
        label.SetFont(wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        v_box.Add(label, 0, border=1, flag=flags) #, userData={"where": "*"})
        v_box.Add(inter_elems["slide_opac"], 0, border=1, flag=flags) #, userData={"where":"*"})
        add_boxB.Add(v_box, 0, border=1, flag=flags)
        add_boxB.AddSpacer((self.getLayH().getSpacerWn(),-1))
        add_boxB.Add(buttons[-1]["element"], 0, border=1, flag=flags)
        add_boxB.AddSpacer((self.getLayH().getSpacerWn()/2.,-1))
        self.setElement("buttons", buttons)
        self.setElement("inter_elems", inter_elems)
        return [add_boxB]
def getCanvasConnections(self):
return [("MASK", None),
("key_press_event", self.key_press_callback),
("button_release_event", self.on_click),
("motion_notify_event", self.on_motion)]
def hoverActive(self):
return self.getSettV('hover_entities') and not self.hasToolbActive()
def hoverCoordsActive(self):
return self.getSettV('hover_coords') and not self.hasToolbActive()
def clickActive(self):
return self.getSettV('click_entities') and not self.hasToolbActive()
def getLidAt(self, x, y):
if self.drawPoly():
return self.getLidAtPoly(x, y)
return self.getLidAtSimple(x, y)
    def getLidAtPoly(self, x, y):
        """Id of the drawn entity whose polygon contains (x, y).

        Tests only the 5 polygons whose centers are nearest the point;
        returns None implicitly when none of them contains it.
        """
        ids_drawn = numpy.where(self.dots_draws["draw_dots"])[0]
        d = scipy.spatial.distance.cdist(self.getCoordsXY(ids_drawn).T, [(x,y)])
        cands = [ids_drawn[i[0]] for i in numpy.argsort(d, axis=0)[:5]]
        i = 0
        while i < len(cands):
            path = Polygon(self.getPltDtH().getCoordsP(cands[i]), closed=True)
            if path.contains_point((x,y), radius=0.0):
                return cands[i]
            i += 1
    def getLidAtSimple(self, x, y):
        """Id of the drawn dot under (x, y), using a growing tolerance box.

        Converts the dot size to data units via the figure resolution and
        tries 1x, 2x and 3x that tolerance before giving up (None).
        """
        ids_drawn = numpy.where(self.dots_draws["draw_dots"])[0]
        sz = self.getPlotProp(0, "sz")
        size_dots = self.getLayH().getFigure().get_dpi()*self.getLayH().getFigure().get_size_inches()
        xlims = self.getAxe().get_xlim()
        ylims = self.getAxe().get_ylim()
        ### resolution: value delta per figure dot
        res = ((xlims[1]-xlims[0])/size_dots[0], (ylims[1]-ylims[0])/size_dots[1])
        coords = self.getCoordsXY(ids_drawn)
        for ss in range(3):
            sc = sz*(ss+1)
            tX = numpy.where((coords[0]-sc*res[0] <= x) & (x <= coords[0]+sc*res[0]) & (coords[1]-sc*res[1] <= y) & (y <= coords[1]+sc*res[1]))[0]
            if len(tX) > 0:
                return ids_drawn[tX[0]]
        return None
    def on_motion(self, event):
        """Mouse-move handler: hover-highlight the entity under the cursor,
        or show/clear the coordinate tooltip, depending on active settings."""
        if self.inCapture(event):
            if self.hoverActive():
                lid = self.getLidAt(event.xdata, event.ydata)
                if lid is None:
                    # left all entities: drop the hover highlight
                    self.emphasizeOnOff(turn_off=None, hover=True, review=True)
                elif not self.isHovered(lid):
                    self.emphasizeOnOff(turn_on=[lid], turn_off=None, hover=True, review=True)
            elif self.hoverCoordsActive():
                txt = self.getPosInfoTxt(event.xdata, event.ydata)
                if txt is not None:
                    self.setInfoText(txt)
                else:
                    self.delInfoText()
        elif self.hoverCoordsActive():
            # cursor outside the axes: clear any stale tooltip
            self.delInfoText()
def getPosInfo(self, x, y):
return (x, y)
def getPosInfoTxt(self, x, y):
return "x=% 8.4f, y=% 8.4f" % self.getPosInfo(x, y)
def on_click(self, event):
if self.clickActive() and self.inCapture(event):
lid = self.getLidAt(event.xdata, event.ydata)
if lid is not None:
self.sendEmphasize([lid])
    def prepareActions(self):
        """Register the drawer's actions keyed by name.

        setKeys() binds each action to the FIRST LETTER of its name, so the
        odd spellings are deliberate: "noggle_info" -> key 'n',
        "vave_sel_var" -> 'v' (avoiding a clash with "save_supp_var" -> 's').
        """
        ### First letter of action is the key, should be unique
        self.actions_map = {"deselect_all": {"method": self.do_deselect_all, "label": "&Deselect all",
                                             "legend": "Deselect all dots", "more": None, "type": "main",
                                             "order":1, "active_q":self.q_has_selected},
                            "flip_able": {"method": self.do_flip_emphasized, "label": "(Dis)able selected",
                                          "legend": "(Dis)able selected dots", "more": None, "type": "main",
                                          "order":0, "active_q":self.q_has_selected},
                            "noggle_info": {"method": self.do_toggle_info, "label": "Toggle i&nfo",
                                            "legend": "Toggle info", "more": None, "type": "check",
                                            "order":101, "active_q":self.q_active_info},
                            "vave_sel_var": {"method": self.save_sel_var, "label": "Save selection as variable",
                                             "legend": "Save the selection as a new data variable",
                                             "more": None, "type": "main",
                                             "order":10, "active_q":self.q_has_selected}
                            }
        if self.getPltDtH().hasQueries():
            self.actions_map["save_supp_var"] = {"method": self.save_supp_var, "label": "Save supp as variable",
                                                 "legend": "Save the support as a new data variable",
                                                 "more": None, "type": "main",
                                                 "order":11, "active_q":self.q_not_svar}
            # one (de)select action per support part, key from map_select_supp
            for setk, setl, setp in self.map_select_supp:
                self.actions_map[setk+"_set"] = {"method": self.do_set_select, "label": "(De)select "+setl,
                                                 "legend": "(De)select dots in "+setl, "more": setp, "type": "main",
                                                 "order":2, "active_q":self.q_not_svar}
        if self.hasElement("mask_creator"):
            self.actions_map["poly_set"] = {"method": self.do_select_poly, "label": "(De)select &polygon",
                                            "legend": "Select dots inside the polygon", "more": None, "type": "main",
                                            "order":3, "active_q":self.q_has_poly}
            self.actions_map["toggle_draw"] = {"method": self.do_toggle_poly, "label": "&Toggle polygon",
                                               "legend": "Toggle polygon drawing", "more": None, "type": "check",
                                               "order":100, "active_q":self.q_active_poly}
#### SEC: HANDLING HIGHLIGHTS
###########################################
def getCoordsXYA(self, idp):
return self.getPltDtH().getCoordsXYA(idp)
def getCoordsXY(self, idp):
return self.getPltDtH().getCoordsXY(idp)
def getCoords(self, axi=None, ids=None):
return self.getPltDtH().getCoords(axi, ids)
def makeEmphTag(self, lid):
# print self.getParentData().getRName(lid), ">", self.getPltDtH().pltdt.get("coords")[0][:,lid,0]
return self.getParentData().getRName(lid)
    def emphasizeOn(self, lids, hover=False):
        """Highlight the given entity ids (redrawn dot + optional text label).

        Each id gets a list of matplotlib artists; labels are only drawn when
        the selection is small enough (needingHighLbl). Artists are recorded
        via addHighlighted so they can be removed later.
        """
        dsetts = self.getDrawSettings()
        if not self.hasDotsReady():
            return
        hgs = {}
        for lid in self.needingHighlight(lids):
            hg = self.drawEntity(lid, dsetts["colhigh"], self.getPlotColor(lid, "ec"), self.getPlotProp(lid, "sz"), self.getPlotProp(lid, "zord"), dsetts["default"])
            if lid not in hgs:
                hgs[lid] = []
            hgs[lid].extend(hg)
        for lid in self.needingHighLbl(lids):
            tag = self.makeEmphTag(lid)
            hg = self.drawAnnotation(self.getCoordsXYA(lid), self.getPlotColor(lid, "ec"), tag, self.getAnnXY())
            if lid not in hgs:
                hgs[lid] = []
            hgs[lid].extend(hg)
        self.addHighlighted(hgs, hover)
    def emphasizeOnOff(self, turn_on=set(), turn_off=set(), hover=False, review=True):
        """Switch highlights off then on, redraw, and rebuild the menu unless hovering.

        NOTE: the mutable default sets are not mutated in this method (the
        callees appear to only iterate them), so sharing them across calls
        looks harmless. `turn_off=None` means "remove all" (see removeHighlighted).
        """
        self.emphasizeOff(turn_off, hover)
        self.emphasizeOn(turn_on, hover)
        # if hover:
        self.draw()
        if not hover:
            self.view.makeMenu()
def emphasizeOff(self, lids=None, hover=False):
self.removeHighlighted(lids, hover)
def sendEmphasize(self, lids):
return self.getParentViewsm().setEmphasizedR(vkey=self.getId(), lids=lids, show_info=self.q_active_info())
def sendFlipEmphasizedR(self):
return self.getParentViewsm().doFlipEmphasizedR(vkey=self.getId())
def initHighlighted(self):
self.highl = {}
self.high_lbl = set()
self.current_hover = {}
def clearHighlighted(self):
self.initHighlighted()
def isHovered(self, iid):
return iid in self.current_hover
def isHighlighted(self, iid):
return iid in self.highl
def isHighLbl(self, iid):
return iid in self.high_lbl
def needingHighLbl(self, iids):
if len(iids) <= self.max_emphlbl:
return [iid for iid in iids if not self.isHighLbl(iid)]
return []
def needingHighlight(self, iids):
return [iid for iid in iids if not self.isHighlighted(iid)]
    def getHighlightedIds(self):
        """Ids of all currently highlighted entities (a dict keys view)."""
        return self.highl.keys()
    def addHighlighted(self, hgs, hover=False):
        """Record highlight artists per id, in the hover or persistent store.

        `hgs` maps id -> artist or list of artists; ids whose artists include
        a Text (i.e. a label) are also remembered in high_lbl (non-hover only).
        """
        where = self.highl
        if hover:
            where = self.current_hover
        for iid, high in hgs.items():
            if iid not in where:
                where[iid] = []
            if type(high) is list:
                has_lbl = any([isinstance(t, Text) for t in high])
                where[iid].extend(high)
            else:
                has_lbl = isinstance(high, Text)
                where[iid].append(high)
            if has_lbl and not hover:
                self.high_lbl.add(iid)
def removeHighlighted1(self, iid):
if iid in self.highl:
while len(self.highl[iid]) > 0:
t = self.highl[iid].pop()
t.remove()
del self.highl[iid]
self.high_lbl.discard(iid)
def removeHover1(self, iid):
if iid in self.current_hover:
while len(self.current_hover[iid]) > 0:
t = self.current_hover[iid].pop()
t.remove()
del self.current_hover[iid]
def removeHighlighted(self, iid=None, hover=False):
if iid is None:
if hover:
iids = self.current_hover.keys()
else:
iids = self.highl.keys()
elif type(iid) is list or type(iid) is set:
iids = iid
else:
iids = [iid]
for iid in iids:
if hover:
self.removeHover1(iid)
else:
self.removeHighlighted1(iid)
#### SEC: PLOTTING
###########################################
def hasDotsReady(self):
return self.dots_draws is not None
def getPlotColor(self, idp, prop):
return tuple(self.dots_draws[prop+"_dots"][idp])
def getPlotProp(self, idp, prop):
return self.dots_draws[prop+"_dots"][idp]
    def prepareEntitiesDots(self, vec, vec_dets, draw_settings):
        """Compute per-dot face/edge colors, sizes, z-orders and draw flags.

        `vec` holds one support-part code per entity; styling is taken per
        part from `draw_settings`. Eoo ("delta") dots keep full size, the
        others are halved; when "delta_on" is False, Eoo dots are not drawn.
        """
        delta_on = draw_settings.get("delta_on", True)
        u, indices = numpy.unique(vec, return_inverse=True)
        styles = []
        for i in u:
            if draw_settings[i]["shape"] in [".",",","*","+","x"]:
                #### point-wise shape -> same color face and edge
                styles.append(draw_settings[i]["color_e"])
            else:
                #### otherwise -> possibly different colors face and edge
                styles.append(draw_settings[i]["color_f"])
        fc_clusts = numpy.array(styles)
        # fc_clusts = numpy.array([draw_settings[i]["color_f"] for i in u])
        fc_dots = fc_clusts[indices]
        ec_clusts = numpy.array([draw_settings[i]["color_e"] for i in u])
        ec_dots = ec_clusts[indices]
        zord_clusts = numpy.array([draw_settings[i]["zord"] for i in u])
        zord_dots = zord_clusts[indices]
        delta_dots = vec==SSetts.Eoo
        sz_dots = numpy.ones(vec.shape)*draw_settings["default"]["size"]
        sz_dots[~ delta_dots] *= 0.5
        if delta_on:
            draw_dots = numpy.ones(vec.shape, dtype=bool)
        else:
            draw_dots = ~ delta_dots
        return {"fc_dots": fc_dots, "ec_dots": ec_dots, "sz_dots": sz_dots, "zord_dots": zord_dots, "draw_dots": draw_dots}
    def prepareSingleVarDots(self, vec, vec_dets, draw_settings):
        """Color the dots by the value of a single variable through a colormap.

        Returns (dots_draws, mapper) where mapper is the ScalarMappable
        (usable for a colorbar/histogram legend).
        NOTE(review): `delta_on` is read but never used in this method.
        """
        delta_on = draw_settings.get("delta_on", True)
        cmap, vmin, vmax = (self.getCMap(vec_dets["typeId"]), numpy.nanmin(vec), numpy.nanmax(vec))
        if vec_dets.get("min_max") is not None:
            vmin, vmax = vec_dets["min_max"]
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)
        mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
        # if min_max is not None:
        #     mmp = dict([(v, mapper.to_rgba(v, alpha=draw_settings["default"]["color_e"][-1])) for v in numpy.arange(vmin, vmax+1)])
        #     ec_dots = numpy.array([mmp[v] for v in vec])
        # elif vec_dets["typeId"] == 3 or (vmin !=0 and min_max is None):
        if vec_dets["typeId"] == 3 or (vmin !=0 and vec_dets.get("min_max") is None):
            # map each value individually (continuous case)
            ec_dots = numpy.array([mapper.to_rgba(v, alpha=draw_settings["default"]["color_e"][-1]) for v in vec])
        else:
            # precompute a lookup table over the integer value range, then index by value
            mmp = numpy.array([mapper.to_rgba(v, alpha=draw_settings["default"]["color_e"][-1]) for v in numpy.arange(vmin, vmax+1)]+[draw_settings["default"]["color_f"]])
            ec_dots = mmp[vec]
        fc_dots = numpy.copy(ec_dots)
        # fc_dots[:,-1] = dsetts["color_f"][-1]
        dots_draws = {"fc_dots": fc_dots, "ec_dots": ec_dots,
                      "sz_dots": numpy.ones(vec.shape)*draw_settings["default"]["size"],
                      "zord_dots": numpy.ones(vec.shape)*draw_settings["default"]["zord"],
                      "draw_dots": numpy.ones(vec.shape, dtype=bool)}
        mapper.set_array(vec)
        return dots_draws, mapper
    def plotMapperHist(self, axe, vec, vec_dets, mapper, nb_bins, corners, draw_settings):
        """Draw a colored histogram of `vec` to the right of the plot area.

        Bars are horizontal, colored through `mapper`, with tick labels either
        from vec_dets["binLbls"] (categorical/ordinal) or from the bin edges.
        Returns the corners tuple with x1 extended past the histogram.
        NOTE(review): tick_params with string "off"/"on" is a legacy
        matplotlib idiom — newer versions expect booleans; confirm the
        pinned matplotlib version before upgrading.
        """
        x0, x1, y0, y1, bx, by = corners
        fracts = [.1, .03] ## ratio bars adjusted/fixed
        nb = nb_bins
        idsan = numpy.where(~numpy.isnan(vec))[0]
        if vec_dets["binLbls"] is not None:
            if vec_dets.get("binHist") is not None:
                nb = vec_dets["binHist"]
            else:
                # build bin edges centered between the declared bin values
                df = max(numpy.diff(vec_dets["binVals"]))
                nb = [vec_dets["binVals"][0]]+[(vec_dets["binVals"][i]+vec_dets["binVals"][i+1])/2. for i in range(len(vec_dets["binVals"])-1)]+[vec_dets["binVals"][-1]]
                nb[0] -= df/2.
                nb[-1] += df/2.
        # else: vec_dets["typeId"] == 2: ### Categorical
        #     nb = [b-0.5 for b in numpy.unique(vec[idsan])]
        #     nb.append(nb[-1]+1)
        #     bins_ticks = numpy.unique(vec[idsan])
        #     bins_lbl = vec_dets["binLbls"]
        n, bins, patches = plt.hist(vec[idsan], bins=nb)
        sum_h = numpy.max(n)
        norm_h = [ni*fracts[0]*float(x1-x0)/sum_h+fracts[1]*float(x1-x0) for ni in n]
        if vec_dets["binLbls"] is not None:
            bins_ticks = numpy.arange(len(vec_dets["binLbls"]))
            tmpb = [b-0.5 for b in bins_ticks]
            tmpb.append(tmpb[-1]+1)
            norm_bins_ticks = [(bi-tmpb[0])/float(tmpb[-1]-tmpb[0]) * 0.95*float(y1-y0) + y0 + 0.025*float(y1-y0) for bi in bins_ticks]
            norm_bins = [(bi-tmpb[0])/float(tmpb[-1]-tmpb[0]) * 0.95*float(y1-y0) + y0 + 0.025*float(y1-y0) for bi in tmpb]
            bins_lbl = vec_dets["binLbls"]
            colors = [mapper.to_rgba(i) for i in vec_dets["binVals"]]
        else:
            norm_bins = [(bi-bins[0])/float(bins[-1]-bins[0]) * 0.95*float(y1-y0) + y0 + 0.025*float(y1-y0) for bi in bins]
            norm_bins_ticks = norm_bins
            bins_lbl = bins
            colors = [mapper.to_rgba(numpy.mean(bins[i:i+2])) for i in range(len(n))]
        left = [norm_bins[i] for i in range(len(n))]
        width = [norm_bins[i+1]-norm_bins[i] for i in range(len(n))]
        bckc = "white"
        axe.barh(y0, -((fracts[0]+fracts[1])*(x1-x0)+bx), y1-y0, x1+(fracts[0]+fracts[1])*(x1-x0)+2*bx, color=bckc, edgecolor=bckc)
        axe.barh(left, -numpy.array(norm_h), width, x1+(fracts[0]+fracts[1])*(x1-x0)+2*bx, color=colors, edgecolor=bckc, linewidth=2)
        axe.plot([x1+2*bx+fracts[0]*(x1-x0), x1+2*bx+fracts[0]*(x1-x0)], [norm_bins[0], norm_bins[-1]], color=bckc, linewidth=2)
        x1 += (fracts[0]+fracts[1])*(x1-x0)+2*bx
        axe.set_yticks(norm_bins_ticks)
        axe.set_yticklabels(bins_lbl, **self.view.getFontProps())
        # self.axe.yaxis.tick_right()
        axe.tick_params(direction="inout", left="off", right="on",
                        labelleft="off", labelright="on", labelsize=self.view.getFontProps().get("size"))
        return (x0, x1, y0, y1, bx, by)
    def plotDotsSimple(self, axe, dots_draws, draw_indices, draw_settings):
        """Scatter the dots grouped by z-order (one scatter call per zorder value).

        Dots with zorder -1 are skipped.
        """
        ku, kindices = numpy.unique(dots_draws["zord_dots"][draw_indices], return_inverse=True)
        ## pdb.set_trace()
        for vi, vv in enumerate(ku):
            if vv != -1:
                axe.scatter(self.getCoords(0,draw_indices[kindices==vi]),
                            self.getCoords(1,draw_indices[kindices==vi]),
                            c=dots_draws["fc_dots"][draw_indices[kindices==vi],:],
                            edgecolors=dots_draws["ec_dots"][draw_indices[kindices==vi],:],
                            s=5*dots_draws["sz_dots"][draw_indices[kindices==vi]], marker=draw_settings["default"]["shape"], #### HERE
                            zorder=vv)
    def plotDotsPoly(self, axe, dots_draws, draw_indices, draw_settings):
        """Draw each dot individually as a polygon via drawEntity.

        NOTE(review): skips zorder == 1 here, while plotDotsSimple skips
        zorder == -1 — possibly a sign slip; confirm which is intended.
        """
        for idp in draw_indices:
            vv = self.getPlotProp(idp, "zord")
            if vv != 1:
                self.drawEntity(idp, self.getPlotColor(idp, "fc"), self.getPlotColor(idp, "ec"),
                                self.getPlotProp(idp, "sz"), vv, draw_settings["default"])
    def drawEntity(self, idp, fc, ec, sz=1, zo=4, dsetts={}):
        """Draw one entity (polygon patch or scatter marker); return its artists as a list.

        The mutable default `dsetts={}` is never mutated here, so it is safe.
        """
        ### HERE SAMPLE ###
        if self.drawPoly():
            return [self.axe.add_patch(Polygon(self.getPltDtH().getCoordsP(idp), closed=True, fill=True, fc=fc, ec=ec, zorder=zo))]
        else:
            ## print idp, fc, ec
            x, y = self.getCoordsXY(idp)
            # return self.axe.plot(x, y, mfc=fc, mec=ec, marker=dsetts["shape"], markersize=sz, linestyle='None', zorder=zo)
            return [self.axe.scatter([x], [y], c=fc, edgecolors=ec, s=5*sz, marker=dsetts["shape"], zorder=zo)]
def getAnnXY(self):
return self.ann_xy
def getInfoDets(self):
return self.info_dets
    def drawAnnotation(self, xy, ec, tag, xytext=None):
        """Annotate the point `xy` with `tag` in a rounded white box with a wedge arrow.

        The arrow is anchored on the stored "ellipse" patch (see prepareInteractive);
        returns the created artist in a list.
        """
        if xytext is None:
            xytext = self.getAnnXY()
        return [self.axe.annotate(tag, xy=xy, zorder=8,
                                  xycoords='data', xytext=xytext, textcoords='offset points',
                                  color=ec, va="center", backgroundcolor="#FFFFFF",
                                  bbox=dict(boxstyle="round", facecolor="#FFFFFF", ec=ec),
                                  arrowprops=dict(arrowstyle="wedge,tail_width=1.", fc="#FFFFFF", ec=ec,
                                                  patchA=None, patchB=self.getElement("ellipse"), relpos=(0.2, 0.5)),**self.view.getFontProps())]
| [
"matplotlib.pyplot.hist",
"numpy.argsort",
"numpy.array",
"numpy.nanmin",
"wx.Font",
"numpy.arange",
"numpy.mean",
"wx.NewId",
"numpy.where",
"numpy.diff",
"numpy.max",
"matplotlib.cm.ScalarMappable",
"numpy.nanmax",
"numpy.ones",
"matplotlib.use",
"re.match",
"wx.StaticText",
"num... | [((108, 131), 'matplotlib.use', 'matplotlib.use', (['"""WXAgg"""'], {}), "('WXAgg')\n", (122, 131), False, 'import matplotlib\n'), ((14334, 14365), 'wx.CallLater', 'wx.CallLater', (['(1)', 'self.plot_wait'], {}), '(1, self.plot_wait)\n', (14346, 14365), False, 'import wx, numpy, re\n'), ((20004, 20030), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (20015, 20030), False, 'import wx, numpy, re\n'), ((20113, 20137), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (20124, 20137), False, 'import wx, numpy, re\n'), ((20154, 20208), 'wx.StaticText', 'wx.StaticText', (['panel', 'wx.ID_ANY', 'u"""- opac. disabled +"""'], {}), "(panel, wx.ID_ANY, u'- opac. disabled +')\n", (20167, 20208), False, 'import wx, numpy, re\n'), ((31634, 31672), 'numpy.unique', 'numpy.unique', (['vec'], {'return_inverse': '(True)'}), '(vec, return_inverse=True)\n', (31646, 31672), False, 'import wx, numpy, re\n'), ((32077, 32096), 'numpy.array', 'numpy.array', (['styles'], {}), '(styles)\n', (32088, 32096), False, 'import wx, numpy, re\n'), ((32230, 32283), 'numpy.array', 'numpy.array', (["[draw_settings[i]['color_e'] for i in u]"], {}), "([draw_settings[i]['color_e'] for i in u])\n", (32241, 32283), False, 'import wx, numpy, re\n'), ((32343, 32393), 'numpy.array', 'numpy.array', (["[draw_settings[i]['zord'] for i in u]"], {}), "([draw_settings[i]['zord'] for i in u])\n", (32354, 32393), False, 'import wx, numpy, re\n'), ((33195, 33255), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax', 'clip': '(True)'}), '(vmin=vmin, vmax=vmax, clip=True)\n', (33222, 33255), False, 'import matplotlib\n'), ((33273, 33323), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (33301, 33323), False, 'import matplotlib\n'), ((34071, 34090), 'numpy.copy', 'numpy.copy', (['ec_dots'], {}), '(ec_dots)\n', (34081, 34090), False, 
'import wx, numpy, re\n'), ((35544, 35573), 'matplotlib.pyplot.hist', 'plt.hist', (['vec[idsan]'], {'bins': 'nb'}), '(vec[idsan], bins=nb)\n', (35552, 35573), True, 'import matplotlib.pyplot as plt\n'), ((35599, 35611), 'numpy.max', 'numpy.max', (['n'], {}), '(n)\n', (35608, 35611), False, 'import wx, numpy, re\n'), ((37678, 37750), 'numpy.unique', 'numpy.unique', (["dots_draws['zord_dots'][draw_indices]"], {'return_inverse': '(True)'}), "(dots_draws['zord_dots'][draw_indices], return_inverse=True)\n", (37690, 37750), False, 'import wx, numpy, re\n'), ((6217, 6243), 'matplotlib.patches.Ellipse', 'Ellipse', (['(2, -1)', '(0.5)', '(0.5)'], {}), '((2, -1), 0.5, 0.5)\n', (6224, 6243), False, 'from matplotlib.patches import Ellipse\n'), ((6896, 6919), 're.match', 're.match', (['"""slide"""', 'name'], {}), "('slide', name)\n", (6904, 6919), False, 'import wx, numpy, re\n'), ((7004, 7028), 're.match', 're.match', (['"""choice"""', 'name'], {}), "('choice', name)\n", (7012, 7028), False, 'import wx, numpy, re\n'), ((7249, 7272), 're.match', 're.match', (['"""slide"""', 'name'], {}), "('slide', name)\n", (7257, 7272), False, 'import wx, numpy, re\n'), ((7342, 7366), 're.match', 're.match', (['"""choice"""', 'name'], {}), "('choice', name)\n", (7350, 7366), False, 'import wx, numpy, re\n'), ((8904, 8913), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (8911, 8913), False, 'import wx, numpy, re\n'), ((9027, 9037), 'wx.NewId', 'wx.NewId', ([], {}), '()\n', (9035, 9037), False, 'import wx, numpy, re\n'), ((19690, 19766), 'wx.Font', 'wx.Font', (['(8)', 'wx.FONTFAMILY_DEFAULT', 'wx.FONTSTYLE_NORMAL', 'wx.FONTWEIGHT_NORMAL'], {}), '(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n', (19697, 19766), False, 'import wx, numpy, re\n'), ((20230, 20306), 'wx.Font', 'wx.Font', (['(8)', 'wx.FONTFAMILY_DEFAULT', 'wx.FONTSTYLE_NORMAL', 'wx.FONTWEIGHT_NORMAL'], {}), '(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n', (20237, 20306), False, 'import wx, 
numpy, re\n'), ((21642, 21683), 'numpy.where', 'numpy.where', (["self.dots_draws['draw_dots']"], {}), "(self.dots_draws['draw_dots'])\n", (21653, 21683), False, 'import wx, numpy, re\n'), ((22130, 22171), 'numpy.where', 'numpy.where', (["self.dots_draws['draw_dots']"], {}), "(self.dots_draws['draw_dots'])\n", (22141, 22171), False, 'import wx, numpy, re\n'), ((32512, 32533), 'numpy.ones', 'numpy.ones', (['vec.shape'], {}), '(vec.shape)\n', (32522, 32533), False, 'import wx, numpy, re\n'), ((32650, 32683), 'numpy.ones', 'numpy.ones', (['vec.shape'], {'dtype': 'bool'}), '(vec.shape, dtype=bool)\n', (32660, 32683), False, 'import wx, numpy, re\n'), ((33048, 33065), 'numpy.nanmin', 'numpy.nanmin', (['vec'], {}), '(vec)\n', (33060, 33065), False, 'import wx, numpy, re\n'), ((33067, 33084), 'numpy.nanmax', 'numpy.nanmax', (['vec'], {}), '(vec)\n', (33079, 33084), False, 'import wx, numpy, re\n'), ((34449, 34482), 'numpy.ones', 'numpy.ones', (['vec.shape'], {'dtype': 'bool'}), '(vec.shape, dtype=bool)\n', (34459, 34482), False, 'import wx, numpy, re\n'), ((2617, 2658), 'numpy.where', 'numpy.where', (["self.dots_draws['draw_dots']"], {}), "(self.dots_draws['draw_dots'])\n", (2628, 2658), False, 'import wx, numpy, re\n'), ((22650, 22796), 'numpy.where', 'numpy.where', (['((coords[0] - sc * res[0] <= x) & (x <= coords[0] + sc * res[0]) & (coords[\n 1] - sc * res[1] <= y) & (y <= coords[1] + sc * res[1]))'], {}), '((coords[0] - sc * res[0] <= x) & (x <= coords[0] + sc * res[0]) &\n (coords[1] - sc * res[1] <= y) & (y <= coords[1] + sc * res[1]))\n', (22661, 22796), False, 'import wx, numpy, re\n'), ((34267, 34288), 'numpy.ones', 'numpy.ones', (['vec.shape'], {}), '(vec.shape)\n', (34277, 34288), False, 'import wx, numpy, re\n'), ((34358, 34379), 'numpy.ones', 'numpy.ones', (['vec.shape'], {}), '(vec.shape)\n', (34368, 34379), False, 'import wx, numpy, re\n'), ((36913, 36932), 'numpy.array', 'numpy.array', (['norm_h'], {}), '(norm_h)\n', (36924, 36932), False, 'import wx, 
numpy, re\n'), ((21810, 21834), 'numpy.argsort', 'numpy.argsort', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (21823, 21834), False, 'import wx, numpy, re\n'), ((34787, 34803), 'numpy.isnan', 'numpy.isnan', (['vec'], {}), '(vec)\n', (34798, 34803), False, 'import wx, numpy, re\n'), ((34988, 35019), 'numpy.diff', 'numpy.diff', (["vec_dets['binVals']"], {}), "(vec_dets['binVals'])\n", (34998, 35019), False, 'import wx, numpy, re\n'), ((36544, 36569), 'numpy.mean', 'numpy.mean', (['bins[i:i + 2]'], {}), '(bins[i:i + 2])\n', (36554, 36569), False, 'import wx, numpy, re\n'), ((33946, 33974), 'numpy.arange', 'numpy.arange', (['vmin', '(vmax + 1)'], {}), '(vmin, vmax + 1)\n', (33958, 33974), False, 'import wx, numpy, re\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 14:08:39 2018
@author: Alankar
"""
import numpy as np
import matplotlib.pyplot as plt
profile = np.loadtxt('stm.txt')  # 2-D array, presumably STM height data — one value per pixel
x = np.arange(0,len(profile[0,:])+1,1)  # column edge indices (only their length is used below)
y = np.arange(0,len(profile[:,0])+1,1)  # row edge indices
plt.axis([0,len(x),0,len(y)])
plt.pcolormesh(profile,cmap='jet')  # false-colour rendering of the surface
plt.colorbar()
plt.savefig('silicon_surface.jpg')
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((159, 180), 'numpy.loadtxt', 'np.loadtxt', (['"""stm.txt"""'], {}), "('stm.txt')\n", (169, 180), True, 'import numpy as np\n'), ((293, 328), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['profile'], {'cmap': '"""jet"""'}), "(profile, cmap='jet')\n", (307, 328), True, 'import matplotlib.pyplot as plt\n'), ((329, 343), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (341, 343), True, 'import matplotlib.pyplot as plt\n'), ((345, 379), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""silicon_surface.jpg"""'], {}), "('silicon_surface.jpg')\n", (356, 379), True, 'import matplotlib.pyplot as plt\n'), ((381, 391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (389, 391), True, 'import matplotlib.pyplot as plt\n')] |
'''
this is the CD (cone-domination) algorithm of Chiu et al.
REFERENCES:
<NAME>., <NAME>., <NAME>.: A knee point based evolutionary multi-objective optimization for mission planning problems. In: GECCO’17:
Proc. of the Genetic and Evolutionary Computation Conference. pp. 1216-1223(2017)
'''
import numpy as np
from random import sample
def coneDominance(angle_array, individual1, individual2):
con1 = individual1
con2 = individual2
Omig1 = np.dot(angle_array, con1.T)
Omig2 = np.dot(angle_array, con2.T)
if (np.all((Omig1 - Omig2) <= 0)) and np.any((Omig1 - Omig2)< 0):
return 1
else:
return 0
def main_function(data, K):
# 135 degree angles
fai = 135
del_index = []
num = len(data)
show_dim = len(data[0, :])
angle_array = np.zeros([show_dim, show_dim])
angle = np.tan((2*np.pi/360)*(fai-90)/2)
for i in range(show_dim):
for j in range(show_dim):
if i != j:
angle_array[i, j] = angle
else:
angle_array[i, j] = 1
index = [i for i in range(num)]
reserve = list(set(index).difference(set(del_index)))
# knee_points = np.delete(data, del_index, axis=0)
if num - len(del_index) > K:
SK = sample(reserve, K)
knee_points = data[SK, :]
elif num - len(del_index) < K:
add_list = sample(del_index, K - (num - len(del_index)))
res_index = list(set(reserve).union(set(add_list)))
knee_points = data[res_index, :]
else:
knee_points = np.delete(data, del_index, axis=0)
return knee_points
if __name__ == '__main__':
points = np.loadtxt(sys.path[0]+'/data/points1/PMOP1_M2_A2.out')
main_function(points, 1) | [
"numpy.all",
"random.sample",
"numpy.tan",
"numpy.delete",
"numpy.any",
"numpy.dot",
"numpy.zeros",
"numpy.loadtxt"
] | [((457, 484), 'numpy.dot', 'np.dot', (['angle_array', 'con1.T'], {}), '(angle_array, con1.T)\n', (463, 484), True, 'import numpy as np\n'), ((497, 524), 'numpy.dot', 'np.dot', (['angle_array', 'con2.T'], {}), '(angle_array, con2.T)\n', (503, 524), True, 'import numpy as np\n'), ((796, 826), 'numpy.zeros', 'np.zeros', (['[show_dim, show_dim]'], {}), '([show_dim, show_dim])\n', (804, 826), True, 'import numpy as np\n'), ((839, 879), 'numpy.tan', 'np.tan', (['(2 * np.pi / 360 * (fai - 90) / 2)'], {}), '(2 * np.pi / 360 * (fai - 90) / 2)\n', (845, 879), True, 'import numpy as np\n'), ((1639, 1696), 'numpy.loadtxt', 'np.loadtxt', (["(sys.path[0] + '/data/points1/PMOP1_M2_A2.out')"], {}), "(sys.path[0] + '/data/points1/PMOP1_M2_A2.out')\n", (1649, 1696), True, 'import numpy as np\n'), ((533, 559), 'numpy.all', 'np.all', (['(Omig1 - Omig2 <= 0)'], {}), '(Omig1 - Omig2 <= 0)\n', (539, 559), True, 'import numpy as np\n'), ((568, 593), 'numpy.any', 'np.any', (['(Omig1 - Omig2 < 0)'], {}), '(Omig1 - Omig2 < 0)\n', (574, 593), True, 'import numpy as np\n'), ((1253, 1271), 'random.sample', 'sample', (['reserve', 'K'], {}), '(reserve, K)\n', (1259, 1271), False, 'from random import sample\n'), ((1539, 1573), 'numpy.delete', 'np.delete', (['data', 'del_index'], {'axis': '(0)'}), '(data, del_index, axis=0)\n', (1548, 1573), True, 'import numpy as np\n')] |
# Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import division
import numpy as np
import pytest
from numba.cuda import compile_ptx
from numba.np import numpy_support
import cudf
from cudf import Series, _lib as libcudf
from cudf.utils import dtypes as dtypeutils
@pytest.mark.parametrize(
"dtype", sorted(list(dtypeutils.NUMERIC_TYPES - {"int8"}))
)
def test_generic_ptx(dtype):
size = 500
lhs_arr = np.random.random(size).astype(dtype)
lhs_col = Series(lhs_arr)._column
rhs_arr = np.random.random(size).astype(dtype)
rhs_col = Series(rhs_arr)._column
def generic_function(a, b):
return a ** 3 + b
nb_type = numpy_support.from_dtype(cudf.dtype(dtype))
type_signature = (nb_type, nb_type)
ptx_code, output_type = compile_ptx(
generic_function, type_signature, device=True
)
dtype = numpy_support.as_dtype(output_type).type
out_col = libcudf.binaryop.binaryop_udf(lhs_col, rhs_col, ptx_code, dtype)
result = lhs_arr ** 3 + rhs_arr
np.testing.assert_almost_equal(result, out_col.to_array())
| [
"numba.np.numpy_support.as_dtype",
"cudf.Series",
"numpy.random.random",
"cudf._lib.binaryop.binaryop_udf",
"cudf.dtype",
"numba.cuda.compile_ptx"
] | [((781, 839), 'numba.cuda.compile_ptx', 'compile_ptx', (['generic_function', 'type_signature'], {'device': '(True)'}), '(generic_function, type_signature, device=True)\n', (792, 839), False, 'from numba.cuda import compile_ptx\n'), ((923, 987), 'cudf._lib.binaryop.binaryop_udf', 'libcudf.binaryop.binaryop_udf', (['lhs_col', 'rhs_col', 'ptx_code', 'dtype'], {}), '(lhs_col, rhs_col, ptx_code, dtype)\n', (952, 987), True, 'from cudf import Series, _lib as libcudf\n'), ((480, 495), 'cudf.Series', 'Series', (['lhs_arr'], {}), '(lhs_arr)\n', (486, 495), False, 'from cudf import Series, _lib as libcudf\n'), ((570, 585), 'cudf.Series', 'Series', (['rhs_arr'], {}), '(rhs_arr)\n', (576, 585), False, 'from cudf import Series, _lib as libcudf\n'), ((693, 710), 'cudf.dtype', 'cudf.dtype', (['dtype'], {}), '(dtype)\n', (703, 710), False, 'import cudf\n'), ((867, 902), 'numba.np.numpy_support.as_dtype', 'numpy_support.as_dtype', (['output_type'], {}), '(output_type)\n', (889, 902), False, 'from numba.np import numpy_support\n'), ((429, 451), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (445, 451), True, 'import numpy as np\n'), ((519, 541), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (535, 541), True, 'import numpy as np\n')] |
import cv2
import glob, os
import numpy as np
import imutils
import pytesseract
import re
import time
import CSVHelper
import multiprocessing
import concurrent.futures as cf
DEBUG = False
def showIMG(mat, name, delay=0):
if not DEBUG:
return
cv2.namedWindow(name, cv2.WINDOW_KEEPRATIO)
cv2.imshow(name, mat)
cv2.resizeWindow(name, 1000, 1000)
cv2.waitKey(delay)
cv2.destroyWindow(name)
def splitChit(original):
# edge detection
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
# showIMG(edged, "Canny")
edged = cv2.GaussianBlur(edged, (3, 3), 0)
# finding contours
cnts = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
cnts = filter(lambda x: original.shape[0] * original.shape[1] / 6 < cv2.contourArea(x), cnts)
screenCnt = []
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
if len(approx) == 4:
screenCnt.append(approx)
# show contours
# contourImage = original.copy()
# cv2.drawContours(contourImage, screenCnt, -1, (0, 255, 0), 5)
# showIMG(contourImage, "contours")
# create individual Images
chits = []
for c in screenCnt:
pts1 = [c[0][0], c[1][0], c[2][0], c[3][0]]
# sort the contour points in the right way
pts1 = sorted(pts1, key=lambda x: x[1])
if pts1[0][0] > pts1[1][0]:
pts1[0], pts1[1] = pts1[1], pts1[0]
if pts1[2][0] > pts1[3][0]:
pts1[2], pts1[3] = pts1[3], pts1[2]
# transform
pts1 = np.float32(pts1)
pts2 = np.float32([[0, 0], [2000, 0], [0, 1000], [2000, 1000]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(original, M, (2000, 1000))
chits.append(dst)
return chits
# iput: single chit in landscape orientation (top either on the left or right)
# returns: chit in portrait mode
def rotateChit(chit):
rows, cols, _ = chit.shape
grey = cv2.cvtColor(chit, cv2.COLOR_BGR2GRAY)
leftfill = 0
for c in range(cols):
for r in range(round(rows * 0.05)):
leftfill += grey[r][c]
rightfill = 0
for c in range(cols):
for r in range(round(rows * 0.95), rows):
leftfill += grey[r][c]
return imutils.rotate_bound(chit, 90 if leftfill < rightfill else 270)
def getNumber(chit):
tmp = chit[300:370, 90:270]
cv2.threshold(tmp, 127, 255, cv2.THRESH_BINARY)
text = pytesseract.image_to_string(chit[300:370, 90:270], config="-c tessedit_char_whitelist=ABCD0123456789")
if not re.compile("[0-9][0-9]?[A-D]").match(text):
return None
if len(text) == 2:
text = "0" + text
return text
#TODO rewrite with Tensorflow
def getPoints(chit):
text = pytesseract.image_to_string(chit[1330:1490, 800:999], config="-c tessedit_char_whitelist=0123456789")
text = text.replace(" ", "")
text = text.replace("\n", "")
if text == "":
return -1
return int(text)
# takes a whole chit file and returns target number, score, 10 and x of
def analyseAll(file):
# display original file
original = cv2.imread("" + file)
showIMG(original, "original")
chits = splitChit(original)
chits = map(lambda x: rotateChit(x), chits)
ret = []
# show them
for chit in chits:
showIMG(chit, "chit")
number = getNumber(chit)
points = getPoints(chit)
if number == None:
ret.append(None)
continue
print([number, points])
ret.append({CSVHelper.TARGET: number, CSVHelper.SCORE: points, CSVHelper.TEN: 0, CSVHelper.X: 0})
return ret
# MAIN PROGRAMM
if __name__ == "__main__":
startDir = os.getcwd()
os.chdir("data")
dataset = []
print("Started Running...")
timer = time.time()
fileIterator = glob.glob("*.[Jj][Pp][Gg]")
total = len(fileIterator)
# start analysation in a separate process for each chut
processPool = cf.ProcessPoolExecutor(max_workers=1 if DEBUG else multiprocessing.cpu_count())
for i in fileIterator:
dataset.append(processPool.submit(analyseAll, i))
cf.wait(dataset)
#unpack data received from pool
dataset = map(lambda date: date.result(), dataset)
dataset = [item for sublist in dataset for item in sublist] #flatten
#cleanup dataset & write to csv
oldsize = len(dataset)
dataset = filter(lambda date:date is not None, dataset)
dataset = sorted(dataset, key=lambda date: date[CSVHelper.TARGET])
os.chdir(startDir)
CSVHelper.Writer("score.csv").writeAll(dataset)
timer = time.time() - timer
print("Recognized:" + str(len(dataset)))
print("Number failed: " + str(oldsize - len(dataset)) +" of " + str(oldsize) + " found chits, " + str(total*4) + " total")
print("This took %d seconds" % timer) | [
"re.compile",
"imutils.is_cv2",
"multiprocessing.cpu_count",
"cv2.imshow",
"cv2.warpPerspective",
"cv2.approxPolyDP",
"cv2.resizeWindow",
"cv2.threshold",
"cv2.arcLength",
"cv2.contourArea",
"concurrent.futures.wait",
"cv2.waitKey",
"glob.glob",
"cv2.getPerspectiveTransform",
"cv2.cvtCol... | [((260, 303), 'cv2.namedWindow', 'cv2.namedWindow', (['name', 'cv2.WINDOW_KEEPRATIO'], {}), '(name, cv2.WINDOW_KEEPRATIO)\n', (275, 303), False, 'import cv2\n'), ((308, 329), 'cv2.imshow', 'cv2.imshow', (['name', 'mat'], {}), '(name, mat)\n', (318, 329), False, 'import cv2\n'), ((334, 368), 'cv2.resizeWindow', 'cv2.resizeWindow', (['name', '(1000)', '(1000)'], {}), '(name, 1000, 1000)\n', (350, 368), False, 'import cv2\n'), ((373, 391), 'cv2.waitKey', 'cv2.waitKey', (['delay'], {}), '(delay)\n', (384, 391), False, 'import cv2\n'), ((396, 419), 'cv2.destroyWindow', 'cv2.destroyWindow', (['name'], {}), '(name)\n', (413, 419), False, 'import cv2\n'), ((479, 521), 'cv2.cvtColor', 'cv2.cvtColor', (['original', 'cv2.COLOR_BGR2GRAY'], {}), '(original, cv2.COLOR_BGR2GRAY)\n', (491, 521), False, 'import cv2\n'), ((533, 566), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (549, 566), False, 'import cv2\n'), ((579, 603), 'cv2.Canny', 'cv2.Canny', (['gray', '(75)', '(200)'], {}), '(gray, 75, 200)\n', (588, 603), False, 'import cv2\n'), ((646, 680), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['edged', '(3, 3)', '(0)'], {}), '(edged, (3, 3), 0)\n', (662, 680), False, 'import cv2\n'), ((716, 783), 'cv2.findContours', 'cv2.findContours', (['edged', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (732, 783), False, 'import cv2\n'), ((2145, 2183), 'cv2.cvtColor', 'cv2.cvtColor', (['chit', 'cv2.COLOR_BGR2GRAY'], {}), '(chit, cv2.COLOR_BGR2GRAY)\n', (2157, 2183), False, 'import cv2\n'), ((2448, 2511), 'imutils.rotate_bound', 'imutils.rotate_bound', (['chit', '(90 if leftfill < rightfill else 270)'], {}), '(chit, 90 if leftfill < rightfill else 270)\n', (2468, 2511), False, 'import imutils\n'), ((2571, 2618), 'cv2.threshold', 'cv2.threshold', (['tmp', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(tmp, 127, 255, cv2.THRESH_BINARY)\n', (2584, 2618), False, 
'import cv2\n'), ((2630, 2737), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['chit[300:370, 90:270]'], {'config': '"""-c tessedit_char_whitelist=ABCD0123456789"""'}), "(chit[300:370, 90:270], config=\n '-c tessedit_char_whitelist=ABCD0123456789')\n", (2657, 2737), False, 'import pytesseract\n'), ((2937, 3043), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['chit[1330:1490, 800:999]'], {'config': '"""-c tessedit_char_whitelist=0123456789"""'}), "(chit[1330:1490, 800:999], config=\n '-c tessedit_char_whitelist=0123456789')\n", (2964, 3043), False, 'import pytesseract\n'), ((3302, 3323), 'cv2.imread', 'cv2.imread', (["('' + file)"], {}), "('' + file)\n", (3312, 3323), False, 'import cv2\n'), ((3877, 3888), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3886, 3888), False, 'import glob, os\n'), ((3893, 3909), 'os.chdir', 'os.chdir', (['"""data"""'], {}), "('data')\n", (3901, 3909), False, 'import glob, os\n'), ((3971, 3982), 'time.time', 'time.time', ([], {}), '()\n', (3980, 3982), False, 'import time\n'), ((4002, 4029), 'glob.glob', 'glob.glob', (['"""*.[Jj][Pp][Gg]"""'], {}), "('*.[Jj][Pp][Gg]')\n", (4011, 4029), False, 'import glob, os\n'), ((4308, 4324), 'concurrent.futures.wait', 'cf.wait', (['dataset'], {}), '(dataset)\n', (4315, 4324), True, 'import concurrent.futures as cf\n'), ((4690, 4708), 'os.chdir', 'os.chdir', (['startDir'], {}), '(startDir)\n', (4698, 4708), False, 'import glob, os\n'), ((806, 822), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (820, 822), False, 'import imutils\n'), ((987, 1009), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (1000, 1009), False, 'import cv2\n'), ((1027, 1065), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.02 * peri)', '(True)'], {}), '(c, 0.02 * peri, True)\n', (1043, 1065), False, 'import cv2\n'), ((1722, 1738), 'numpy.float32', 'np.float32', (['pts1'], {}), '(pts1)\n', (1732, 1738), True, 'import numpy as np\n'), ((1754, 1810), 'numpy.float32', 
'np.float32', (['[[0, 0], [2000, 0], [0, 1000], [2000, 1000]]'], {}), '([[0, 0], [2000, 0], [0, 1000], [2000, 1000]])\n', (1764, 1810), True, 'import numpy as np\n'), ((1823, 1862), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1850, 1862), False, 'import cv2\n'), ((1877, 1923), 'cv2.warpPerspective', 'cv2.warpPerspective', (['original', 'M', '(2000, 1000)'], {}), '(original, M, (2000, 1000))\n', (1896, 1923), False, 'import cv2\n'), ((4775, 4786), 'time.time', 'time.time', ([], {}), '()\n', (4784, 4786), False, 'import time\n'), ((4713, 4742), 'CSVHelper.Writer', 'CSVHelper.Writer', (['"""score.csv"""'], {}), "('score.csv')\n", (4729, 4742), False, 'import CSVHelper\n'), ((908, 926), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (923, 926), False, 'import cv2\n'), ((2744, 2774), 're.compile', 're.compile', (['"""[0-9][0-9]?[A-D]"""'], {}), "('[0-9][0-9]?[A-D]')\n", (2754, 2774), False, 'import re\n'), ((4190, 4217), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4215, 4217), False, 'import multiprocessing\n')] |
import cv2
import numpy as np
#画线
def lines():
img=np.zeros((300,400,3),np.uint8)
cv2.line(img,(10,10),(200,200),(0,255,0),3)
cv2.imshow('line.jpg',img)
cv2.waitKey()
#矩形
def rectangle():
img = np.zeros((300, 400, 3), np.uint8)
cv2.rectangle(img,(10,10),(30,40),(134,2,34),1)
cv2.imshow('line.jpg', img)
cv2.waitKey()
#圆
def circle():
img = np.zeros((300, 400, 3), np.uint8)
cv2.circle(img,(60,60),30,(0,0,213),-1)
cv2.imshow('line.jpg', img)
cv2.waitKey()
#文字
def text():
img = np.zeros((300, 400, 3), np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'text at here', (80, 90), font, 1, (255, 255, 255), 3)
cv2.imshow('line.jpg', img)
cv2.waitKey()
def main():
text()
main() | [
"cv2.rectangle",
"cv2.line",
"cv2.imshow",
"cv2.putText",
"numpy.zeros",
"cv2.circle",
"cv2.waitKey"
] | [((55, 88), 'numpy.zeros', 'np.zeros', (['(300, 400, 3)', 'np.uint8'], {}), '((300, 400, 3), np.uint8)\n', (63, 88), True, 'import numpy as np\n'), ((90, 141), 'cv2.line', 'cv2.line', (['img', '(10, 10)', '(200, 200)', '(0, 255, 0)', '(3)'], {}), '(img, (10, 10), (200, 200), (0, 255, 0), 3)\n', (98, 141), False, 'import cv2\n'), ((138, 165), 'cv2.imshow', 'cv2.imshow', (['"""line.jpg"""', 'img'], {}), "('line.jpg', img)\n", (148, 165), False, 'import cv2\n'), ((169, 182), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (180, 182), False, 'import cv2\n'), ((216, 249), 'numpy.zeros', 'np.zeros', (['(300, 400, 3)', 'np.uint8'], {}), '((300, 400, 3), np.uint8)\n', (224, 249), True, 'import numpy as np\n'), ((254, 309), 'cv2.rectangle', 'cv2.rectangle', (['img', '(10, 10)', '(30, 40)', '(134, 2, 34)', '(1)'], {}), '(img, (10, 10), (30, 40), (134, 2, 34), 1)\n', (267, 309), False, 'import cv2\n'), ((306, 333), 'cv2.imshow', 'cv2.imshow', (['"""line.jpg"""', 'img'], {}), "('line.jpg', img)\n", (316, 333), False, 'import cv2\n'), ((338, 351), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (349, 351), False, 'import cv2\n'), ((379, 412), 'numpy.zeros', 'np.zeros', (['(300, 400, 3)', 'np.uint8'], {}), '((300, 400, 3), np.uint8)\n', (387, 412), True, 'import numpy as np\n'), ((417, 463), 'cv2.circle', 'cv2.circle', (['img', '(60, 60)', '(30)', '(0, 0, 213)', '(-1)'], {}), '(img, (60, 60), 30, (0, 0, 213), -1)\n', (427, 463), False, 'import cv2\n'), ((461, 488), 'cv2.imshow', 'cv2.imshow', (['"""line.jpg"""', 'img'], {}), "('line.jpg', img)\n", (471, 488), False, 'import cv2\n'), ((493, 506), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (504, 506), False, 'import cv2\n'), ((533, 566), 'numpy.zeros', 'np.zeros', (['(300, 400, 3)', 'np.uint8'], {}), '((300, 400, 3), np.uint8)\n', (541, 566), True, 'import numpy as np\n'), ((607, 678), 'cv2.putText', 'cv2.putText', (['img', '"""text at here"""', '(80, 90)', 'font', '(1)', '(255, 255, 255)', '(3)'], {}), "(img, 'text at 
here', (80, 90), font, 1, (255, 255, 255), 3)\n", (618, 678), False, 'import cv2\n'), ((682, 709), 'cv2.imshow', 'cv2.imshow', (['"""line.jpg"""', 'img'], {}), "('line.jpg', img)\n", (692, 709), False, 'import cv2\n'), ((714, 727), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (725, 727), False, 'import cv2\n')] |
#Here we extract feature using pre-trained pointnet++ model on the single-view pcd
#The extracted shape feature would be thee input of the GCN for shape classification
from dataloader import SinglePoint
import numpy as np
import os
import torch
from tqdm import tqdm
import pointnet2_cls as pointnet
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-bs", "--batchSize", type=int, default=1)
parser.add_argument("-num_class", type=int, default=40)
parser.add_argument('--val_path', default='../data/single_view_modelnet/modelnetdata/*/test', help='path of the test data')
parser.add_argument('--train_path', default='../data/single_view_modelnet/modelnetdata/*/train', help='path of the train data')
parser.add_argument('--output_data_path', default='../data/modelnet_trained_feature/', help='path of the output feature')
parser.add_argument('--checkpoint_path', default='../log/pointnet_on_single_view.pth', help='path of the pre_trained model')
parser.add_argument("--workers", default=0)
parser.set_defaults(train=False)
if __name__ == '__main__':
args = parser.parse_args()
train_dataset = SinglePoint(args.train_path)
train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=False,batch_size=args.batchSize) # shuffle needs to be false! it's done within the trainer
val_dataset = SinglePoint(args.val_path)
val_loader = torch.utils.data.DataLoader(val_dataset, shuffle=False,batch_size=args.batchSize)
model = pointnet.get_model(args.num_class).cuda()
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
mean_correct_train = []
class_acc_train = np.zeros((args.num_class, 3))
mean_correct_test = []
class_acc_test = np.zeros((args.num_class, 3))
with torch.no_grad():
for j, data in tqdm(enumerate(train_loader), total=len(train_loader)):
points = np.asarray(data[0], dtype=np.float32)
target1 = data[1]
points = points.transpose(0, 2, 1)
points = torch.tensor(points)
points, target1 = points.cuda(), target1.cuda()
classifier = model.eval()
_, _, features_train = classifier.forward(points)
name = data[3][-1]
name1 = os.path.split(name)[-1]
# scene_name = name1[:-13]
scene_name = data[2][-1]
name2 = name1[:-4]
out_file = args.output_data_path + str(scene_name) + '/' + 'train'
if not os.path.exists(out_file):
os.makedirs(out_file)
path = os.path.join(out_file, name2 + '.pth')
torch.save(features_train, path)
vote_pool = torch.zeros(target1.shape[0], args.num_class).cuda()
for _ in range(1):
pred, _,_ = classifier(points)
vote_pool += pred
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target1.cpu()):
classacc = pred_choice[target1 == cat].eq(target1[target1 == cat].long().data).cpu().sum()
class_acc_train[cat, 0] += classacc.item() / float(points[target1 == cat].size()[0])
class_acc_train[cat, 1] += 1
correct = pred_choice.eq(target1.long().data).cpu().sum()
mean_correct_train.append(correct.item() / float(points.size()[0]))
class_acc_train[:, 2] = class_acc_train[:, 0] / class_acc_train[:, 1]
print(class_acc_train[:, 2])
class_acc_train = np.mean(class_acc_train[:, 2])
instance_acc_train = np.mean(mean_correct_train)
print('Train Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc_train, class_acc_train))
for j, data in tqdm(enumerate(val_loader), total=len(val_loader)):
points = np.asarray(data[0], dtype=np.float32)
target1 = data[1]
points = points.transpose(0, 2, 1)
points = torch.tensor(points)
points, target1 = points.cuda(), target1.cuda()
classifier = model.eval()
_, _, features_test = classifier.forward(points)
name = data[3][-1]
name1 = os.path.split(name)[-1]
# scene_name = name1[:-13]
scene_name = data[2][-1]
name2 = name1[:-4]
out_file = args.output_data_path + str(scene_name) + '/' + 'test'
if not os.path.exists(out_file):
os.makedirs(out_file)
path = os.path.join(out_file, name2 + '.pth')
torch.save(features_test, path)
vote_pool = torch.zeros(target1.shape[0], args.num_class).cuda()
for _ in range(1):
pred, _,_ = classifier(points)
vote_pool += pred
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target1.cpu()):
classacc = pred_choice[target1 == cat].eq(target1[target1 == cat].long().data).cpu().sum()
class_acc_test[cat, 0] += classacc.item() / float(points[target1 == cat].size()[0])
class_acc_test[cat, 1] += 1
correct = pred_choice.eq(target1.long().data).cpu().sum()
mean_correct_test.append(correct.item() / float(points.size()[0]))
class_acc_test[:, 2] = class_acc_test[:, 0] / class_acc_test[:, 1]
print(class_acc_test[:, 2])
class_acc_test = np.mean(class_acc_test[:, 2])
instance_acc_test = np.mean(mean_correct_test)
print('test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc_test, class_acc_test))
| [
"numpy.mean",
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"torch.load",
"pointnet2_cls.get_model",
"numpy.asarray",
"os.path.join",
"os.path.split",
"torch.no_grad",
"numpy.zeros",
"torch.tensor",
"torch.save",
"torch.utils.data.DataLoader",
"dataloader.SinglePoint",
"t... | [((327, 352), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (350, 352), False, 'import argparse\n'), ((1128, 1156), 'dataloader.SinglePoint', 'SinglePoint', (['args.train_path'], {}), '(args.train_path)\n', (1139, 1156), False, 'from dataloader import SinglePoint\n'), ((1176, 1265), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'shuffle': '(False)', 'batch_size': 'args.batchSize'}), '(train_dataset, shuffle=False, batch_size=args.\n batchSize)\n', (1203, 1265), False, 'import torch\n'), ((1336, 1362), 'dataloader.SinglePoint', 'SinglePoint', (['args.val_path'], {}), '(args.val_path)\n', (1347, 1362), False, 'from dataloader import SinglePoint\n'), ((1380, 1467), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'shuffle': '(False)', 'batch_size': 'args.batchSize'}), '(val_dataset, shuffle=False, batch_size=args.\n batchSize)\n', (1407, 1467), False, 'import torch\n'), ((1534, 1566), 'torch.load', 'torch.load', (['args.checkpoint_path'], {}), '(args.checkpoint_path)\n', (1544, 1566), False, 'import torch\n'), ((1692, 1721), 'numpy.zeros', 'np.zeros', (['(args.num_class, 3)'], {}), '((args.num_class, 3))\n', (1700, 1721), True, 'import numpy as np\n'), ((1770, 1799), 'numpy.zeros', 'np.zeros', (['(args.num_class, 3)'], {}), '((args.num_class, 3))\n', (1778, 1799), True, 'import numpy as np\n'), ((1809, 1824), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1822, 1824), False, 'import torch\n'), ((3520, 3550), 'numpy.mean', 'np.mean', (['class_acc_train[:, 2]'], {}), '(class_acc_train[:, 2])\n', (3527, 3550), True, 'import numpy as np\n'), ((3580, 3607), 'numpy.mean', 'np.mean', (['mean_correct_train'], {}), '(mean_correct_train)\n', (3587, 3607), True, 'import numpy as np\n'), ((5394, 5423), 'numpy.mean', 'np.mean', (['class_acc_test[:, 2]'], {}), '(class_acc_test[:, 2])\n', (5401, 5423), True, 'import numpy as np\n'), ((5452, 5478), 'numpy.mean', 'np.mean', 
(['mean_correct_test'], {}), '(mean_correct_test)\n', (5459, 5478), True, 'import numpy as np\n'), ((1475, 1509), 'pointnet2_cls.get_model', 'pointnet.get_model', (['args.num_class'], {}), '(args.num_class)\n', (1493, 1509), True, 'import pointnet2_cls as pointnet\n'), ((1926, 1963), 'numpy.asarray', 'np.asarray', (['data[0]'], {'dtype': 'np.float32'}), '(data[0], dtype=np.float32)\n', (1936, 1963), True, 'import numpy as np\n'), ((2062, 2082), 'torch.tensor', 'torch.tensor', (['points'], {}), '(points)\n', (2074, 2082), False, 'import torch\n'), ((2607, 2645), 'os.path.join', 'os.path.join', (['out_file', "(name2 + '.pth')"], {}), "(out_file, name2 + '.pth')\n", (2619, 2645), False, 'import os\n'), ((2658, 2690), 'torch.save', 'torch.save', (['features_train', 'path'], {}), '(features_train, path)\n', (2668, 2690), False, 'import torch\n'), ((3810, 3847), 'numpy.asarray', 'np.asarray', (['data[0]'], {'dtype': 'np.float32'}), '(data[0], dtype=np.float32)\n', (3820, 3847), True, 'import numpy as np\n'), ((3946, 3966), 'torch.tensor', 'torch.tensor', (['points'], {}), '(points)\n', (3958, 3966), False, 'import torch\n'), ((4490, 4528), 'os.path.join', 'os.path.join', (['out_file', "(name2 + '.pth')"], {}), "(out_file, name2 + '.pth')\n", (4502, 4528), False, 'import os\n'), ((4541, 4572), 'torch.save', 'torch.save', (['features_test', 'path'], {}), '(features_test, path)\n', (4551, 4572), False, 'import torch\n'), ((2294, 2313), 'os.path.split', 'os.path.split', (['name'], {}), '(name)\n', (2307, 2313), False, 'import os\n'), ((2523, 2547), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (2537, 2547), False, 'import os\n'), ((2565, 2586), 'os.makedirs', 'os.makedirs', (['out_file'], {}), '(out_file)\n', (2576, 2586), False, 'import os\n'), ((4178, 4197), 'os.path.split', 'os.path.split', (['name'], {}), '(name)\n', (4191, 4197), False, 'import os\n'), ((4406, 4430), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (4420, 
4430), False, 'import os\n'), ((4448, 4469), 'os.makedirs', 'os.makedirs', (['out_file'], {}), '(out_file)\n', (4459, 4469), False, 'import os\n'), ((2715, 2760), 'torch.zeros', 'torch.zeros', (['target1.shape[0]', 'args.num_class'], {}), '(target1.shape[0], args.num_class)\n', (2726, 2760), False, 'import torch\n'), ((4597, 4642), 'torch.zeros', 'torch.zeros', (['target1.shape[0]', 'args.num_class'], {}), '(target1.shape[0], args.num_class)\n', (4608, 4642), False, 'import torch\n')] |
"""
This module calculates corrections for the species listed below, fitted to the experimental and computed
entries given to the CorrectionCalculator constructor.
"""
import warnings
from collections import OrderedDict
from typing import Dict, List, Tuple, Union, Sequence
try:
import ruamel.yaml as yaml
except ImportError:
try:
import ruamel_yaml as yaml # type: ignore # noqa
except ImportError:
import yaml # type: ignore # noqa
import numpy as np
import plotly.graph_objects as go
from monty.serialization import loadfn
from scipy.optimize import curve_fit
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.analysis.structure_analyzer import sulfide_type
def _func(x, *m):
"""
Helper function for curve_fit.
"""
return np.dot(x, m)
class CorrectionCalculator:
"""
A CorrectionCalculator contains experimental and computed entries which it uses to compute corrections.
It graphs residual errors after applying the computed corrections and creates the MPCompatibility.yaml
file the Correction classes use.
Attributes:
species: list of species that corrections are being calculated for
exp_compounds: list of dictionaries which each contain a compound's formula and experimental data
calc_compounds: dictionary of ComputedEntry objects
corrections: list of corrections in same order as species list
corrections_std_error: list of the variances of the corrections in same order as species list
corrections_dict: dictionary of format {'species': (value, uncertainty)} for easier correction lookup
"""
def __init__(
self,
species: Sequence[str] = (
"oxide",
"peroxide",
"superoxide",
"S",
"F",
"Cl",
"Br",
"I",
"N",
"Se",
"Si",
"Sb",
"Te",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"W",
"Mo",
"H",
),
max_error: float = 0.1,
allow_unstable: Union[float, bool] = 0.1,
exclude_polyanions: Sequence[str] = (
"SO4",
"CO3",
"NO3",
"OCl3",
"SiO4",
"SeO3",
"TiO3",
"TiO4",
),
) -> None:
"""
Initializes a CorrectionCalculator.
Args:
species: list of species to calculate corrections for
max_error: maximum tolerable relative uncertainty in experimental energy.
Compounds with relative uncertainty greater than this value will be excluded from the fit
allow_unstable: whether unstable entries are to be included in the fit. If True, all compounds will
be included regardless of their energy above hull. If False or a float, compounds with
energy above hull greater than the given value (defaults to 0.1 eV/atom) will be
excluded
exclude_polyanions: a list of polyanions that contain additional sources of error that may negatively
influence the quality of the fitted corrections. Compounds with these polyanions
will be excluded from the fit
"""
self.species = species
self.max_error = max_error
if not allow_unstable:
self.allow_unstable = 0.1
else:
self.allow_unstable = allow_unstable
self.exclude_polyanions = exclude_polyanions
self.corrections: List[float] = []
self.corrections_std_error: List[float] = []
self.corrections_dict: Dict[str, Tuple[float, float]] = {} # {'species': (value, uncertainty)}
# to help the graph_residual_error_per_species() method differentiate between oxygen containing compounds
if "oxide" in self.species:
self.oxides: List[str] = []
if "peroxide" in self.species:
self.peroxides: List[str] = []
if "superoxide" in self.species:
self.superoxides: List[str] = []
if "S" in self.species:
self.sulfides: List[str] = []
def compute_from_files(self, exp_gz: str, comp_gz: str):
"""
Args:
exp_gz: name of .json.gz file that contains experimental data
data in .json.gz file should be a list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
comp_gz: name of .json.gz file that contains computed entries
data in .json.gz file should be a dictionary of {chemical formula: ComputedEntry}
"""
exp_entries = loadfn(exp_gz)
calc_entries = loadfn(comp_gz)
return self.compute_corrections(exp_entries, calc_entries)
    def compute_corrections(self, exp_entries: list, calc_entries: dict) -> dict:
        """
        Computes the corrections and fills in correction, corrections_std_error, and corrections_dict.

        Args:
            exp_entries: list of dictionary objects with the following keys/values:
                {"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
                "uncertainty": uncertainty in formation energy}
            calc_entries: dictionary of computed entries, of the form {chemical formula: ComputedEntry}

        Returns:
            dict mapping each fitted specie to a (correction, uncertainty) tuple in eV/atom.

        Raises:
            ValueError: calc_compounds is missing an entry
        """
        self.exp_compounds = exp_entries
        self.calc_compounds = calc_entries
        # names/diffs/coeff_mat/exp_uncer are the rows of the least-squares system:
        # diffs ~= coeff_mat @ corrections, weighted by exp_uncer
        self.names: List[str] = []
        self.diffs: List[float] = []
        self.coeff_mat: List[List[float]] = []
        self.exp_uncer: List[float] = []
        # remove any corrections in calc_compounds
        for entry in self.calc_compounds.values():
            entry.correction = 0
        for cmpd_info in self.exp_compounds:
            # to get consistent element ordering in formula
            name = Composition(cmpd_info["formula"]).reduced_formula
            allow = True
            compound = self.calc_compounds.get(name, None)
            if not compound:
                warnings.warn(
                    "Compound {} is not found in provided computed entries and is excluded from the fit".format(name)
                )
                continue
            # filter out compounds with large uncertainties
            relative_uncertainty = abs(cmpd_info["uncertainty"] / cmpd_info["exp energy"])
            if relative_uncertainty > self.max_error:
                allow = False
                warnings.warn(
                    "Compound {} is excluded from the fit due to high experimental uncertainty ({}%)".format(
                        name, relative_uncertainty
                    )
                )
            # filter out compounds containing certain polyanions
            for anion in self.exclude_polyanions:
                if anion in name or anion in cmpd_info["formula"]:
                    allow = False
                    warnings.warn(
                        "Compound {} contains the polyanion {} and is excluded from the fit".format(name, anion)
                    )
                    break
            # filter out compounds that are unstable
            # (allow_unstable is a float threshold when filtering is active; True disables it)
            if isinstance(self.allow_unstable, float):
                try:
                    eah = compound.data["e_above_hull"]
                except KeyError:
                    raise ValueError("Missing e above hull data")
                if eah > self.allow_unstable:
                    allow = False
                    warnings.warn(
                        "Compound {} is unstable and excluded from the fit (e_above_hull = {})".format(name, eah)
                    )
            if allow:
                comp = Composition(name)
                elems = list(comp.as_dict())
                reactants = []
                # build the formation reaction: elemental references -> compound
                for elem in elems:
                    try:
                        elem_name = Composition(elem).reduced_formula
                        reactants.append(self.calc_compounds[elem_name])
                    except KeyError:
                        raise ValueError("Computed entries missing " + elem)
                rxn = ComputedReaction(reactants, [compound])
                rxn.normalize_to(comp)
                energy = rxn.calculated_reaction_energy
                # one coefficient per fitted specie: number of atoms of that
                # specie/group in this formula (0 when the group does not apply)
                coeff = []
                for specie in self.species:
                    if specie == "oxide":
                        if compound.data["oxide_type"] == "oxide":
                            coeff.append(comp["O"])
                            self.oxides.append(name)
                        else:
                            coeff.append(0)
                    elif specie == "peroxide":
                        if compound.data["oxide_type"] == "peroxide":
                            coeff.append(comp["O"])
                            self.peroxides.append(name)
                        else:
                            coeff.append(0)
                    elif specie == "superoxide":
                        if compound.data["oxide_type"] == "superoxide":
                            coeff.append(comp["O"])
                            self.superoxides.append(name)
                        else:
                            coeff.append(0)
                    elif specie == "S":
                        if Element("S") in comp:
                            # prefer precomputed sulfide_type metadata; fall back to
                            # structure analysis when a structure is available
                            sf_type = "sulfide"
                            if compound.data.get("sulfide_type"):
                                sf_type = compound.data["sulfide_type"]
                            elif hasattr(compound, "structure"):
                                sf_type = sulfide_type(compound.structure)
                            if sf_type == "sulfide":
                                coeff.append(comp["S"])
                                self.sulfides.append(name)
                            else:
                                coeff.append(0)
                        else:
                            coeff.append(0)
                    else:
                        try:
                            coeff.append(comp[specie])
                        except ValueError:
                            raise ValueError("We can't detect this specie: {}".format(specie))
                # everything is normalized to per-atom quantities
                self.names.append(name)
                self.diffs.append((cmpd_info["exp energy"] - energy) / comp.num_atoms)
                self.coeff_mat.append([i / comp.num_atoms for i in coeff])
                self.exp_uncer.append((cmpd_info["uncertainty"]) / comp.num_atoms)
        # for any exp entries with no uncertainty value, assign average uncertainty value
        sigma = np.array(self.exp_uncer)
        sigma[sigma == 0] = np.nan
        with warnings.catch_warnings():
            warnings.simplefilter(
                "ignore", category=RuntimeWarning
            )  # numpy raises warning if the entire array is nan values
            mean_uncer = np.nanmean(sigma)
        sigma = np.where(np.isnan(sigma), mean_uncer, sigma)
        if np.isnan(mean_uncer):
            # no uncertainty values for any compounds, don't try to weight
            popt, self.pcov = curve_fit(_func, self.coeff_mat, self.diffs, p0=np.ones(len(self.species)))
        else:
            # weighted least squares: absolute_sigma keeps pcov in physical units
            popt, self.pcov = curve_fit(
                _func,
                self.coeff_mat,
                self.diffs,
                p0=np.ones(len(self.species)),
                sigma=sigma,
                absolute_sigma=True,
            )
        self.corrections = popt.tolist()
        # standard errors are the square roots of the covariance diagonal
        self.corrections_std_error = np.sqrt(np.diag(self.pcov)).tolist()
        for i in range(len(self.species)):
            self.corrections_dict[self.species[i]] = (
                round(self.corrections[i], 3),
                round(self.corrections_std_error[i], 4),
            )
        return self.corrections_dict
def graph_residual_error(self) -> go.Figure:
"""
Graphs the residual errors for all compounds after applying computed corrections.
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_graph = self.names.copy()
abs_errors, labels_graph = (list(t) for t in zip(*sorted(zip(abs_errors, labels_graph)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_graph,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors"),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(self.diffs)))))
print("Mean = " + str(abs(np.mean(np.array(self.diffs)))))
print("Std Dev = " + str(np.std(np.array(self.diffs))))
return fig
def graph_residual_error_per_species(self, specie: str) -> go.Figure:
"""
Graphs the residual errors for each compound that contains specie after applying computed corrections.
Args:
specie: the specie/group that residual errors are being plotted for
Raises:
ValueError: the specie is not a valid specie that this class fits corrections for
"""
if specie not in self.species:
raise ValueError("not a valid specie")
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_species = self.names.copy()
diffs_cpy = self.diffs.copy()
num = len(labels_species)
if specie in ("oxide", "peroxide", "superoxide", "S"):
if specie == "oxide":
compounds = self.oxides
elif specie == "peroxide":
compounds = self.peroxides
elif specie == "superoxides":
compounds = self.superoxides
else:
compounds = self.sulfides
for i in range(num):
if labels_species[num - i - 1] not in compounds:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del diffs_cpy[num - i - 1]
else:
for i in range(num):
if not Composition(labels_species[num - i - 1])[specie]:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del diffs_cpy[num - i - 1]
abs_errors, labels_species = (list(t) for t in zip(*sorted(zip(abs_errors, labels_species)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_species,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors for " + specie),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(diffs_cpy)))))
print("Mean = " + str(abs(np.mean(np.array(diffs_cpy)))))
print("Std Dev = " + str(np.std(np.array(diffs_cpy))))
return fig
def make_yaml(self, name: str = "MP2020") -> None:
"""
Creates the _name_Compatibility.yaml that stores corrections as well as _name_CompatibilityUncertainties.yaml
for correction uncertainties.
Args:
name: str, alternate name for the created .yaml file.
Default: "MP2020"
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
# elements with U values
ggaucorrection_species = ["V", "Cr", "Mn", "Fe", "Co", "Ni", "W", "Mo"]
comp_corr: "OrderedDict[str, float]" = OrderedDict()
o: "OrderedDict[str, float]" = OrderedDict()
f: "OrderedDict[str, float]" = OrderedDict()
comp_corr_error: "OrderedDict[str, float]" = OrderedDict()
o_error: "OrderedDict[str, float]" = OrderedDict()
f_error: "OrderedDict[str, float]" = OrderedDict()
for specie in self.species:
if specie in ggaucorrection_species:
o[specie] = self.corrections_dict[specie][0]
f[specie] = self.corrections_dict[specie][0]
o_error[specie] = self.corrections_dict[specie][1]
f_error[specie] = self.corrections_dict[specie][1]
else:
comp_corr[specie] = self.corrections_dict[specie][0]
comp_corr_error[specie] = self.corrections_dict[specie][1]
comp_corr["ozonide"] = 0 # do i need this??
comp_corr_error["ozonide"] = 0
outline = """\
Name:
Corrections:
GGAUMixingCorrections:
O:
F:
CompositionCorrections:
Uncertainties:
GGAUMixingCorrections:
O:
F:
CompositionCorrections:
"""
fn = name + "Compatibility.yaml"
file = open(fn, "w")
yml = yaml.YAML()
yml.Representer.add_representer(OrderedDict, yml.Representer.represent_dict)
yml.default_flow_style = False
contents = yml.load(outline)
contents["Name"] = name
# make CommentedMap so comments can be added
contents["Corrections"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o)
contents["Corrections"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f)
contents["Corrections"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr)
contents["Uncertainties"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o_error)
contents["Uncertainties"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f_error)
contents["Uncertainties"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr_error)
contents["Corrections"].yaml_set_start_comment("Energy corrections in eV/atom", indent=2)
contents["Corrections"]["GGAUMixingCorrections"].yaml_set_start_comment(
"Composition-based corrections applied to transition metal oxides\nand fluorides to "
+ 'make GGA and GGA+U energies compatible\nwhen compat_type = "Advanced" (default)',
indent=4,
)
contents["Corrections"]["CompositionCorrections"].yaml_set_start_comment(
"Composition-based corrections applied to any compound containing\nthese species as anions",
indent=4,
)
contents["Uncertainties"].yaml_set_start_comment(
"Uncertainties corresponding to each energy correction (eV/atom)", indent=2
)
yaml.dump(contents, file)
file.close()
| [
"pymatgen.analysis.structure_analyzer.sulfide_type",
"numpy.array",
"numpy.nanmean",
"plotly.graph_objects.layout.Title",
"plotly.graph_objects.layout.yaxis.Title",
"yaml.comments.CommentedMap",
"pymatgen.core.composition.Composition",
"yaml.YAML",
"monty.serialization.loadfn",
"pymatgen.core.peri... | [((907, 919), 'numpy.dot', 'np.dot', (['x', 'm'], {}), '(x, m)\n', (913, 919), True, 'import numpy as np\n'), ((5106, 5120), 'monty.serialization.loadfn', 'loadfn', (['exp_gz'], {}), '(exp_gz)\n', (5112, 5120), False, 'from monty.serialization import loadfn\n'), ((5144, 5159), 'monty.serialization.loadfn', 'loadfn', (['comp_gz'], {}), '(comp_gz)\n', (5150, 5159), False, 'from monty.serialization import loadfn\n'), ((11128, 11152), 'numpy.array', 'np.array', (['self.exp_uncer'], {}), '(self.exp_uncer)\n', (11136, 11152), True, 'import numpy as np\n'), ((11503, 11523), 'numpy.isnan', 'np.isnan', (['mean_uncer'], {}), '(mean_uncer)\n', (11511, 11523), True, 'import numpy as np\n'), ((17348, 17361), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17359, 17361), False, 'from collections import OrderedDict\n'), ((17401, 17414), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17412, 17414), False, 'from collections import OrderedDict\n'), ((17454, 17467), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17465, 17467), False, 'from collections import OrderedDict\n'), ((17522, 17535), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17533, 17535), False, 'from collections import OrderedDict\n'), ((17581, 17594), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17592, 17594), False, 'from collections import OrderedDict\n'), ((17640, 17653), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17651, 17653), False, 'from collections import OrderedDict\n'), ((18650, 18661), 'yaml.YAML', 'yaml.YAML', ([], {}), '()\n', (18659, 18661), False, 'import yaml\n'), ((18974, 19003), 'yaml.comments.CommentedMap', 'yaml.comments.CommentedMap', (['o'], {}), '(o)\n', (19000, 19003), False, 'import yaml\n'), ((19068, 19097), 'yaml.comments.CommentedMap', 'yaml.comments.CommentedMap', (['f'], {}), '(f)\n', (19094, 19097), False, 'import yaml\n'), ((19158, 19195), 'yaml.comments.CommentedMap', 
'yaml.comments.CommentedMap', (['comp_corr'], {}), '(comp_corr)\n', (19184, 19195), False, 'import yaml\n'), ((19262, 19297), 'yaml.comments.CommentedMap', 'yaml.comments.CommentedMap', (['o_error'], {}), '(o_error)\n', (19288, 19297), False, 'import yaml\n'), ((19364, 19399), 'yaml.comments.CommentedMap', 'yaml.comments.CommentedMap', (['f_error'], {}), '(f_error)\n', (19390, 19399), False, 'import yaml\n'), ((19462, 19505), 'yaml.comments.CommentedMap', 'yaml.comments.CommentedMap', (['comp_corr_error'], {}), '(comp_corr_error)\n', (19488, 19505), False, 'import yaml\n'), ((20297, 20322), 'yaml.dump', 'yaml.dump', (['contents', 'file'], {}), '(contents, file)\n', (20306, 20322), False, 'import yaml\n'), ((11202, 11227), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (11225, 11227), False, 'import warnings\n'), ((11241, 11297), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (11262, 11297), False, 'import warnings\n'), ((11411, 11428), 'numpy.nanmean', 'np.nanmean', (['sigma'], {}), '(sigma)\n', (11421, 11428), True, 'import numpy as np\n'), ((11455, 11470), 'numpy.isnan', 'np.isnan', (['sigma'], {}), '(sigma)\n', (11463, 11470), True, 'import numpy as np\n'), ((6394, 6427), 'pymatgen.core.composition.Composition', 'Composition', (["cmpd_info['formula']"], {}), "(cmpd_info['formula'])\n", (6405, 6427), False, 'from pymatgen.core.composition import Composition\n'), ((8214, 8231), 'pymatgen.core.composition.Composition', 'Composition', (['name'], {}), '(name)\n', (8225, 8231), False, 'from pymatgen.core.composition import Composition\n'), ((8649, 8688), 'pymatgen.analysis.reaction_calculator.ComputedReaction', 'ComputedReaction', (['reactants', '[compound]'], {}), '(reactants, [compound])\n', (8665, 8688), False, 'from pymatgen.analysis.reaction_calculator import ComputedReaction\n'), ((12057, 12075), 'numpy.diag', 'np.diag', (['self.pcov'], {}), 
'(self.pcov)\n', (12064, 12075), True, 'import numpy as np\n'), ((12714, 12754), 'numpy.dot', 'np.dot', (['self.coeff_mat', 'self.corrections'], {}), '(self.coeff_mat, self.corrections)\n', (12720, 12754), True, 'import numpy as np\n'), ((14571, 14611), 'numpy.dot', 'np.dot', (['self.coeff_mat', 'self.corrections'], {}), '(self.coeff_mat, self.corrections)\n', (14577, 14611), True, 'import numpy as np\n'), ((13015, 13039), 'numpy.linspace', 'np.linspace', (['(1)', 'num', 'num'], {}), '(1, num, num)\n', (13026, 13039), True, 'import numpy as np\n'), ((13205, 13244), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': '"""Residual Errors"""'}), "(text='Residual Errors')\n", (13220, 13244), True, 'import plotly.graph_objects as go\n'), ((13448, 13468), 'numpy.array', 'np.array', (['abs_errors'], {}), '(abs_errors)\n', (13456, 13468), True, 'import numpy as np\n'), ((13510, 13530), 'numpy.array', 'np.array', (['abs_errors'], {}), '(abs_errors)\n', (13518, 13530), True, 'import numpy as np\n'), ((13574, 13594), 'numpy.array', 'np.array', (['abs_errors'], {}), '(abs_errors)\n', (13582, 13594), True, 'import numpy as np\n'), ((13809, 13829), 'numpy.array', 'np.array', (['self.diffs'], {}), '(self.diffs)\n', (13817, 13829), True, 'import numpy as np\n'), ((15410, 15450), 'pymatgen.core.composition.Composition', 'Composition', (['labels_species[num - i - 1]'], {}), '(labels_species[num - i - 1])\n', (15421, 15450), False, 'from pymatgen.core.composition import Composition\n'), ((15829, 15853), 'numpy.linspace', 'np.linspace', (['(1)', 'num', 'num'], {}), '(1, num, num)\n', (15840, 15853), True, 'import numpy as np\n'), ((16021, 16074), 'plotly.graph_objects.layout.Title', 'go.layout.Title', ([], {'text': "('Residual Errors for ' + specie)"}), "(text='Residual Errors for ' + specie)\n", (16036, 16074), True, 'import plotly.graph_objects as go\n'), ((16278, 16298), 'numpy.array', 'np.array', (['abs_errors'], {}), '(abs_errors)\n', (16286, 16298), True, 
'import numpy as np\n'), ((16340, 16360), 'numpy.array', 'np.array', (['abs_errors'], {}), '(abs_errors)\n', (16348, 16360), True, 'import numpy as np\n'), ((16404, 16424), 'numpy.array', 'np.array', (['abs_errors'], {}), '(abs_errors)\n', (16412, 16424), True, 'import numpy as np\n'), ((16637, 16656), 'numpy.array', 'np.array', (['diffs_cpy'], {}), '(diffs_cpy)\n', (16645, 16656), True, 'import numpy as np\n'), ((8405, 8422), 'pymatgen.core.composition.Composition', 'Composition', (['elem'], {}), '(elem)\n', (8416, 8422), False, 'from pymatgen.core.composition import Composition\n'), ((13677, 13697), 'numpy.array', 'np.array', (['self.diffs'], {}), '(self.diffs)\n', (13685, 13697), True, 'import numpy as np\n'), ((13744, 13764), 'numpy.array', 'np.array', (['self.diffs'], {}), '(self.diffs)\n', (13752, 13764), True, 'import numpy as np\n'), ((16507, 16526), 'numpy.array', 'np.array', (['diffs_cpy'], {}), '(diffs_cpy)\n', (16515, 16526), True, 'import numpy as np\n'), ((16573, 16592), 'numpy.array', 'np.array', (['diffs_cpy'], {}), '(diffs_cpy)\n', (16581, 16592), True, 'import numpy as np\n'), ((13290, 13344), 'plotly.graph_objects.layout.yaxis.Title', 'go.layout.yaxis.Title', ([], {'text': '"""Residual Error (eV/atom)"""'}), "(text='Residual Error (eV/atom)')\n", (13311, 13344), True, 'import plotly.graph_objects as go\n'), ((16120, 16174), 'plotly.graph_objects.layout.yaxis.Title', 'go.layout.yaxis.Title', ([], {'text': '"""Residual Error (eV/atom)"""'}), "(text='Residual Error (eV/atom)')\n", (16141, 16174), True, 'import plotly.graph_objects as go\n'), ((9815, 9827), 'pymatgen.core.periodic_table.Element', 'Element', (['"""S"""'], {}), "('S')\n", (9822, 9827), False, 'from pymatgen.core.periodic_table import Element\n'), ((10130, 10162), 'pymatgen.analysis.structure_analyzer.sulfide_type', 'sulfide_type', (['compound.structure'], {}), '(compound.structure)\n', (10142, 10162), False, 'from pymatgen.analysis.structure_analyzer import sulfide_type\n')] |
import multipletau
from extractSpadData import extractSpadData
import matplotlib.pyplot as plt
import numpy as np
from distance2detElements import distance2detElements
from distance2detElements import SPADcoordFromDetNumb as coord
from distance2detElements import SPADshiftvectorCrossCorr
from colorFromMap import colorFromMap
import fnmatch
from plotColors import plotColors
from getFCSinfo import getFCSinfo
from meas_to_count import file_to_FCScount
from os import getcwd
from pathlib import Path
from listFiles import listFiles
import ntpath
from corr2csv import corr2csv
class correlations:
    """Plain attribute container: correlation curves are attached dynamically."""
def FCS2Corr(data, dwellTime, listOfG=['central', 'sum3', 'sum5', 'chessboard', 'ullr'], accuracy=50):
    """
    Convert SPAD-FCS data to correlation curves
    ==========  ===============================================================
    Input       Meaning
    ----------  ---------------------------------------------------------------
    data        Data variable, i.e. output from binFile2Data
    dwellTime   Bin time [in µs]
    listofG     List of correlations to be calculated
    accuracy    Accuracy of the autocorrelation function, typically 50
    ==========  ===============================================================
    Output      Meaning
    ----------  ---------------------------------------------------------------
    G           Object with all autocorrelations
                E.g. G.central contains the array with the central detector
                element autocorrelation
    ==========  ===============================================================
    """
    # object from correlations class in which all correlation data is stored
    G = correlations()
    # dwell time
    G.dwellTime = dwellTime
    if len(np.shape(data)) == 1:
        # vector is given instead of matrix, single detector only
        print('Calculating autocorrelation ')
        # deltat is converted from µs to seconds for multipletau
        setattr(G, 'det0', multipletau.correlate(data, data, m=accuracy, deltat=dwellTime*1e-6, normalize=True))
    for i in listOfG:
        if isinstance(i, int):
            # autocorrelation of a detector element i
            print('Calculating autocorrelation of detector element ' + str(i))
            dataSingle = extractSpadData(data, i)
            setattr(G, 'det' + str(i), multipletau.correlate(dataSingle, dataSingle, m=accuracy, deltat=dwellTime*1e-6, normalize=True))
        elif i == "central":
            # autocorrelation central detector element
            print('Calculating autocorrelation central detector element')
            dataCentral = extractSpadData(data, "central")
            G.central = multipletau.correlate(dataCentral, dataCentral, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "sum3":
            # autocorrelation sum3x3
            print('Calculating autocorrelation sum3x3')
            dataSum3 = extractSpadData(data, "sum3")
            G.sum3 = multipletau.correlate(dataSum3, dataSum3, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "sum5":
            # autocorrelation sum5x5
            print('Calculating autocorrelation sum5x5')
            dataSum5 = extractSpadData(data, "sum5")
            G.sum5 = multipletau.correlate(dataSum5, dataSum5, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "allbuthot":
            # autocorrelation sum5x5 except for the hot pixels
            print('Calculating autocorrelation allbuthot')
            dataAllbuthot = extractSpadData(data, "allbuthot")
            G.allbuthot = multipletau.correlate(dataAllbuthot, dataAllbuthot, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "chessboard":
            # crosscorrelation chessboard
            print('Calculating crosscorrelation chessboard')
            dataChess0 = extractSpadData(data, "chess0")
            dataChess1 = extractSpadData(data, "chess1")
            G.chessboard = multipletau.correlate(dataChess0, dataChess1, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "chess3":
            # crosscorrelation small 3x3 chessboard
            print('Calculating crosscorrelation small chessboard')
            dataChess0 = extractSpadData(data, "chess3a")
            dataChess1 = extractSpadData(data, "chess3b")
            G.chess3 = multipletau.correlate(dataChess0, dataChess1, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "ullr":
            # crosscorrelation upper left and lower right
            print('Calculating crosscorrelation upper left and lower right')
            dataUL = extractSpadData(data, "upperleft")
            dataLR = extractSpadData(data, "lowerright")
            G.ullr = multipletau.correlate(dataUL, dataLR, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
        elif i == "crossCenter":
            # crosscorrelation center element with L, R, T, B
            dataCenter = extractSpadData(data, 12)
            for j in range(25):
                print('Calculating crosscorrelation central element with ' + str(j))
                data2 = extractSpadData(data, j)
                Gtemp = multipletau.correlate(dataCenter, data2, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
                setattr(G, 'det12x' + str(j), Gtemp)
        elif i == "2MPD":
            # cross- and autocorrelations of elements 12 and 13 only
            data1 = extractSpadData(data, 12)
            data2 = extractSpadData(data, 13)
            print('Cross correlation elements 12 and 13')
            Gtemp = multipletau.correlate(data1, data2, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
            G.cross12 = Gtemp
            print('Cross correlation elements 13 and 12')
            Gtemp = multipletau.correlate(data2, data1, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
            G.cross21 = Gtemp
            print('Autocorrelation element 12')
            Gtemp = multipletau.correlate(data1, data1, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
            G.auto1 = Gtemp
            print('Autocorrelation element 13')
            Gtemp = multipletau.correlate(data2, data2, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
            G.auto2 = Gtemp
        elif i == "crossAll":
            # crosscorrelation every element with every other element (25x25 curves)
            for j in range(25):
                data1 = extractSpadData(data, j)
                for k in range(25):
                    data2 = extractSpadData(data, k)
                    print('Calculating crosscorrelation det' + str(j) + ' and det' + str(k))
                    Gtemp = multipletau.correlate(data1, data2, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
                    setattr(G, 'det' + str(j) + 'x' + str(k), Gtemp)
        elif i == "autoSpatial":
            # spatial autocorrelation: correlate shifted detector images in time
            # number of time points
            Nt = np.size(data, 0)
            # detector size (5 for SPAD)
            N = int(np.round(np.sqrt(np.size(data, 1)-1)))
            # G size
            M = 2 * N - 1
            deltats = range(0, 1, 1)  # in units of dwell times
            G.autoSpatial = np.zeros((M, M, len(deltats)))
            # normalization
            print("Calculating average image")
            avIm = np.mean(data, 0)
            # avInt = np.mean(avIm[0:N*N]) - can't be used since every pixel
            # has a different PSF amplitude!!
            # for j in range(np.size(data, 0)):
            #    data[j, :] = data[j, :] - avIm
            avIm = np.resize(avIm[0:N*N], (N, N))
            # calculate autocorrelation
            k = 0
            for deltat in deltats:
                print("Calculating spatial autocorr delta t = " + str(deltat * dwellTime) + " µs")
                for j in range(Nt-deltat):
                    # cast to int64 to avoid overflow when multiplying counts
                    im1 = np.resize(data[j, 0:N*N], (N, N))
                    im1 = np.ndarray.astype(im1, 'int64')
                    im2 = np.resize(data[j + deltat, 0:N*N], (N, N))
                    im2 = np.ndarray.astype(im2, 'int64')
                    # G.autoSpatial[:,:,k] = G.autoSpatial[:,:,k] + ssig.correlate2d(im1, im2)
                    # calculate correlation between im1 and im2
                    for shifty in np.arange(-4, 5):
                        for shiftx in np.arange(-4, 5):
                            # go through all detector elements
                            n = 0  # number of overlapping detector elements
                            Gtemp = 0
                            for detx in np.arange(np.max((0, shiftx)), np.min((5, 5+shiftx))):
                                for dety in np.arange(np.max((0, shifty)), np.min((5, 5+shifty))):
                                    # per-pixel normalization: subtract and divide
                                    # by the mean-image product for this pixel pair
                                    GtempUnNorm = im1[dety, detx] * im2[dety-shifty, detx-shiftx]
                                    GtempNorm = GtempUnNorm - avIm[dety, detx] * avIm[dety-shifty, detx-shiftx]
                                    GtempNorm /= avIm[dety, detx] * avIm[dety-shifty, detx-shiftx]
                                    Gtemp += GtempNorm
                                    n += 1
                            Gtemp /= n
                            G.autoSpatial[shifty+4,shiftx+4,k] += Gtemp
                    G.autoSpatial[:,:,k] /= (Nt-deltat)
                k = k + 1
        elif i == "av":
            # average of all 25 individual autocorrelation curves
            for j in range(25):
                # autocorrelation of a detector element j
                print('Calculating autocorrelation of detector element ' + str(j))
                dataSingle = extractSpadData(data, j)
                Gtemp = multipletau.correlate(dataSingle, dataSingle, m=accuracy, deltat=dwellTime*1e-6, normalize=True)
                setattr(G, 'det' + str(j), Gtemp)
            # Gtemp still holds det24; add det0..det23 to average all 25 curves
            Gav = Gtemp[:, 1]
            for j in range(24):
                Gav = np.add(Gav, getattr(G, 'det' + str(j))[:, 1])
            Gav = Gav / 25
            G.av = np.zeros([np.size(Gav, 0), 2])
            G.av[:, 0] = Gtemp[:, 0]
            G.av[:, 1] = Gav
    return G
def FCS2CorrSplit(data, dwellTime, listOfG=['central', 'sum3', 'sum5', 'chessboard', 'ullr'], accuracy=50, split=10):
    """
    Chunk SPAD-FCS trace into different parts and calculate correlation curves
    ==========  ===============================================================
    Input       Meaning
    ----------  ---------------------------------------------------------------
    data        Data variable, i.e. output from binFile2Data
    dwellTime   Bin time [in µs]
    listofG     List of correlations to be calculated
    accuracy    Accuracy of the autocorrelation function, typically 50
    split       Number of traces to split the data into
                E.g. split=10 will divide a 60 second stream in 10 six second
                traces and calculate G for each individual trace
    ==========  ===============================================================
    Output      Meaning
    ----------  ---------------------------------------------------------------
    G           Object with all autocorrelations
                E.g. G.central contains the array with the central detector
                element autocorrelation; per-chunk curves are stored as
                <name>_chunk<i> and averages as <name>_average
    ==========  ===============================================================
    """
    if split == 1:
        # no chunking requested: fall through to the plain calculation
        G = FCS2Corr(data, dwellTime, listOfG, accuracy)
    else:
        G = correlations()
        G.dwellTime = dwellTime
        N = int(np.size(data, 0))
        # number of time bins per chunk (any remainder bins are dropped)
        chunkSize = int(np.floor(N / split))
        for j in listOfG:
            # --------------------- CALCULATE CORRELATION ---------------------
            print('Calculating correlation ' + str(j))
            i = 0
            for chunk in range(split):
                print('    Chunk ' + str(chunk+1) + ' --> ', end = '')
                # ------------------ CHUNK ------------------
                if data.ndim == 2:
                    dataSplit = data[i:i+chunkSize, :]
                else:
                    dataSplit = data[i:i+chunkSize]
                newList = [j]
                Gsplit = FCS2Corr(dataSplit, dwellTime, newList, accuracy)
                # copy every correlation attribute (but not dwellTime) into G
                # under a per-chunk name
                GsplitList = list(Gsplit.__dict__.keys())
                for k in GsplitList:
                    if k.find('dwellTime') == -1:
                        setattr(G, k + '_chunk' + str(chunk), getattr(Gsplit, k))
                i += chunkSize
            # ---------- CALCULATE AVERAGE CORRELATION OF ALL CHUNKS ----------
            if j == '2MPD':
                # 2MPD produces four named curves; average each separately
                avListBase = ['cross12', 'cross21', 'auto1', 'auto2']
                for avBase in avListBase:
                    avList = list(G.__dict__.keys())
                    avList = [i for i in avList if i.startswith(avBase + '_chunk')]
                    print('Calculating average correlation ' + avBase)
                    Gav = sum(getattr(G, i) for i in avList) / len(avList)
                    setattr(G, avBase + '_average', Gav)
                print('Calculating average cross correlation')
                G.cross_average = (G.cross12_average + G.cross21_average) / 2
            else:
                # Get list of "root" names, i.e. without "_chunk"
                Gfields = list(G.__dict__.keys())
                t = [Gfields[i].split("_chunk")[0] for i in range(len(Gfields))]
                t = list(dict.fromkeys(t))
                t.remove("dwellTime")
                # average over chunks
                for field in t:
                    print('Calculating average correlation ' + str(field))
                    avList = [i for i in Gfields if i.startswith(field + '_chunk')]
                    Gav = sum(getattr(G, i) for i in avList) / len(avList)
                    setattr(G, str(field) + '_average', Gav)
    return G
def FCSLoadAndCorrSplit(fname, listOfG=['central', 'sum3', 'sum5', 'chessboard', 'ullr'], accuracy=16, split=10):
    """
    Load SPAD-FCS data in chunks (of 10 s) and calculate G and Gav
    ==========  ===============================================================
    Input       Meaning
    ----------  ---------------------------------------------------------------
    fname       File name with the .bin data
    listofG     List of correlations to be calculated
    accuracy    Accuracy of the autocorrelation function, typically 16
    split       Number of seconds of each chunk to split the data into
                E.g. split=10 will divide a 60 second stream in 6 ten-second
                traces and calculate G for each individual trace
    ==========  ===============================================================
    Output      Meaning
    ----------  ---------------------------------------------------------------
    G           Object with all autocorrelations
                E.g. G.central contains the array with the central detector
                element autocorrelation
    data        Last chunk of raw data
    ==========  ===============================================================
    """
    # metadata file is assumed to sit next to the .bin file
    info = getFCSinfo(fname[:-4] + "_info.txt")
    dwellTime = info.dwellTime
    duration = info.duration
    # bug fix: np.int was deprecated and removed in NumPy 1.24; the builtin
    # int() produces the identical result here
    N = int(np.floor(duration / split))  # number of chunks
    G = correlations()
    G.dwellTime = dwellTime
    chunkSize = int(np.floor(split / dwellTime))  # time bins per chunk
    for chunk in range(N):
        # --------------------- CALCULATE CORRELATIONS SINGLE CHUNK ---------------------
        print("+-----------------------")
        print("| Loading chunk " + str(chunk))
        print("+-----------------------")
        data = file_to_FCScount(fname, np.uint8, chunkSize, chunk*chunkSize)
        for j in listOfG:
            print('   --> ' + str(j) + ": ", end='')
            # ------------------ CHUNK ------------------
            newList = [j]
            # FCS2Corr expects the dwell time in µs
            Gsplit = FCS2Corr(data, 1e6*dwellTime, newList, accuracy)
            GsplitList = list(Gsplit.__dict__.keys())
            for k in GsplitList:
                if k.find('dwellTime') == -1:
                    setattr(G, k + '_chunk' + str(chunk), getattr(Gsplit, k))
    # ---------- CALCULATE AVERAGE CORRELATION OF ALL CHUNKS ----------
    print("Calculating average correlations")
    # Get list of "root" names, i.e. without "_chunk"
    Gfields = list(G.__dict__.keys())
    t = [Gfields[i].split("_chunk")[0] for i in range(len(Gfields))]
    t = list(dict.fromkeys(t))
    t.remove("dwellTime")
    # average over chunks
    for field in t:
        avList = [i for i in Gfields if i.startswith(field + '_chunk')]
        # chunks can yield curves of different lengths; only average the
        # ones whose length matches the first chunk's
        Ntau = [len(getattr(G, i)) for i in avList]
        avList2 = [avList[i] for i in range(len(avList)) if Ntau[i] == Ntau[0]]
        Gav = sum(getattr(G, i) for i in avList2) / len(avList2)
        setattr(G, str(field) + '_average', Gav)
    # average over same shifts in case of 'crossAll'
    if 'crossAll' in listOfG:
        print("Calculating spatially averaged correlations.")
        spatialCorr = np.zeros([9, 9, len(G.det0x0_average)])
        for shifty in np.arange(-4, 5):
            for shiftx in np.arange(-4, 5):
                avList = SPADshiftvectorCrossCorr([shifty, shiftx])
                avList = [s + '_average' for s in avList]
                Gav = sum(getattr(G, i) for i in avList) / len(avList)
                spatialCorr[shifty+4, shiftx+4, :] = Gav[:,1]
        G.spatialCorr = spatialCorr
    return G, data
def FCSSpatialCorrAv(G, N=5):
    """
    Average the pairwise correlations over all identical shift vectors.

    For every shift (dy, dx) with -(N-1) <= dy, dx <= N-1, the averaged
    correlation amplitude (column 1 of each curve) is stored in the
    (2N-1, 2N-1, Ntau) array G.spatialCorr. Returns the same G object.
    """
    side = 2 * N - 1
    result = np.zeros([side, side, len(G.det0x0_average)])
    for dy in np.arange(-(N - 1), N):
        for dx in np.arange(-(N - 1), N):
            # names of all averaged pair-correlations with this shift vector
            names = [s + '_average' for s in SPADshiftvectorCrossCorr([dy, dx], N)]
            curves = [getattr(G, name) for name in names]
            meanCurve = sum(curves) / len(curves)
            result[dy + N - 1, dx + N - 1, :] = meanCurve[:, 1]
    G.spatialCorr = result
    return G
def FCSCrossCenterAv(G):
    """
    Average pair-correlations between central pixel and other pixels that are
    located at the same distance from the center
    ===========================================================================
    Input       Meaning
    ----------  ---------------------------------------------------------------
    G           Correlations object that (at least) contains all
                cross-correlations between central pixel and all other pixels:
                G.det12x12_average, G.det12x13_average, etc.
    ============================================================================
    Output      Meaning
    ----------  ---------------------------------------------------------------
    G           Same object as input but with the additional field
                G.crossCenterAv, which contains array of 6 columns, containing
                averaged cross-correlations between central pixel and pixels
                located at a distance of
                | 0 | 1 | sqrt(2) | 2 | sqrt(5) | sqrt(8) |
    ===========================================================================
    """
    # groups of detector elements at equal distance from the central pixel (12)
    # of the 5x5 SPAD array, one group per output column
    detGroups = [
        [12],                          # distance 0 (central pixel itself)
        [7, 11, 13, 17],               # distance 1
        [6, 8, 16, 18],                # distance sqrt(2)
        [2, 10, 14, 22],               # distance 2
        [1, 3, 5, 9, 15, 19, 21, 23],  # distance sqrt(5)
        [0, 4, 20, 24],                # distance sqrt(8)
    ]
    tau = G.det12x12_average[:, 0]
    G.crossCenterAv = np.zeros((len(tau), len(detGroups)))
    for col, group in enumerate(detGroups):
        # stack the correlation amplitudes of the group and average over detectors
        curves = np.array([getattr(G, 'det12x' + str(det) + '_average')[:, 1] for det in group])
        G.crossCenterAv[:, col] = np.mean(curves, 0)
    return G
def FCSBinToCSVAll(folderName=None, Glist=['central', 'sum3', 'sum5', 'chessboard', 'ullr'], split=10):
    """
    Batch-convert every .bin measurement file in a folder to csv correlations.

    Parameters
    ----------
    folderName : str or None
        Folder to scan; None (default) uses the current working directory.
        (Default changed from a mutable [] sentinel to None; passing [] is
        still accepted for backward compatibility.)
    Glist : list of str
        Correlation modes passed on to FCSLoadAndCorrSplit.
    split : number
        Chunk length in seconds used to split each measurement.
    """
    # PARSE INPUT
    if folderName is None or folderName == []:
        folderName = getcwd()
    folderName = folderName.replace("\\", "/")
    folderName = Path(folderName)
    # CHECK BIN FILES
    allFiles = listFiles(folderName, 'bin')
    # GO THROUGH EACH FILE
    for file in allFiles:
        fileName = ntpath.basename(file)
        print("File found: " + fileName)
        [G, data] = FCSLoadAndCorrSplit(file, Glist, 50, split)
        corr2csv(G, file[0:-4], [0, 0], 0)
def plotFCScorrelations(G, plotList='all', limits=[0, -1], vector=[], pColors='auto', yscale='lin'):
    """
    Plot all correlation curves
    ========== ===============================================================
    Input      Meaning
    ---------- ---------------------------------------------------------------
    G          Object with all autocorrelations
                   Possible attributes:
                       det*,
                       central, sum3, sum5, allbuthot, chessboard, ullr, av
                       autoSpatial,
                       det12x*
                       dwellTime (is not plotted)
    plotList   'all' or a list of attribute names of G that must be plotted
    limits     [start, stop] index range of the lag times to plot
    vector     list of [dy, dx] shift vectors, used only for 'crossVector'
    pColors    'auto' or a list of color ids passed to plotColors()
    yscale     'lin' or 'log' scaling of the y axis
    ========== ===============================================================
    Output     Meaning
    ---------- ---------------------------------------------------------------
    figure
    ========== ===============================================================
    """
    spatialCorrList = ['autoSpatial']
    start = limits[0]
    stop = limits[1]
    h = None  # figure handle; stays None if no figure is created
    # plotList contains all attributes of G that have to be plotted
    if plotList == 'all':
        plotList = list(G.__dict__.keys())
    # remove dwellTime from plotList
    if 'dwellTime' in plotList:
        plotList.remove('dwellTime')
    if 'av' in plotList:
        # remove all single detector element correlations
        plotListRemove = fnmatch.filter(plotList, 'det?')
        for elem in plotListRemove:
            plotList.remove(elem)
        plotListRemove = fnmatch.filter(plotList, 'det??')
        for elem in plotListRemove:
            plotList.remove(elem)
    if np.size(fnmatch.filter(plotList, 'det12x??')) > 10:
        # replace all individual cross-correlations by single crossCenter element
        plotListRemove = fnmatch.filter(plotList, 'det12x?')
        for elem in plotListRemove:
            plotList.remove(elem)
        plotListRemove = fnmatch.filter(plotList, 'det12x??')
        for elem in plotListRemove:
            plotList.remove(elem)
        plotList.append('crossCenter')
    if fnmatch.filter(plotList, '*_'):
        # a name ending in '_' means: plot all chunks and the average
        plotListStart = plotList[0]
        plotList = list(G.__dict__.keys())
        plotList.remove('dwellTime')
        plotList = [i for i in plotList if i.startswith(plotListStart)]
    # -------------------- Check for temporal correlations --------------------
    plotTempCorr = False
    for i in range(np.size(plotList)):
        if plotList[i] not in spatialCorrList:
            plotTempCorr = True
            break
    if plotTempCorr:
        leg = []  # figure legend
        h = plt.figure()
        # axis limits, updated while plotting
        maxy = 0
        miny = 0
        minx = 25e-9
        maxx = 10
        pColIndex = 0
        for i in plotList:
            if i in ["central", "central_average", "sum3", "sum3_average", "sum5", "sum5_average", "allbuthot", "allbuthot_average", "chessboard", "chessboard_average", "chess3", "chess3_average", "ullr", "ullr_average", "av", "cross12_average", "cross21_average", "cross_average", "auto1_average", "auto2_average"]:
                # plot autocorrelation
                Gtemp = getattr(G, i)
                plt.plot(Gtemp[start:stop, 0], Gtemp[start:stop, 1], color=plotColors(i), linewidth=1.3)
                maxy = np.max([maxy, np.max(Gtemp[start+1:stop, 1])])
                miny = np.min([miny, np.min(Gtemp[start+1:stop, 1])])
                minx = Gtemp[start, 0]
                maxx = Gtemp[stop, 0]
                leg.append(i)
            elif i == 'crossCenter':
                for j in range(25):
                    Gsingle = getattr(G, 'det12x' + str(j))
                    plt.plot(Gsingle[start:stop, 0], Gsingle[start:stop, 1], color=plotColors(i))
                    maxy = np.max([maxy, np.max(Gsingle[start+1:stop, 1])])
                    # BUG FIX: was np.min(Gtemp[...]), referencing a stale (or
                    # undefined) variable from another branch instead of the
                    # curve that was just plotted
                    miny = np.min([miny, np.min(Gsingle[start+1:stop, 1])])
                    leg.append(i + str(j))
            elif i == 'crossCenterAv':
                tau = G.det12x12_average[:,0]
                for j in range(6):
                    plt.plot(tau[start:stop], G.crossCenterAv[start:stop, j], color=plotColors(j))
                miny = np.min(G.crossCenterAv[start+10:stop,:])
                maxy = np.max(G.crossCenterAv[start+1:stop,:])
                # raw strings: keeps identical text while avoiding invalid \D escapes
                leg = [r'$\Delta r = 0$', r'$\Delta r = 1$', r'$\Delta r = \sqrt{2}$', r'$\Delta r = 2$', r'$\Delta r = \sqrt{5}$', r'$\Delta r = 2\sqrt{2}$']
            elif i != 'autoSpatial' and i != 'stics' and i != 'crossAll' and i != 'crossVector':
                # plot autocorr single detector element
                if pColors == 'auto':
                    plt.plot(getattr(G, i)[start:stop, 0], getattr(G, i)[start:stop, 1])
                else:
                    plt.plot(getattr(G, i)[start:stop, 0], getattr(G, i)[start:stop, 1], color=plotColors(pColors[pColIndex]))
                    pColIndex += 1
                maxy = np.max([maxy, np.max(getattr(G, i)[start+1:stop, 1])])
                miny = np.min([miny, np.min(getattr(G, i)[start+1:stop, 1])])
                minx = getattr(G, i)[start, 0]
                maxx = getattr(G, i)[stop, 0]
                if '_average' in i:
                    iLeg = i[0:-8]
                else:
                    iLeg = i
                leg.append(iLeg)
        # figure lay-out
        plt.xscale('log')
        plt.xlabel('Temporal shift [s]')
        plt.ylabel('G')
        if yscale == 'log':
            plt.yscale('log')
        else:
            plt.yscale('linear')
        axes = plt.gca()
        axes.set_xlim([minx, maxx])
        axes.set_ylim([miny, maxy])
        if np.size(leg) > 0 and np.size(leg) < 10 and 'crossCenter' not in plotList:
            axes.legend(leg)
        plt.rcParams.update({'font.size': 15})
        plt.tight_layout()
        if 'crossCenter' in plotList:
            plotCrossCenterScheme()
    # -------------------- Check for spatial correlations --------------------
    if 'autoSpatial' in plotList:
        Gtemp = G.autoSpatial
        Gmax = np.max(Gtemp)
        xmax = (np.size(Gtemp, 0)) / 2
        extent = [-xmax, xmax, -xmax, xmax]
        for j in range(np.size(Gtemp, 2)):
            h = plt.figure()
            plt.imshow(Gtemp[:, :, j], extent=extent, vmin=0, vmax=Gmax)
            plt.title('delta_t = ' + str(G.dwellTime * j) + ' µs')
    if 'crossAll' in plotList:
        # spatially averaged correlations for a fixed set of shift vectors
        # NOTE(review): relies on 'axes'/'miny' created in the temporal section
        # above — verify 'crossAll' is never requested alone
        Gtemp = G.spatialCorr
        tau = G.det0x0_average[:,0]
        for vector in [[4, 4], [3, 4], [3, 3], [2, 4], [2, 3], [2, 2]]:
            plt.plot(tau, Gtemp[vector[0], vector[1], :])
        plt.legend(['[0, 0]', '[1, 0]', '[1, 1]', '[0, 2]', '[2, 1]', '[2, 2]'])
        plt.xscale('log')
        plt.xlabel('Temporal shift [s]')
        plt.ylabel('G')
        axes.set_ylim([miny, np.max(Gtemp[:,:,2:])])
    if 'crossVector' in plotList:
        # user-supplied shift vectors
        Gtemp = G.spatialCorr
        tau = G.det0x0_chunk0[:,0]
        for i in range(len(vector)):
            vectorI = vector[i]
            plt.plot(tau, Gtemp[4+vectorI[0], 4+vectorI[1], :], label='[' + str(vectorI[0]) + ', ' + str(vectorI[1]) + ']')
        plt.xscale('log')
        plt.xlabel('Temporal shift [s]')
        plt.ylabel('G')
        plt.legend()
        axes.set_ylim([miny, np.max(Gtemp[:,:,2:])])
    if 'stics' in plotList:
        # space-time image correlation: one 9x9 image of G per lag time
        Gtemp = getattr(G, 'det12x12')
        Gplot = np.zeros([9, 9])
        N = 10  # number of lag times to show
        # logarithmically spaced indices into the lag-time axis
        indArray = np.concatenate(([0], np.round(np.logspace(0, np.log10(len(Gtemp) - 1), N)).astype('int')))
        for i in range(N):
            # go through all lag times
            ind = np.round(indArray[i])
            print(ind)
            for yshift in np.arange(-4, 5):
                for xshift in np.arange(-4, 5):
                    # go through each shift vector
                    detDiff = -yshift * 5 - xshift
                    Gv = 0
                    nG = 0
                    for det1 in range(25):
                        [y, x] = coord(det1)
                        if x-xshift < 0 or x-xshift>4 or y-yshift < 0 or y-yshift > 4:
                            # shifted element falls outside the 5x5 array
                            pass
                        else:
                            det2 = det1 + detDiff
                            print('det1 = ' + str(det1) + ' and det2 = ' + str(det2))
                            if det2 >= 0 and det2 <= 24:
                                Gv += getattr(G, 'det' + str(det1) + 'x' + str(det2))[int(ind), 1]
                                nG += 1
                    Gplot[yshift+4, xshift+4] = Gv / nG
            if i == 0:
                plotMax = np.max(Gplot)
            xmax = 9 / 2
            extent = [-xmax, xmax, -xmax, xmax]
            h = plt.figure()
            plt.imshow(Gplot, extent=extent, vmin=0, vmax=plotMax)
            plt.title('delta_t = ' + str(int(G.dwellTime * ind)) + ' µs')
    return h
def plotGsurf(G):
    """
    Show a square 2D correlation array G as a 3D surface plot with the axes
    centered on zero. Returns the created matplotlib figure.
    """
    halfWidth = (np.size(G, 0) - 1) / 2
    axisValues = np.arange(-halfWidth, halfWidth + 1)
    gridX, gridY = np.meshgrid(axisValues, axisValues)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(gridX, gridY, G)
    return fig
def plotCrossCenterScheme():
    """
    Visualize, for each of the 25 SPAD detector elements, its distance to the
    central element (12) as a color-coded 5x5 image.
    """
    distList = [distance2detElements(12, element) for element in range(25)]
    distances = np.resize(np.asarray(distList, dtype=float), (5, 5))
    plt.figure()
    plt.imshow(distances, 'viridis')
    plt.title('Color scheme cross-correlations')
| [
"matplotlib.pyplot.ylabel",
"listFiles.listFiles",
"getFCSinfo.getFCSinfo",
"distance2detElements.SPADcoordFromDetNumb",
"distance2detElements.distance2detElements",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"extractSpadData.extractSpadData",
"pathlib.Path",
"plotColors.plotColor... | [((15038, 15074), 'getFCSinfo.getFCSinfo', 'getFCSinfo', (["(fname[:-4] + '_info.txt')"], {}), "(fname[:-4] + '_info.txt')\n", (15048, 15074), False, 'from getFCSinfo import getFCSinfo\n'), ((17562, 17584), 'numpy.arange', 'np.arange', (['(-(N - 1))', 'N'], {}), '(-(N - 1), N)\n', (17571, 17584), True, 'import numpy as np\n'), ((20618, 20634), 'pathlib.Path', 'Path', (['folderName'], {}), '(folderName)\n', (20622, 20634), False, 'from pathlib import Path\n'), ((20677, 20705), 'listFiles.listFiles', 'listFiles', (['folderName', '"""bin"""'], {}), "(folderName, 'bin')\n", (20686, 20705), False, 'from listFiles import listFiles\n'), ((22993, 23023), 'fnmatch.filter', 'fnmatch.filter', (['plotList', '"""*_"""'], {}), "(plotList, '*_')\n", (23007, 23023), False, 'import fnmatch\n'), ((30395, 30408), 'numpy.size', 'np.size', (['G', '(0)'], {}), '(G, 0)\n', (30402, 30408), True, 'import numpy as np\n'), ((30439, 30451), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30449, 30451), True, 'import matplotlib.pyplot as plt\n'), ((30511, 30531), 'numpy.arange', 'np.arange', (['(-N)', '(N + 1)'], {}), '(-N, N + 1)\n', (30520, 30531), True, 'import numpy as np\n'), ((30543, 30560), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (30554, 30560), True, 'import numpy as np\n'), ((30674, 30686), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (30682, 30686), True, 'import numpy as np\n'), ((30774, 30802), 'numpy.resize', 'np.resize', (['distances', '(5, 5)'], {}), '(distances, (5, 5))\n', (30783, 30802), True, 'import numpy as np\n'), ((30807, 30819), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30817, 30819), True, 'import matplotlib.pyplot as plt\n'), ((30824, 30856), 'matplotlib.pyplot.imshow', 'plt.imshow', (['distances', '"""viridis"""'], {}), "(distances, 'viridis')\n", (30834, 30856), True, 'import matplotlib.pyplot as plt\n'), ((30861, 30905), 'matplotlib.pyplot.title', 'plt.title', (['"""Color 
scheme cross-correlations"""'], {}), "('Color scheme cross-correlations')\n", (30870, 30905), True, 'import matplotlib.pyplot as plt\n'), ((15155, 15181), 'numpy.floor', 'np.floor', (['(duration / split)'], {}), '(duration / split)\n', (15163, 15181), True, 'import numpy as np\n'), ((15274, 15301), 'numpy.floor', 'np.floor', (['(split / dwellTime)'], {}), '(split / dwellTime)\n', (15282, 15301), True, 'import numpy as np\n'), ((15566, 15629), 'meas_to_count.file_to_FCScount', 'file_to_FCScount', (['fname', 'np.uint8', 'chunkSize', '(chunk * chunkSize)'], {}), '(fname, np.uint8, chunkSize, chunk * chunkSize)\n', (15582, 15629), False, 'from meas_to_count import file_to_FCScount\n'), ((17069, 17085), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (17078, 17085), True, 'import numpy as np\n'), ((17606, 17628), 'numpy.arange', 'np.arange', (['(-(N - 1))', 'N'], {}), '(-(N - 1), N)\n', (17615, 17628), True, 'import numpy as np\n'), ((20545, 20553), 'os.getcwd', 'getcwd', ([], {}), '()\n', (20551, 20553), False, 'from os import getcwd\n'), ((20783, 20804), 'ntpath.basename', 'ntpath.basename', (['file'], {}), '(file)\n', (20798, 20804), False, 'import ntpath\n'), ((20918, 20952), 'corr2csv.corr2csv', 'corr2csv', (['G', 'file[0:-4]', '[0, 0]', '(0)'], {}), '(G, file[0:-4], [0, 0], 0)\n', (20926, 20952), False, 'from corr2csv import corr2csv\n'), ((22301, 22333), 'fnmatch.filter', 'fnmatch.filter', (['plotList', '"""det?"""'], {}), "(plotList, 'det?')\n", (22315, 22333), False, 'import fnmatch\n'), ((22429, 22462), 'fnmatch.filter', 'fnmatch.filter', (['plotList', '"""det??"""'], {}), "(plotList, 'det??')\n", (22443, 22462), False, 'import fnmatch\n'), ((22704, 22739), 'fnmatch.filter', 'fnmatch.filter', (['plotList', '"""det12x?"""'], {}), "(plotList, 'det12x?')\n", (22718, 22739), False, 'import fnmatch\n'), ((22835, 22871), 'fnmatch.filter', 'fnmatch.filter', (['plotList', '"""det12x??"""'], {}), "(plotList, 'det12x??')\n", (22849, 22871), False, 
'import fnmatch\n'), ((23389, 23406), 'numpy.size', 'np.size', (['plotList'], {}), '(plotList)\n', (23396, 23406), True, 'import numpy as np\n'), ((23574, 23586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23584, 23586), True, 'import matplotlib.pyplot as plt\n'), ((26461, 26478), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (26471, 26478), True, 'import matplotlib.pyplot as plt\n'), ((26487, 26519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temporal shift [s]"""'], {}), "('Temporal shift [s]')\n", (26497, 26519), True, 'import matplotlib.pyplot as plt\n'), ((26528, 26543), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""G"""'], {}), "('G')\n", (26538, 26543), True, 'import matplotlib.pyplot as plt\n'), ((26664, 26673), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (26671, 26673), True, 'import matplotlib.pyplot as plt\n'), ((26868, 26906), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 15}"], {}), "({'font.size': 15})\n", (26887, 26906), True, 'import matplotlib.pyplot as plt\n'), ((26915, 26933), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26931, 26933), True, 'import matplotlib.pyplot as plt\n'), ((27188, 27201), 'numpy.max', 'np.max', (['Gtemp'], {}), '(Gtemp)\n', (27194, 27201), True, 'import numpy as np\n'), ((27737, 27809), 'matplotlib.pyplot.legend', 'plt.legend', (["['[0, 0]', '[1, 0]', '[1, 1]', '[0, 2]', '[2, 1]', '[2, 2]']"], {}), "(['[0, 0]', '[1, 0]', '[1, 1]', '[0, 2]', '[2, 1]', '[2, 2]'])\n", (27747, 27809), True, 'import matplotlib.pyplot as plt\n'), ((27818, 27835), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (27828, 27835), True, 'import matplotlib.pyplot as plt\n'), ((27844, 27876), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temporal shift [s]"""'], {}), "('Temporal shift [s]')\n", (27854, 27876), True, 'import matplotlib.pyplot as plt\n'), ((27885, 27900), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""G"""'], {}), "('G')\n", (27895, 27900), True, 'import matplotlib.pyplot as plt\n'), ((28597, 28614), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (28607, 28614), True, 'import matplotlib.pyplot as plt\n'), ((28623, 28655), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temporal shift [s]"""'], {}), "('Temporal shift [s]')\n", (28633, 28655), True, 'import matplotlib.pyplot as plt\n'), ((28664, 28679), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""G"""'], {}), "('G')\n", (28674, 28679), True, 'import matplotlib.pyplot as plt\n'), ((28688, 28700), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (28698, 28700), True, 'import matplotlib.pyplot as plt\n'), ((28842, 28858), 'numpy.zeros', 'np.zeros', (['[9, 9]'], {}), '([9, 9])\n', (28850, 28858), True, 'import numpy as np\n'), ((30730, 30757), 'distance2detElements.distance2detElements', 'distance2detElements', (['(12)', 'i'], {}), '(12, i)\n', (30750, 30757), False, 'from distance2detElements import distance2detElements\n'), ((1772, 1786), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1780, 1786), True, 'import numpy as np\n'), ((1933, 2024), 'multipletau.correlate', 'multipletau.correlate', (['data', 'data'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(data, data, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (1954, 2024), False, 'import multipletau\n'), ((2231, 2255), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', 'i'], {}), '(data, i)\n', (2246, 2255), False, 'from extractSpadData import extractSpadData\n'), ((11471, 11487), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (11478, 11487), True, 'import numpy as np\n'), ((11513, 11532), 'numpy.floor', 'np.floor', (['(N / split)'], {}), '(N / split)\n', (11521, 11532), True, 'import numpy as np\n'), ((17113, 17129), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (17122, 17129), True, 'import numpy 
as np\n'), ((17649, 17694), 'distance2detElements.SPADshiftvectorCrossCorr', 'SPADshiftvectorCrossCorr', (['[shifty, shiftx]', 'N'], {}), '([shifty, shiftx], N)\n', (17673, 17694), False, 'from distance2detElements import SPADshiftvectorCrossCorr\n'), ((22553, 22589), 'fnmatch.filter', 'fnmatch.filter', (['plotList', '"""det12x??"""'], {}), "(plotList, 'det12x??')\n", (22567, 22589), False, 'import fnmatch\n'), ((26584, 26601), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (26594, 26601), True, 'import matplotlib.pyplot as plt\n'), ((26628, 26648), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""linear"""'], {}), "('linear')\n", (26638, 26648), True, 'import matplotlib.pyplot as plt\n'), ((27218, 27235), 'numpy.size', 'np.size', (['Gtemp', '(0)'], {}), '(Gtemp, 0)\n', (27225, 27235), True, 'import numpy as np\n'), ((27308, 27325), 'numpy.size', 'np.size', (['Gtemp', '(2)'], {}), '(Gtemp, 2)\n', (27315, 27325), True, 'import numpy as np\n'), ((27344, 27356), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27354, 27356), True, 'import matplotlib.pyplot as plt\n'), ((27369, 27429), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Gtemp[:, :, j]'], {'extent': 'extent', 'vmin': '(0)', 'vmax': 'Gmax'}), '(Gtemp[:, :, j], extent=extent, vmin=0, vmax=Gmax)\n', (27379, 27429), True, 'import matplotlib.pyplot as plt\n'), ((27683, 27728), 'matplotlib.pyplot.plot', 'plt.plot', (['tau', 'Gtemp[vector[0], vector[1], :]'], {}), '(tau, Gtemp[vector[0], vector[1], :])\n', (27691, 27728), True, 'import matplotlib.pyplot as plt\n'), ((29068, 29089), 'numpy.round', 'np.round', (['indArray[i]'], {}), '(indArray[i])\n', (29076, 29089), True, 'import numpy as np\n'), ((29139, 29155), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (29148, 29155), True, 'import numpy as np\n'), ((30187, 30199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30197, 30199), True, 'import matplotlib.pyplot as plt\n'), ((30212, 30266), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['Gplot'], {'extent': 'extent', 'vmin': '(0)', 'vmax': 'plotMax'}), '(Gplot, extent=extent, vmin=0, vmax=plotMax)\n', (30222, 30266), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2398), 'multipletau.correlate', 'multipletau.correlate', (['dataSingle', 'dataSingle'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataSingle, dataSingle, m=accuracy, deltat=dwellTime *\n 1e-06, normalize=True)\n', (2316, 2398), False, 'import multipletau\n'), ((2578, 2610), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""central"""'], {}), "(data, 'central')\n", (2593, 2610), False, 'from extractSpadData import extractSpadData\n'), ((2635, 2741), 'multipletau.correlate', 'multipletau.correlate', (['dataCentral', 'dataCentral'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataCentral, dataCentral, m=accuracy, deltat=\n dwellTime * 1e-06, normalize=True)\n', (2656, 2741), False, 'import multipletau\n'), ((17156, 17198), 'distance2detElements.SPADshiftvectorCrossCorr', 'SPADshiftvectorCrossCorr', (['[shifty, shiftx]'], {}), '([shifty, shiftx])\n', (17180, 17198), False, 'from distance2detElements import SPADshiftvectorCrossCorr\n'), ((26757, 26769), 'numpy.size', 'np.size', (['leg'], {}), '(leg)\n', (26764, 26769), True, 'import numpy as np\n'), ((26778, 26790), 'numpy.size', 'np.size', (['leg'], {}), '(leg)\n', (26785, 26790), True, 'import numpy as np\n'), ((27930, 27953), 'numpy.max', 'np.max', (['Gtemp[:, :, 2:]'], {}), '(Gtemp[:, :, 2:])\n', (27936, 27953), True, 'import numpy as np\n'), ((28730, 28753), 'numpy.max', 'np.max', (['Gtemp[:, :, 2:]'], {}), '(Gtemp[:, :, 2:])\n', (28736, 28753), True, 'import numpy as np\n'), ((29187, 29203), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (29196, 29203), True, 'import numpy as np\n'), ((30084, 30097), 'numpy.max', 'np.max', (['Gplot'], {}), '(Gplot)\n', (30090, 30097), True, 'import 
numpy as np\n'), ((2877, 2906), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""sum3"""'], {}), "(data, 'sum3')\n", (2892, 2906), False, 'from extractSpadData import extractSpadData\n'), ((2928, 3028), 'multipletau.correlate', 'multipletau.correlate', (['dataSum3', 'dataSum3'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataSum3, dataSum3, m=accuracy, deltat=dwellTime * \n 1e-06, normalize=True)\n', (2949, 3028), False, 'import multipletau\n'), ((3164, 3193), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""sum5"""'], {}), "(data, 'sum5')\n", (3179, 3193), False, 'from extractSpadData import extractSpadData\n'), ((3215, 3315), 'multipletau.correlate', 'multipletau.correlate', (['dataSum5', 'dataSum5'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataSum5, dataSum5, m=accuracy, deltat=dwellTime * \n 1e-06, normalize=True)\n', (3236, 3315), False, 'import multipletau\n'), ((24253, 24266), 'plotColors.plotColors', 'plotColors', (['i'], {}), '(i)\n', (24263, 24266), False, 'from plotColors import plotColors\n'), ((24320, 24352), 'numpy.max', 'np.max', (['Gtemp[start + 1:stop, 1]'], {}), '(Gtemp[start + 1:stop, 1])\n', (24326, 24352), True, 'import numpy as np\n'), ((24390, 24422), 'numpy.min', 'np.min', (['Gtemp[start + 1:stop, 1]'], {}), '(Gtemp[start + 1:stop, 1])\n', (24396, 24422), True, 'import numpy as np\n'), ((25301, 25344), 'numpy.min', 'np.min', (['G.crossCenterAv[start + 10:stop, :]'], {}), '(G.crossCenterAv[start + 10:stop, :])\n', (25307, 25344), True, 'import numpy as np\n'), ((25365, 25407), 'numpy.max', 'np.max', (['G.crossCenterAv[start + 1:stop, :]'], {}), '(G.crossCenterAv[start + 1:stop, :])\n', (25371, 25407), True, 'import numpy as np\n'), ((29437, 29448), 'distance2detElements.SPADcoordFromDetNumb', 'coord', (['det1'], {}), '(det1)\n', (29442, 29448), True, 'from distance2detElements import SPADcoordFromDetNumb as coord\n'), ((3502, 3536), 
'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""allbuthot"""'], {}), "(data, 'allbuthot')\n", (3517, 3536), False, 'from extractSpadData import extractSpadData\n'), ((3563, 3673), 'multipletau.correlate', 'multipletau.correlate', (['dataAllbuthot', 'dataAllbuthot'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataAllbuthot, dataAllbuthot, m=accuracy, deltat=\n dwellTime * 1e-06, normalize=True)\n', (3584, 3673), False, 'import multipletau\n'), ((3827, 3858), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""chess0"""'], {}), "(data, 'chess0')\n", (3842, 3858), False, 'from extractSpadData import extractSpadData\n'), ((3884, 3915), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""chess1"""'], {}), "(data, 'chess1')\n", (3899, 3915), False, 'from extractSpadData import extractSpadData\n'), ((3943, 4046), 'multipletau.correlate', 'multipletau.correlate', (['dataChess0', 'dataChess1'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataChess0, dataChess1, m=accuracy, deltat=dwellTime *\n 1e-06, normalize=True)\n', (3964, 4046), False, 'import multipletau\n'), ((24838, 24851), 'plotColors.plotColors', 'plotColors', (['i'], {}), '(i)\n', (24848, 24851), False, 'from plotColors import plotColors\n'), ((24894, 24928), 'numpy.max', 'np.max', (['Gsingle[start + 1:stop, 1]'], {}), '(Gsingle[start + 1:stop, 1])\n', (24900, 24928), True, 'import numpy as np\n'), ((24970, 25002), 'numpy.min', 'np.min', (['Gtemp[start + 1:stop, 1]'], {}), '(Gtemp[start + 1:stop, 1])\n', (24976, 25002), True, 'import numpy as np\n'), ((4225, 4257), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""chess3a"""'], {}), "(data, 'chess3a')\n", (4240, 4257), False, 'from extractSpadData import extractSpadData\n'), ((4283, 4315), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""chess3b"""'], {}), "(data, 'chess3b')\n", (4298, 4315), False, 'from 
extractSpadData import extractSpadData\n'), ((4339, 4442), 'multipletau.correlate', 'multipletau.correlate', (['dataChess0', 'dataChess1'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataChess0, dataChess1, m=accuracy, deltat=dwellTime *\n 1e-06, normalize=True)\n', (4360, 4442), False, 'import multipletau\n'), ((25263, 25276), 'plotColors.plotColors', 'plotColors', (['j'], {}), '(j)\n', (25273, 25276), False, 'from plotColors import plotColors\n'), ((4619, 4653), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""upperleft"""'], {}), "(data, 'upperleft')\n", (4634, 4653), False, 'from extractSpadData import extractSpadData\n'), ((4675, 4710), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '"""lowerright"""'], {}), "(data, 'lowerright')\n", (4690, 4710), False, 'from extractSpadData import extractSpadData\n'), ((4732, 4827), 'multipletau.correlate', 'multipletau.correlate', (['dataUL', 'dataLR'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataUL, dataLR, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (4753, 4827), False, 'import multipletau\n'), ((25956, 25986), 'plotColors.plotColors', 'plotColors', (['pColors[pColIndex]'], {}), '(pColors[pColIndex])\n', (25966, 25986), False, 'from plotColors import plotColors\n'), ((4954, 4979), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '(12)'], {}), '(data, 12)\n', (4969, 4979), False, 'from extractSpadData import extractSpadData\n'), ((5121, 5145), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', 'j'], {}), '(data, j)\n', (5136, 5145), False, 'from extractSpadData import extractSpadData\n'), ((5170, 5269), 'multipletau.correlate', 'multipletau.correlate', (['dataCenter', 'data2'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataCenter, data2, m=accuracy, deltat=dwellTime * \n 1e-06, normalize=True)\n', (5191, 5269), False, 'import 
multipletau\n'), ((5419, 5444), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '(12)'], {}), '(data, 12)\n', (5434, 5444), False, 'from extractSpadData import extractSpadData\n'), ((5465, 5490), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', '(13)'], {}), '(data, 13)\n', (5480, 5490), False, 'from extractSpadData import extractSpadData\n'), ((5569, 5662), 'multipletau.correlate', 'multipletau.correlate', (['data1', 'data2'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(data1, data2, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (5590, 5662), False, 'import multipletau\n'), ((5764, 5857), 'multipletau.correlate', 'multipletau.correlate', (['data2', 'data1'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(data2, data1, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (5785, 5857), False, 'import multipletau\n'), ((5949, 6042), 'multipletau.correlate', 'multipletau.correlate', (['data1', 'data1'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(data1, data1, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (5970, 6042), False, 'import multipletau\n'), ((6132, 6225), 'multipletau.correlate', 'multipletau.correlate', (['data2', 'data2'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(data2, data2, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (6153, 6225), False, 'import multipletau\n'), ((6420, 6444), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', 'j'], {}), '(data, j)\n', (6435, 6444), False, 'from extractSpadData import extractSpadData\n'), ((6906, 6922), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (6913, 6922), True, 'import numpy as np\n'), ((7286, 7302), 'numpy.mean', 'np.mean', (['data', '(0)'], {}), '(data, 0)\n', (7293, 7302), True, 'import numpy as np\n'), ((7542, 7574), 'numpy.resize', 'np.resize', (['avIm[0:N * N]', 
'(N, N)'], {}), '(avIm[0:N * N], (N, N))\n', (7551, 7574), True, 'import numpy as np\n'), ((6509, 6533), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', 'k'], {}), '(data, k)\n', (6524, 6533), False, 'from extractSpadData import extractSpadData\n'), ((6655, 6748), 'multipletau.correlate', 'multipletau.correlate', (['data1', 'data2'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(data1, data2, m=accuracy, deltat=dwellTime * 1e-06,\n normalize=True)\n', (6676, 6748), False, 'import multipletau\n'), ((7834, 7869), 'numpy.resize', 'np.resize', (['data[j, 0:N * N]', '(N, N)'], {}), '(data[j, 0:N * N], (N, N))\n', (7843, 7869), True, 'import numpy as np\n'), ((7894, 7925), 'numpy.ndarray.astype', 'np.ndarray.astype', (['im1', '"""int64"""'], {}), "(im1, 'int64')\n", (7911, 7925), True, 'import numpy as np\n'), ((7952, 7996), 'numpy.resize', 'np.resize', (['data[j + deltat, 0:N * N]', '(N, N)'], {}), '(data[j + deltat, 0:N * N], (N, N))\n', (7961, 7996), True, 'import numpy as np\n'), ((8021, 8052), 'numpy.ndarray.astype', 'np.ndarray.astype', (['im2', '"""int64"""'], {}), "(im2, 'int64')\n", (8038, 8052), True, 'import numpy as np\n'), ((8246, 8262), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (8255, 8262), True, 'import numpy as np\n'), ((9581, 9605), 'extractSpadData.extractSpadData', 'extractSpadData', (['data', 'j'], {}), '(data, j)\n', (9596, 9605), False, 'from extractSpadData import extractSpadData\n'), ((9630, 9733), 'multipletau.correlate', 'multipletau.correlate', (['dataSingle', 'dataSingle'], {'m': 'accuracy', 'deltat': '(dwellTime * 1e-06)', 'normalize': '(True)'}), '(dataSingle, dataSingle, m=accuracy, deltat=dwellTime *\n 1e-06, normalize=True)\n', (9651, 9733), False, 'import multipletau\n'), ((8302, 8318), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (8311, 8318), True, 'import numpy as np\n'), ((9963, 9978), 'numpy.size', 'np.size', (['Gav', '(0)'], {}), '(Gav, 
0)\n', (9970, 9978), True, 'import numpy as np\n'), ((7001, 7017), 'numpy.size', 'np.size', (['data', '(1)'], {}), '(data, 1)\n', (7008, 7017), True, 'import numpy as np\n'), ((8548, 8567), 'numpy.max', 'np.max', (['(0, shiftx)'], {}), '((0, shiftx))\n', (8554, 8567), True, 'import numpy as np\n'), ((8569, 8592), 'numpy.min', 'np.min', (['(5, 5 + shiftx)'], {}), '((5, 5 + shiftx))\n', (8575, 8592), True, 'import numpy as np\n'), ((8647, 8666), 'numpy.max', 'np.max', (['(0, shifty)'], {}), '((0, shifty))\n', (8653, 8666), True, 'import numpy as np\n'), ((8668, 8691), 'numpy.min', 'np.min', (['(5, 5 + shifty)'], {}), '((5, 5 + shifty))\n', (8674, 8691), True, 'import numpy as np\n')] |
import numpy as np
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get working on python 2 I had to downgrade llvmlite pip install llvmlite==0.31.0
# Module for fitting resonance curves for kinetic inductance detectors.
# written by <NAME> 12/21/16
# for an example see test_fit.py in this directory
# To do:
# I think the error analysis in fit_nonlinear_iq_with_err probably needs some work
# add step-by-step fitting, i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need a fit option that just specifies tau, because that never really changes for your cryostat
# Changelog
# JDW 2017-08-17 added a keyword/function to allow gain variation "amp_var" to be taken out before fitting
# JDW 2017-08-30 added magnitude fitting of resonators, i.e. not in IQ space
# JDW 2018-03-05 added a more clever function for guessing x0 for fits
# JDW 2018-08-23 added more clever guessing for resonators with large phi, in separate guess functions
# Primitive cube roots of unity (J = e^(2*pi*i/3), Jc = 1/J = conj(J)).
# Used by cardan() below to generate all three roots of a cubic from one
# root of its depressed form.
J=np.exp(2j*np.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
    '''
    Analytic cubic root finder (Cardano's method) for
    a*x^3 + b*x^2 + c*x + d = 0.

    Returns a single real root: the largest real root when the discriminant
    indicates three real roots, otherwise the root with the smallest
    imaginary component (taken as the one real root). Analytic and
    numba-compiled: roughly 10x faster than np.roots for this use.
    '''
    u=np.empty(2,np.complex128)
    # shift to the depressed cubic t^3 + p*t + q = 0 via x = t - b/(3a)
    z0=b/3/a
    a2,b2 = a*a,b*b
    p=-b2/3/a2 +c/a
    q=(b/27*(2*b2/a2-9*c/a)+d)/a
    # discriminant: D > 0 means three distinct real roots
    D=-4*p*p*p-27*q*q
    r=np.sqrt(-D/27+0j)
    u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
    v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
    # choose the cube-root branch for v so that u*v = -p/3 (Cardano constraint)
    w=u*v
    w0=np.abs(w+p/3)
    w1=np.abs(w*J+p/3)
    w2=np.abs(w*Jc+p/3)
    if w0<w1:
        if w2<w0 : v*=Jc
    elif w2<w1 : v*=Jc
    else: v*=J
    # all three roots, shifted back from the depressed form
    roots = np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
    #print(roots)
    where_real = np.where(np.abs(np.imag(roots)) < 1e-15)
    #if len(where_real)>1: print(len(where_real))
    #print(D)
    if D>0: return np.max(np.real(roots)) # three real roots
    else: return np.real(roots[np.argsort(np.abs(np.imag(roots)))][0]) #one real root get the value that has smallest imaginary component
    #return np.max(np.real(roots[where_real]))
    #return np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
# function to descript the magnitude S21 of a non linear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
    '''
    Model for |S21|^2 of a nonlinear (bifurcating) resonator.

    Parameters
    ----------
    x    : frequencies the sweep covers (Hz)
    fr   : resonator center frequency
    Qr   : resonator quality factor
    amp  : Qr/Qc (coupling depth)
    phi  : rotation parameter for an impedance mismatch between the
           resonator and the readout system
    a    : nonlinearity parameter; bifurcation occurs at a = 0.77
    b0   : DC level of S21 away from the resonator
    b1   : frequency-dependent gain variation (linear slope)
    flin : reference frequency for the linear gain term (roughly the
           resonator frequency when a = 0)

    Model (based on fitting code from MUSIC):

    # / (j phi)   (j phi) \ 2
    #|S21|^2 = (b0+b1 x_lin)* |1 -amp*e^ +amp*(e^ -1) |^
    # | ------------ ---- |
    # \ (1+ 2jy) 2 /

    where the nonlinear detuning y solves yg = y + a/(1+y^2) with
    yg = Qr*xg and xg = (f-fr)/fr (see "Response of superconducting
    microresonators with nonlinear kinetic inductance").
    '''
    xlin = (x - flin)/flin
    xg = (x-fr)/fr
    yg = Qr*xg
    y = np.zeros(x.shape[0])
    #find the roots of the y equation above
    for i in range(0,x.shape[0]):
        # equivalent cubic: 4y^3 - 4yg*y^2 + y - (yg+a) = 0; largest real root
        # 4y^3+ -4yg*y^2+ y -(yg+a)
        #roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
        #roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
        #print(roots)
        #roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
        # only care about real roots
        #where_real = np.where(np.imag(roots) == 0)
        #where_real = np.where(np.abs(np.imag(roots)) < 1e-10) #analytic version has some floating point error accumulation
        y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#np.max(np.real(roots[where_real]))
    z = (b0 +b1*xlin)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
    return z
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
    '''
    Linear (a = 0) |S21|^2 resonator model — a simpler, faster variant of
    nonlinear_mag for cases where the nonlinearity is negligible.

    Parameters
    ----------
    x   : frequencies the sweep covers (Hz); may broadcast against array
          parameters (see below)
    fr  : resonator center frequency
    Qr  : resonator quality factor
    amp : Qr/Qc (coupling depth)
    phi : rotation parameter for an impedance mismatch between the
          resonator and the readout system
    b0  : DC level of S21 away from the resonator

    Model (based on fitting code from MUSIC), with plain detuning xg in
    place of the nonlinear y:

    # / (j phi)   (j phi) \ 2
    #|S21|^2 = (b0)* |1 -amp*e^ +amp*(e^ -1) |^
    # | ------------ ---- |
    # \ (1+ 2jxg) 2 /
    '''
    # when parameters are grids (brute-force fitting), reshape x so it
    # broadcasts along its own leading axis
    if not np.isscalar(fr): #vectorize
        x = np.reshape(x,(x.shape[0],1,1,1,1,1))
    detuning = (x - fr) / fr
    rotation = np.exp(1.0j * phi)
    s21 = 1.0 - amp * rotation / (1.0 + 2.0 * 1.0j * detuning * Qr) + amp / 2. * (rotation - 1.0)
    return b0 * np.abs(s21) ** 2
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
    '''
    Complex S21 model for a nonlinear resonator in IQ space.

    Parameters
    ----------
    x   : frequencies the sweep covers (Hz)
    fr  : resonator center frequency
    Qr  : resonator quality factor
    amp : Qr/Qc (coupling depth)
    phi : rotation parameter for an impedance mismatch between the
          resonator and the readout system
    a   : nonlinearity parameter; bifurcation occurs at a = 0.77
    i0, q0 : constants describing an overall phase rotation of the IQ loop
          plus a DC gain offset
    tau : cable delay (s)
    f0  : reference frequency for the cable-delay phase ramp (nominally the
          same as fr, kept as a separate parameter)

    Model (based on fitting code from MUSIC):

    # (-j 2 pi deltaf tau)  / (j phi)   (j phi) \
    #  (i0+j*q0)*e^ *|1 -amp*e^ +amp*(e^ -1) |
    # | ------------ ---- |
    # \ (1+ 2jy) 2 /

    where the nonlinear detuning y solves yg = y + a/(1+y^2) with
    yg = Qr*xg and xg = (f-fr)/fr (see "Response of superconducting
    microresonators with nonlinear kinetic inductance").
    '''
    deltaf = (x - f0)
    xg = (x-fr)/fr
    yg = Qr*xg
    y = np.zeros(x.shape[0])
    #find the roots of the y equation above
    for i in range(0,x.shape[0]):
        # equivalent cubic: 4y^3 - 4yg*y^2 + y - (yg+a) = 0; largest real root
        # 4y^3+ -4yg*y^2+ y -(yg+a)
        #roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
        #roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
        # only care about real roots
        #where_real = np.where(np.imag(roots) == 0)
        #y[i] = np.max(np.real(roots[where_real]))
        y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
    z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
    return z
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
    '''
    Wrapper around the nonlinear_iq model for scipy.optimize.curve_fit,
    which cannot handle complex residuals: evaluates the model and returns
    the real and imaginary parts stacked into one real-valued vector.

    Parameters mirror nonlinear_iq. **keywords is kept only for backward
    compatibility with existing callers and is ignored; the old
    "'tau' in keywords" override was dead code (tau is an explicit
    parameter, so it can never appear in **keywords) and carried a leftover
    debug print — both removed.

    Returns
    -------
    ndarray of shape (2*len(x),): np.hstack((real(z), imag(z))).
    '''
    deltaf = (x - f0)
    xg = (x-fr)/fr
    yg = Qr*xg
    y = np.zeros(x.shape[0])
    # largest real root of 4y^3 - 4yg*y^2 + y - (yg+a) = 0 per frequency
    for i in range(0,x.shape[0]):
        y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
    z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
    real_z = np.real(z)
    imag_z = np.imag(z)
    return np.hstack((real_z,imag_z))
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
    '''
    Brute-force grid search fit of the linear_mag model over a 5-parameter
    grid (fr, Qr, amp, phi, b0), with 1D and 2D chi-squared marginalizations.

    Parameters
    ----------
    x : frequencies in Hz
    z : complex (or magnitude) S21 data
    ranges : low/high bounds per parameter, i.e.
        np.asarray(([f_low,Qr_low,amp_low,phi_low,b0_low],
                    [f_high,Qr_high,amp_high,phi_high,b0_high]))
    n_grid_points : samples per parameter axis. Very slow for n > 10;
        doubling n costs 2**5 times more work (full 5D grid).
    error : optional per-point uncertainty (defaults to ones)
    plot : if True, draw a corner-style plot of the marginalized
        delta-chi-squared surfaces

    Returns
    -------
    dict with 'fit_values', 'fit_values_names', 'sum_dev' (full 5D
    chi-squared grid), 'fit_result', 'marginalized_2d', 'marginalized_1d',
    'evaluated_ranges'.

    To marginalize yourself, minimize over the unwanted axes of sum_dev,
    e.g. for fr:
    np.min(np.min(np.min(np.min(fit['sum_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
    '''
    if error is None:
        error = np.ones(len(x))
    # one evenly spaced axis per fit parameter
    fs = np.linspace(ranges[0][0],ranges[1][0],n_grid_points)
    Qrs = np.linspace(ranges[0][1],ranges[1][1],n_grid_points)
    amps = np.linspace(ranges[0][2],ranges[1][2],n_grid_points)
    phis = np.linspace(ranges[0][3],ranges[1][3],n_grid_points)
    b0s = np.linspace(ranges[0][4],ranges[1][4],n_grid_points)
    evaluated_ranges = np.vstack((fs,Qrs,amps,phis,b0s))
    a,b,c,d,e = np.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
    # linear_mag broadcasts x against the 5D parameter grids
    evaluated = linear_mag(x,a,b,c,d,e)
    data_values = np.reshape(np.abs(z)**2,(abs(z).shape[0],1,1,1,1,1))
    error = np.reshape(error,(abs(z).shape[0],1,1,1,1,1))
    sum_dev = np.sum(((np.sqrt(evaluated)-np.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
    # global minimum of the 5D chi-squared grid = best-fit grid point
    min_index = np.where(sum_dev == np.min(sum_dev))
    index1 = min_index[0][0]
    index2 = min_index[1][0]
    index3 = min_index[2][0]
    index4 = min_index[3][0]
    index5 = min_index[4][0]
    fit_values = np.asarray((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
    fit_values_names = ('f0','Qr','amp','phi','b0')
    fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
    # 1D marginalizations: minimize chi-squared over the other four axes
    marginalized_1d = np.zeros((5,n_grid_points))
    marginalized_1d[0,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 1)
    marginalized_1d[1,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 0)
    marginalized_1d[2,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1),axis = 0)
    marginalized_1d[3,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1),axis = 0)
    marginalized_1d[4,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1),axis = 0)
    # 2D marginalizations (symmetric in the first two indices): minimize
    # over the remaining three axes for every parameter pair
    marginalized_2d = np.zeros((5,5,n_grid_points,n_grid_points))
    #0 _
    #1 x _
    #2 x x _
    #3 x x x _
    #4 x x x x _
    #  0 1 2 3 4
    marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2)
    marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1)
    marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 0)
    marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1)
    marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 0)
    marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 1),axis = 0)
    marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1)
    marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 0)
    marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 1),axis = 0)
    marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = np.min(np.min(np.min(sum_dev,axis = 2),axis = 1),axis = 0)
    if plot:
        levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
        fig_fit = plt.figure(-1)
        axs = fig_fit.subplots(5, 5)
        for i in range(0,5): # y starting from top
            for j in range(0,5): #x starting from left
                if i > j:
                    # lower triangle: 2D delta-chi-squared maps with confidence contours
                    #plt.subplot(5,5,i+1+5*j)
                    #axs[i, j].set_aspect('equal', 'box')
                    extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
                    axs[i,j].imshow(marginalized_2d[i,j,:]-np.min(sum_dev),extent =extent,origin = 'lower', cmap = 'jet')
                    axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-np.min(sum_dev),levels = levels,colors = 'white')
                    axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
                    axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
                    axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
                    if j == 0:
                        axs[i, j].set_ylabel(fit_values_names[i])
                    if i == 4:
                        axs[i, j].set_xlabel("\n"+fit_values_names[j])
                    if i<4:
                        axs[i,j].get_xaxis().set_ticks([])
                    if j>0:
                        axs[i,j].get_yaxis().set_ticks([])
                elif i < j:
                    # upper triangle is unused in a corner plot
                    fig_fit.delaxes(axs[i,j])
        for i in range(0,5):
            # diagonal: 1D delta-chi-squared curves with 1.0 and 2.7 reference lines
            #axes.subplot(5,5,i+1+5*i)
            axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-np.min(sum_dev))
            axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
            axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
            axs[i,i].yaxis.set_label_position("right")
            axs[i,i].yaxis.tick_right()
            axs[i,i].xaxis.set_label_position("top")
            axs[i,i].xaxis.tick_top()
            axs[i,i].set_xlabel(fit_values_names[i])
        #axs[0,0].set_ylabel(fit_values_names[0])
        #axs[4,4].set_xlabel(fit_values_names[4])
        axs[4,4].xaxis.set_label_position("bottom")
        axs[4,4].xaxis.tick_bottom()
    #make a dictionary to return
    fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'sum_dev': sum_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
    return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
    '''
    Fit the nonlinear_iq model to an IQ sweep with scipy curve_fit.

    Parameters
    ----------
    x : frequencies in Hz
    z : complex S21 data
    keywords:
      bounds    -- 2d tuple of low/high bounds for the 9 fit parameters
      x0        -- initial guess; important because the least-squares space
                   over all parameters is complex
      amp_norm  -- if True, normalize out amplitude variation first
                   (useful when the cryostat transfer function is not flat)
      tau       -- force the cable delay to this value (removes tau from the fit)
      fr_guess  -- override just the fr element of the default guess
      tau_guess -- override just the tau element of the default guess

    Returns
    -------
    dict with 'fit' (curve_fit output), 'fit_result', 'x0_result', 'x0', 'z'
    '''
    if ('tau' in keywords):
        use_given_tau = True
        tau = keywords['tau']
    else:
        use_given_tau = False
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(x),50,.01,-np.pi,0,-np.inf,-np.inf,0,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default intial guess
        print("default initial guess used")
        #fr_guess = x[np.argmin(np.abs(z))]
        #x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
        x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
        print(x0)
    if ('fr_guess' in keywords):
        x0[0] = keywords['fr_guess']
    if ('tau_guess' in keywords):
        x0[7] = keywords['tau_guess']
    #Amplitude normalization?
    do_amp_norm = 0
    if ('amp_norm' in keywords):
        amp_norm = keywords['amp_norm']
        if amp_norm == True:
            do_amp_norm = 1
        elif amp_norm == False:
            do_amp_norm = 0
        else:
            print("please specify amp_norm as True or False")
    if do_amp_norm == 1:
        z = amplitude_normalization(x,z)
    # curve_fit cannot handle complex data: stack real and imaginary parts
    z_stacked = np.hstack((np.real(z),np.imag(z)))
    if use_given_tau == True:
        # fix tau: drop it from the parameter vector and bind it into the
        # model via a lambda.
        # NOTE(review): these del statements mutate caller-supplied bounds/x0
        # lists in place — confirm callers don't reuse them afterwards.
        del bounds[0][7]
        del bounds[1][7]
        del x0[7]
        fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_stacked,x0,bounds = bounds)
        fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
        x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
    else:
        fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
        fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
        x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
    #make a dictionary to return
    fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
    return fit_dict
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
    '''
    Same as fit_nonlinear_iq but takes the fine and gain scans separately.

    Parameters
    ----------
    fine_x, fine_z : frequencies (Hz) and complex S21 of the fine scan
    gain_x, gain_z : frequencies (Hz) and complex S21 of the gain scan
    keywords:
      bounds     -- 2d tuple of low/high bounds for the 9 fit parameters
      x0         -- initial guess; important because the least-squares space
                    over all parameters is complex
      amp_norm   -- if True, normalize out amplitude variation first
      fine_z_err, gain_z_err -- per-point complex uncertainties; if BOTH are
                    given the fit is weighted and a reduced chi-squared
                    (fine scan only, 8 fit parameters) is returned

    Returns
    -------
    dict with 'fit', 'fit_result', 'x0_result', 'x0', 'z', 'fit_freqs'
    and, when errors were supplied, 'red_chi_sqr'.
    '''
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(fine_x),500.,.01,-np.pi,0,-np.inf,-np.inf,1*10**-9,np.min(fine_x)],[np.max(fine_x),1000000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(fine_x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default intial guess
        print("default initial guess used")
        #fr_guess = x[np.argmin(np.abs(z))]
        #x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
        x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
        #print(x0)
    #Amplitude normalization?
    do_amp_norm = 0
    if ('amp_norm' in keywords):
        amp_norm = keywords['amp_norm']
        if amp_norm == True:
            do_amp_norm = 1
        elif amp_norm == False:
            do_amp_norm = 0
        else:
            print("please specify amp_norm as True or False")
    if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
        use_err = True
        fine_z_err = keywords['fine_z_err']
        gain_z_err = keywords['gain_z_err']
    else:
        use_err = False
    # combine the two scans for the fit
    x = np.hstack((fine_x,gain_x))
    z = np.hstack((fine_z,gain_z))
    if use_err:
        z_err = np.hstack((fine_z_err,gain_z_err))
    if do_amp_norm == 1:
        z = amplitude_normalization(x,z)
    # curve_fit cannot handle complex data: stack real and imaginary parts
    z_stacked = np.hstack((np.real(z),np.imag(z)))
    if use_err:
        z_err_stacked = np.hstack((np.real(z_err),np.imag(z_err)))
        fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,sigma = z_err_stacked,bounds = bounds)
    else:
        fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
    fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
    x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
    if use_err:
        #only do it for fine data
        #red_chi_sqr = np.sum(z_stacked-np.hstack((np.real(fit_result),np.imag(fit_result))))**2/z_err_stacked**2)/(len(z_stacked)-8.)
        #only do it for fine data
        # reduced chi-squared over the fine scan only: 2*len(fine_z)
        # data points (real+imag) minus 8 fit degrees of freedom
        red_chi_sqr = np.sum((np.hstack((np.real(fine_z),np.imag(fine_z)))-np.hstack((np.real(fit_result[0:len(fine_z)]),np.imag(fit_result[0:len(fine_z)]))))**2/np.hstack((np.real(fine_z_err),np.imag(fine_z_err)))**2)/(len(fine_z)*2.-8.)
    #make a dictionary to return
    if use_err:
        fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
    else:
        fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
    return fit_dict
# same function but double fits so that it can get error and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
    '''
    Fit the nonlinear_iq model and estimate parameter errors by refitting.

    The data is fit once (unweighted) to estimate the residual variance,
    then refit with that variance as a uniform per-point sigma so the
    covariance matrix returned by curve_fit is scaled sensibly.
    (Removed: an unused fr_guess local and a first-pass fit_result/x0_result
    computation that was immediately overwritten.)

    Parameters
    ----------
    x : frequencies in Hz
    z : complex S21 data
    keywords:
      bounds   -- 2d tuple of low/high bounds for the 9 fit parameters
      x0       -- initial guess; important because the least-squares space
                  over all parameters is complex
      amp_norm -- if True, normalize out amplitude variation first

    Returns
    -------
    dict with 'fit' (curve_fit output), 'fit_result', 'x0_result', 'x0', 'z'
    '''
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(x),2000,.01,-np.pi,0,-5,-5,1*10**-9,np.min(x)],[np.max(x),200000,1,np.pi,5,5,5,1*10**-6,np.max(x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default intial guess
        print("default initial guess used")
        x0 = guess_x0_iq_nonlinear(x,z)
    #Amplitude normalization?
    do_amp_norm = 0
    if ('amp_norm' in keywords):
        amp_norm = keywords['amp_norm']
        if amp_norm == True:
            do_amp_norm = 1
        elif amp_norm == False:
            do_amp_norm = 0
        else:
            print("please specify amp_norm as True or False")
    if do_amp_norm == 1:
        z = amplitude_normalization(x,z)
    # curve_fit cannot handle complex data: stack real and imaginary parts
    z_stacked = np.hstack((np.real(z),np.imag(z)))
    # first pass: unweighted fit to estimate the residual variance
    fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
    fit_result_stacked = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
    var = np.sum((z_stacked-fit_result_stacked)**2)/(z_stacked.shape[0] - 1)
    err = np.ones(z_stacked.shape[0])*np.sqrt(var)
    # second pass: refit with the uniform sigma so the covariance is scaled
    fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,err,bounds = bounds)
    fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
    x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
    #make a dictionary to return
    fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
    return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
    '''
    Fit the nonlinear_mag model (|S21|^2) to a magnitude sweep with scipy
    curve_fit. (Removed an unused fr_guess local from the default-guess
    branch; the guess comes entirely from guess_x0_mag_nonlinear.)

    Parameters
    ----------
    x : frequencies in Hz
    z : complex S21 data (fit is done on |z|^2)
    keywords:
      bounds -- 2d tuple of low/high bounds for the 8 fit parameters
      x0     -- initial guess; important because the least-squares space
                over all parameters is complex

    Returns
    -------
    dict with 'fit' (curve_fit output), 'fit_result', 'x0_result', 'x0', 'z'
    '''
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,np.max(x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default intial guess
        print("default initial guess used")
        x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
    # fit in magnitude-squared space
    fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
    fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
    x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
    #make a dictionary to return
    fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
    return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
    '''
    Same as fit_nonlinear_mag but takes the fine and gain scans separately.
    (Removed a leftover debug print of len(z).)

    Parameters
    ----------
    fine_x, fine_z : frequencies (Hz) and complex S21 of the fine scan
    gain_x, gain_z : frequencies (Hz) and complex S21 of the gain scan
    keywords:
      bounds     -- 2d tuple of low/high bounds for the 8 fit parameters
      x0         -- initial guess; important because the least-squares space
                    over all parameters is complex
      fine_z_err, gain_z_err -- per-point complex uncertainties; if BOTH are
                    given the fit is weighted and a reduced chi-squared
                    (fine scan only, 7 fit parameters) is returned

    Returns
    -------
    dict with 'fit', 'fit_result', 'x0_result', 'x0', 'z', 'fit_freqs'
    and, when errors were supplied, 'red_chi_sqr'.
    '''
    if ('bounds' in keywords):
        bounds = keywords['bounds']
    else:
        #define default bounds
        print("default bounds used")
        bounds = ([np.min(fine_x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(fine_x)],[np.max(fine_x),1000000,100,np.pi,5,np.inf,np.inf,np.max(fine_x)])
    if ('x0' in keywords):
        x0 = keywords['x0']
    else:
        #define default intial guess
        print("default initial guess used")
        x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
    if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
        use_err = True
        fine_z_err = keywords['fine_z_err']
        gain_z_err = keywords['gain_z_err']
    else:
        use_err = False
    #stack the scans for curvefit
    x = np.hstack((fine_x,gain_x))
    z = np.hstack((fine_z,gain_z))
    if use_err:
        z_err = np.hstack((fine_z_err,gain_z_err))
        # propagate complex errors into |z|^2 errors
        z_err = np.sqrt(4*np.real(z_err)**2*np.real(z)**2+4*np.imag(z_err)**2*np.imag(z)**2) #propogation of errors left out cross term
        fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,sigma = z_err,bounds = bounds)
    else:
        fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
    fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
    x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
    #compute reduced chi squared
    if use_err:
        #red_chi_sqr = np.sum((np.abs(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
        # only use fine scan for reduced chi squared.
        red_chi_sqr = np.sum((np.abs(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
    #make a dictionary to return
    if use_err:
        fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
    else:
        fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
    return fit_dict
def amplitude_normalization(x,z):
    '''
    Remove smooth amplitude variation from an IQ sweep that includes a
    gain scan.

    Points more than 100 kHz from the median frequency are treated as
    off-resonance; a quadratic is fit to their magnitude, divided out of
    the whole sweep, and the result is rescaled to the median
    off-resonance magnitude.

    x : frequency array in Hz
    z : complex S21 array
    returns the normalized complex array
    '''
    off_res = np.where(np.abs(x - np.median(x)) > 100000) #100kHz away from resonator
    baseline = np.poly1d(np.polyfit(x[off_res], np.abs(z[off_res]), 2))
    scale = np.median(np.abs(z[off_res]))
    return z / baseline(x) * scale
def amplitude_normalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
    '''
    Remove smooth amplitude variation using only the gain scan, and apply
    the same correction to the fine and stream data.

    Gain-scan points more than 100 kHz from the median gain frequency are
    treated as off-resonance; a quadratic is fit to their magnitude and
    divided out of each data set, which is then rescaled to the median
    off-resonance magnitude.

    gain_x, gain_z     : gain scan frequencies (Hz) and complex S21
    fine_x, fine_z     : fine scan frequencies (Hz) and complex S21
    stream_x, stream_z : streaming frequencies (Hz) and complex data
    returns a dict with 'normalized_gain', 'normalized_fine',
    'normalized_stream', and 'poly_data' (the baseline evaluated on gain_x)
    '''
    off_res = np.where(np.abs(gain_x - np.median(gain_x)) > 100000) #100kHz away from resonator
    baseline = np.poly1d(np.polyfit(gain_x[off_res], np.abs(gain_z[off_res]), 2))
    scale = np.median(np.abs(gain_z[off_res]))
    poly_data = baseline(gain_x)
    return {'normalized_gain': gain_z / poly_data * scale,
            'normalized_fine': fine_z / baseline(fine_x) * scale,
            'normalized_stream': stream_z / baseline(stream_x) * scale,
            'poly_data': poly_data}
def guess_x0_iq_nonlinear(x,z,verbose = False):
    '''
    Build a default initial-guess vector x0 for fit_nonlinear_iq from a
    combined fine+gain IQ sweep.

    Splits fine from gain points by frequency spacing, then guesses fr (min
    |S21| in the fine scan), Qr (half-power width), amp (polynomial in the
    dip depth in dB), phi = 0, a = 0, (i0, q0) from the off-resonance level,
    and tau from the slope of the gain-scan phase.

    This is less robust than guess_x0_iq_nonlinear_sep (which takes the
    scans separately); that function is recommended instead.

    Returns [fr, Qr, amp, phi, a, i0, q0, tau, f0] with f0 = fr.
    '''
    # make sure data is sorted from low to high frequency
    sort_index = np.argsort(x)
    x = x[sort_index]
    z = z[sort_index]
    #extract just fine data
    # fine points are those with (near-)minimal frequency spacing
    df = np.abs(x-np.roll(x,1))
    fine_df = np.min(df[np.where(df != 0)])
    fine_z_index = np.where(df<fine_df*1.1)
    fine_z = z[fine_z_index]
    fine_x = x[fine_z_index]
    #extract the gain scan
    gain_z_index = np.where(df>fine_df*1.1)
    gain_z = z[gain_z_index]
    gain_x = x[gain_z_index]
    # NOTE(review): np.arctan2(real, imag) swaps the usual (imag, real)
    # argument order; only the slope of this phase is used for the tau
    # guess below — confirm this is intended.
    gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
    #guess f0
    fr_guess_index = np.argmin(np.abs(z))
    #fr_guess = x[fr_guess_index]
    fr_guess_index_fine = np.argmin(np.abs(fine_z))
    # below breaks if there is not a right and left side in the fine scan
    if fr_guess_index_fine == 0:
        fr_guess_index_fine = len(fine_x)//2
    elif fr_guess_index_fine == (len(fine_x)-1):
        fr_guess_index_fine = len(fine_x)//2
    fr_guess = fine_x[fr_guess_index_fine]
    #guess Q
    # half-power (3 dB) width of the dip in |S21|^2
    mag_max = np.max(np.abs(fine_z)**2)
    mag_min = np.min(np.abs(fine_z)**2)
    mag_3dB = (mag_max+mag_min)/2.
    half_distance = np.abs(fine_z)**2-mag_3dB
    right = half_distance[fr_guess_index_fine:-1]
    left = half_distance[0:fr_guess_index_fine]
    right_index = np.argmin(np.abs(right))+fr_guess_index_fine
    left_index = np.argmin(np.abs(left))
    Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
    Q_guess = fr_guess/Q_guess_Hz
    #guess amp
    # dip depth in dB mapped to amp via an empirical polynomial
    d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4#polynomial fit to amp verus depth
    #guess impedance rotation phi
    phi_guess = 0
    #guess non-linearity parameter
    #might be able to guess this by ratioing the distance between min and max distance between iq points in fine sweep
    a_guess = 0
    #i0 and iq guess
    if np.max(np.abs(fine_z))==np.max(np.abs(z)): #if the resonator has an impedance mismatch rotation that makes the fine greater that the cabel delay
        i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
        q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
    else:
        i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
        q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    #cabel delay guess tau
    # phase vs frequency is linear for a pure delay: fit slope point-to-point
    #y = mx +b
    #m = (y2 - y1)/(x2-x1)
    #b = y-mx
    if len(gain_z)>1: #is there a gain scan?
        m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
        b = gain_phase -m*gain_x
        m_best = np.median(m[~np.isnan(m)])
        tau_guess = m_best/(2*np.pi)
    else:
        tau_guess = 3*10**-9
    if verbose == True:
        print("fr guess  = %.2f MHz" %(fr_guess/10**6))
        print("Q guess   = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
        print("amp guess = %.2f" %amp_guess)
        print("i0 guess  = %.2f" %i0_guess)
        print("q0 guess  = %.2f" %q0_guess)
        print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
    x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
    return x0
def guess_x0_mag_nonlinear(x,z,verbose = False):
    '''
    Build a default initial-guess vector x0 for fit_nonlinear_mag from a
    combined fine+gain sweep.

    Splits fine from gain points by frequency spacing, then guesses fr (min
    |S21| in the fine scan), Qr (half-power width), amp (polynomial in the
    dip depth in dB), phi = 0, a = 0, and the baseline terms b0/b1 from the
    off-resonance magnitude.

    This is less robust than guess_x0_mag_nonlinear_sep (which takes the
    scans separately); that function is recommended instead.

    Returns [fr, Qr, amp, phi, a, b0, b1, flin] with flin = fr.
    '''
    # make sure data is sorted from low to high frequency
    sort_index = np.argsort(x)
    x = x[sort_index]
    z = z[sort_index]
    #extract just fine data
    #this will probably break if there is no fine scan
    df = np.abs(x-np.roll(x,1))
    fine_df = np.min(df[np.where(df != 0)])
    fine_z_index = np.where(df<fine_df*1.1)
    fine_z = z[fine_z_index]
    fine_x = x[fine_z_index]
    #extract the gain scan
    gain_z_index = np.where(df>fine_df*1.1)
    gain_z = z[gain_z_index]
    gain_x = x[gain_z_index]
    # NOTE(review): arctan2(real, imag) swaps the usual (imag, real) order;
    # gain_phase is unused below in this function anyway.
    gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
    #guess f0
    fr_guess_index = np.argmin(np.abs(z))
    #fr_guess = x[fr_guess_index]
    fr_guess_index_fine = np.argmin(np.abs(fine_z))
    # breaks if there is not a right and left side in the fine scan
    if fr_guess_index_fine == 0:
        fr_guess_index_fine = len(fine_x)//2
    elif fr_guess_index_fine == (len(fine_x)-1):
        fr_guess_index_fine = len(fine_x)//2
    fr_guess = fine_x[fr_guess_index_fine]
    #guess Q
    # half-power (3 dB) width of the dip in |S21|^2
    mag_max = np.max(np.abs(fine_z)**2)
    mag_min = np.min(np.abs(fine_z)**2)
    mag_3dB = (mag_max+mag_min)/2.
    half_distance = np.abs(fine_z)**2-mag_3dB
    right = half_distance[fr_guess_index_fine:-1]
    left = half_distance[0:fr_guess_index_fine]
    right_index = np.argmin(np.abs(right))+fr_guess_index_fine
    left_index = np.argmin(np.abs(left))
    Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
    Q_guess = fr_guess/Q_guess_Hz
    #guess amp
    # dip depth in dB mapped to amp via an empirical polynomial
    d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4#polynomial fit to amp verus depth
    #guess impedance rotation phi
    phi_guess = 0
    #guess non-linearity parameter
    #might be able to guess this by ratioing the distance between min and max distance between iq points in fine sweep
    a_guess = 0
    #b0 and b1 guess
    # slope of |S21|^2 across the scan gives b1; median off-res level gives b0
    if len(gain_z)>1:
        xlin = (gain_x - fr_guess)/fr_guess
        b1_guess = (np.abs(gain_z)[-1]**2-np.abs(gain_z)[0]**2)/(xlin[-1]-xlin[0])
    else:
        xlin = (fine_x - fr_guess)/fr_guess
        b1_guess = (np.abs(fine_z)[-1]**2-np.abs(fine_z)[0]**2)/(xlin[-1]-xlin[0])
    # NOTE(review): b0_guess always uses gain_z, even when the len(gain_z)<=1
    # branch above fell back to fine_z — median of an empty array would be
    # nan; confirm callers always provide a gain scan.
    b0_guess = np.median(np.abs(gain_z)**2)
    if verbose == True:
        print("fr guess  = %.2f MHz" %(fr_guess/10**6))
        print("Q guess   = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
        print("amp guess = %.2f" %amp_guess)
        print("phi guess = %.2f" %phi_guess)
        print("b0 guess  = %.2f" %b0_guess)
        print("b1 guess  = %.2f" %b1_guess)
    x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,b0_guess,b1_guess,fr_guess]
    return x0
def guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
    '''
    Generate an initial-guess vector x0 for a nonlinear IQ resonator fit.

    Same as guess_x0_iq_nonlinear except that the fine scan and the gain
    scan are passed as separate arrays, which avoids having to sort out
    which part of a combined sweep is fine and which is gain.

    Parameters
    ----------
    fine_x, fine_z : array
        Frequencies and complex S21 of the fine (on-resonance) sweep.
        Data must be sorted from low to high frequency.
    gain_x, gain_z : array
        Frequencies and complex S21 of the wide (gain/baseline) sweep.
    verbose : bool, optional
        If True, print each individual guess.

    Returns
    -------
    x0 : list
        [fr, Qr, amp, phi, a, i0, q0, tau, fr] initial-guess values.
    '''
    # phase of the gain sweep, used below to estimate the cable delay tau
    # NOTE(review): arguments are arctan2(real, imag) rather than the usual
    # arctan2(imag, real); only the slope of this phase is used, so this
    # flips the sign of tau_guess -- confirm intended convention
    gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
    # guess f0 from the minimum of |S21| in the fine sweep
    fr_guess_index = np.argmin(np.abs(fine_z))
    # protect against guessing the first or last point, which would leave
    # no data on one side of resonance for the Q guess below
    if fr_guess_index == 0:
        fr_guess_index = len(fine_x)//2
    elif fr_guess_index == (len(fine_x)-1):
        fr_guess_index = len(fine_x)//2
    fr_guess = fine_x[fr_guess_index]
    # guess Q from the full width at half of the magnitude-squared depth
    mag_max = np.max(np.abs(fine_z)**2)
    mag_min = np.min(np.abs(fine_z)**2)
    mag_3dB = (mag_max+mag_min)/2.
    half_distance = np.abs(fine_z)**2-mag_3dB
    right = half_distance[fr_guess_index:-1]
    left = half_distance[0:fr_guess_index]
    right_index = np.argmin(np.abs(right))+fr_guess_index
    left_index = np.argmin(np.abs(left))
    Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
    Q_guess = fr_guess/Q_guess_Hz
    # guess amp from the dip depth in dB (empirical polynomial fit of
    # amp versus depth)
    d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4
    # guess impedance rotation phi by fitting a circle to the IQ loop;
    # radius and residual of the fit are not needed here
    xc, yc, _, _ = calibrate.leastsq_circle(np.real(fine_z),np.imag(fine_z))
    # angle between (off_res,off_res)->(0,0) and (off_res,off_res)->(xc,yc)
    # of the fitted circle gives the loop rotation
    off_res_i = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
    off_res_q = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    x1, y1 = -off_res_i,-off_res_q
    x2, y2 = xc-off_res_i,yc-off_res_q
    dot = x1*x2 + y1*y2 # dot product
    det = x1*y2 - y1*x2 # determinant
    phi_guess = np.arctan2(det, dot)
    # if phi is large, re-guess f0: on resonance should be the point
    # farthest from the off-resonance point
    if (np.abs(phi_guess)>0.3):
        dist1 = np.sqrt((np.real(fine_z[0])-np.real(fine_z))**2+(np.imag(fine_z[0])-np.imag(fine_z))**2)
        dist2 = np.sqrt((np.real(fine_z[-1])-np.real(fine_z))**2+(np.imag(fine_z[-1])-np.imag(fine_z))**2)
        fr_guess_index = np.argmax((dist1+dist2))
        fr_guess = fine_x[fr_guess_index]
        # also redo the Q guess on the de-rotated loop
        fine_z_derot = (fine_z-(off_res_i+1.j*off_res_q))*np.exp(1j*(-phi_guess))+(off_res_i+1.j*off_res_q)
        mag_max = np.max(np.abs(fine_z_derot)**2)
        mag_min = np.min(np.abs(fine_z_derot)**2)
        mag_3dB = (mag_max+mag_min)/2.
        half_distance = np.abs(fine_z_derot)**2-mag_3dB
        right = half_distance[np.argmin(np.abs(fine_z_derot)):-1]
        left = half_distance[0:np.argmin(np.abs(fine_z_derot))]
        right_index = np.argmin(np.abs(right))+np.argmin(np.abs(fine_z_derot))
        left_index = np.argmin(np.abs(left))
        Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
        Q_guess = fr_guess/Q_guess_Hz
        # also redo the amp guess with the de-rotated fine sweep
        d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z_derot)))
        amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4
    # non-linearity parameter: no cheap estimate available, start at zero
    # (might be guessable from the ratio of min/max IQ point spacing)
    a_guess = 0
    # i0 and q0 guess: the off-resonance gain level of the fine sweep
    if np.max(np.abs(fine_z))>np.max(np.abs(gain_z)):
        # impedance-mismatch rotation pushes the fine sweep above the
        # cable-delay baseline; use the largest-magnitude fine point instead
        i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
        q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
    else:
        i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
        q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    # cable delay guess tau from the slope of the gain-sweep phase
    # (median of point-to-point slopes; NaNs from repeated x values dropped)
    m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
    m_best = np.median(m[~np.isnan(m)])
    tau_guess = m_best/(2*np.pi)
    if verbose == True:
        print("fr guess = %.3f MHz" %(fr_guess/10**6))
        print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
        print("amp guess = %.2f" %amp_guess)
        print("phi guess = %.2f" %phi_guess)
        print("i0 guess = %.2f" %i0_guess)
        print("q0 guess = %.2f" %q0_guess)
        print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
    x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
    return x0
def guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
    '''
    Generate an initial-guess vector x0 for a nonlinear magnitude fit.

    Same as guess_x0_mag_nonlinear except that the fine scan and the gain
    scan are passed as separate arrays, which avoids having to sort out
    which part of a combined sweep is fine and which is gain.

    Parameters
    ----------
    fine_x, fine_z : array
        Frequencies and complex S21 of the fine (on-resonance) sweep.
        Data must be sorted from low to high frequency.
    gain_x, gain_z : array
        Frequencies and complex S21 of the wide (gain/baseline) sweep.
    verbose : bool, optional
        If True, print each individual guess.

    Returns
    -------
    x0 : list
        [fr, Qr, amp, phi, a, b0, b1, fr] initial-guess values.
        NOTE(review): tau_guess is computed and printed below but is not
        part of the returned x0 -- confirm against the magnitude fitter's
        expected parameter order.
    '''
    # phase of the gain sweep, used below to estimate the cable delay tau
    # NOTE(review): arguments are arctan2(real, imag) rather than the usual
    # arctan2(imag, real); only the slope of this phase is used, so this
    # flips the sign of tau_guess -- confirm intended convention
    gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
    # guess f0 from the minimum of |S21| in the fine sweep
    fr_guess_index = np.argmin(np.abs(fine_z))
    # protect against guessing the first or last data points
    if fr_guess_index == 0:
        fr_guess_index = len(fine_x)//2
    elif fr_guess_index == (len(fine_x)-1):
        fr_guess_index = len(fine_x)//2
    fr_guess = fine_x[fr_guess_index]
    # guess Q from the full width at half of the magnitude-squared depth
    mag_max = np.max(np.abs(fine_z)**2)
    mag_min = np.min(np.abs(fine_z)**2)
    mag_3dB = (mag_max+mag_min)/2.
    half_distance = np.abs(fine_z)**2-mag_3dB
    right = half_distance[fr_guess_index:-1]
    left = half_distance[0:fr_guess_index]
    right_index = np.argmin(np.abs(right))+fr_guess_index
    left_index = np.argmin(np.abs(left))
    Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
    Q_guess = fr_guess/Q_guess_Hz
    # guess amp from the dip depth in dB (polynomial fit of amp versus
    # depth calculated empirically)
    d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4
    # guess impedance rotation phi by fitting a circle to the IQ loop;
    # radius and residual of the fit are not needed here
    xc, yc, _, _ = calibrate.leastsq_circle(np.real(fine_z),np.imag(fine_z))
    # angle between (off_res,off_res)->(0,0) and (off_res,off_res)->(xc,yc)
    # of the fitted circle gives the loop rotation
    off_res_i = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
    off_res_q = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    x1, y1 = -off_res_i,-off_res_q
    x2, y2 = xc-off_res_i,yc-off_res_q
    dot = x1*x2 + y1*y2 # dot product
    det = x1*y2 - y1*x2 # determinant
    phi_guess = np.arctan2(det, dot)
    # if phi is large, re-guess f0: on resonance should be the point
    # farthest from the off-resonance point
    if (np.abs(phi_guess)>0.3):
        dist1 = np.sqrt((np.real(fine_z[0])-np.real(fine_z))**2+(np.imag(fine_z[0])-np.imag(fine_z))**2)
        dist2 = np.sqrt((np.real(fine_z[-1])-np.real(fine_z))**2+(np.imag(fine_z[-1])-np.imag(fine_z))**2)
        fr_guess_index = np.argmax((dist1+dist2))
        fr_guess = fine_x[fr_guess_index]
        # also redo the Q guess on the de-rotated loop
        fine_z_derot = (fine_z-(off_res_i+1.j*off_res_q))*np.exp(1j*(-phi_guess))+(off_res_i+1.j*off_res_q)
        mag_max = np.max(np.abs(fine_z_derot)**2)
        mag_min = np.min(np.abs(fine_z_derot)**2)
        mag_3dB = (mag_max+mag_min)/2.
        half_distance = np.abs(fine_z_derot)**2-mag_3dB
        right = half_distance[np.argmin(np.abs(fine_z_derot)):-1]
        left = half_distance[0:np.argmin(np.abs(fine_z_derot))]
        right_index = np.argmin(np.abs(right))+np.argmin(np.abs(fine_z_derot))
        left_index = np.argmin(np.abs(left))
        Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
        Q_guess = fr_guess/Q_guess_Hz
        # also redo the amp guess with the de-rotated fine sweep
        d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z_derot)))
        amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4
    # non-linearity parameter: no cheap estimate available, start at zero
    # (might be guessable from the ratio of min/max IQ point spacing)
    a_guess = 0
    # b0 and b1 guess: baseline level and its slope in fractional frequency
    xlin = (gain_x - fr_guess)/fr_guess
    b1_guess = (np.abs(gain_z)[-1]**2-np.abs(gain_z)[0]**2)/(xlin[-1]-xlin[0])
    b0_guess = np.max((np.max(np.abs(fine_z)**2),np.max(np.abs(gain_z)**2)))
    # cable delay guess tau from the slope of the gain-sweep phase
    # (median of point-to-point slopes; NaNs from repeated x values dropped)
    m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
    m_best = np.median(m[~np.isnan(m)])
    tau_guess = m_best/(2*np.pi)
    if verbose == True:
        print("fr guess = %.3f MHz" %(fr_guess/10**6))
        print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
        print("amp guess = %.2f" %amp_guess)
        print("phi guess = %.2f" %phi_guess)
        print("b0 guess = %.2f" %b0_guess)
        print("b1 guess = %.2f" %b1_guess)
        print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
    x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,b0_guess,b1_guess,fr_guess]
    return x0
| [
"numpy.sqrt",
"numpy.hstack",
"numpy.argsort",
"numpy.arctan2",
"numpy.poly1d",
"numpy.imag",
"numpy.reshape",
"numpy.isscalar",
"numpy.where",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"numpy.real",
"numpy.linspace",
"numpy.empty",
"numpy.vstack",
"numpy.min",
"numpy.meshgrid",
... | [((1190, 1214), 'numpy.exp', 'np.exp', (['(2.0j * np.pi / 3)'], {}), '(2.0j * np.pi / 3)\n', (1196, 1214), True, 'import numpy as np\n'), ((1218, 1236), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1221, 1236), False, 'from numba import jit\n'), ((2387, 2405), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2390, 2405), False, 'from numba import jit\n'), ((4812, 4830), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4815, 4830), False, 'from numba import jit\n'), ((6209, 6227), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (6212, 6227), False, 'from numba import jit\n'), ((1390, 1416), 'numpy.empty', 'np.empty', (['(2)', 'np.complex128'], {}), '(2, np.complex128)\n', (1398, 1416), True, 'import numpy as np\n'), ((1534, 1557), 'numpy.sqrt', 'np.sqrt', (['(-D / 27 + 0.0j)'], {}), '(-D / 27 + 0.0j)\n', (1541, 1557), True, 'import numpy as np\n'), ((1679, 1696), 'numpy.abs', 'np.abs', (['(w + p / 3)'], {}), '(w + p / 3)\n', (1685, 1696), True, 'import numpy as np\n'), ((1700, 1721), 'numpy.abs', 'np.abs', (['(w * J + p / 3)'], {}), '(w * J + p / 3)\n', (1706, 1721), True, 'import numpy as np\n'), ((1723, 1745), 'numpy.abs', 'np.abs', (['(w * Jc + p / 3)'], {}), '(w * Jc + p / 3)\n', (1729, 1745), True, 'import numpy as np\n'), ((1830, 1896), 'numpy.asarray', 'np.asarray', (['(u + v - z0, u * J + v * Jc - z0, u * Jc + v * J - z0)'], {}), '((u + v - z0, u * J + v * Jc - z0, u * Jc + v * J - z0))\n', (1840, 1896), True, 'import numpy as np\n'), ((3936, 3956), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3944, 3956), True, 'import numpy as np\n'), ((7917, 7937), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (7925, 7937), True, 'import numpy as np\n'), ((9077, 9097), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (9085, 9097), True, 'import numpy as np\n'), ((9506, 9516), 'numpy.real', 'np.real', (['z'], 
{}), '(z)\n', (9513, 9516), True, 'import numpy as np\n'), ((9530, 9540), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (9537, 9540), True, 'import numpy as np\n'), ((9552, 9579), 'numpy.hstack', 'np.hstack', (['(real_z, imag_z)'], {}), '((real_z, imag_z))\n', (9561, 9579), True, 'import numpy as np\n'), ((10282, 10336), 'numpy.linspace', 'np.linspace', (['ranges[0][0]', 'ranges[1][0]', 'n_grid_points'], {}), '(ranges[0][0], ranges[1][0], n_grid_points)\n', (10293, 10336), True, 'import numpy as np\n'), ((10345, 10399), 'numpy.linspace', 'np.linspace', (['ranges[0][1]', 'ranges[1][1]', 'n_grid_points'], {}), '(ranges[0][1], ranges[1][1], n_grid_points)\n', (10356, 10399), True, 'import numpy as np\n'), ((10409, 10463), 'numpy.linspace', 'np.linspace', (['ranges[0][2]', 'ranges[1][2]', 'n_grid_points'], {}), '(ranges[0][2], ranges[1][2], n_grid_points)\n', (10420, 10463), True, 'import numpy as np\n'), ((10473, 10527), 'numpy.linspace', 'np.linspace', (['ranges[0][3]', 'ranges[1][3]', 'n_grid_points'], {}), '(ranges[0][3], ranges[1][3], n_grid_points)\n', (10484, 10527), True, 'import numpy as np\n'), ((10536, 10590), 'numpy.linspace', 'np.linspace', (['ranges[0][4]', 'ranges[1][4]', 'n_grid_points'], {}), '(ranges[0][4], ranges[1][4], n_grid_points)\n', (10547, 10590), True, 'import numpy as np\n'), ((10612, 10649), 'numpy.vstack', 'np.vstack', (['(fs, Qrs, amps, phis, b0s)'], {}), '((fs, Qrs, amps, phis, b0s))\n', (10621, 10649), True, 'import numpy as np\n'), ((10663, 10715), 'numpy.meshgrid', 'np.meshgrid', (['fs', 'Qrs', 'amps', 'phis', 'b0s'], {'indexing': '"""ij"""'}), "(fs, Qrs, amps, phis, b0s, indexing='ij')\n", (10674, 10715), True, 'import numpy as np\n'), ((11268, 11346), 'numpy.asarray', 'np.asarray', (['(fs[index1], Qrs[index2], amps[index3], phis[index4], b0s[index5])'], {}), '((fs[index1], Qrs[index2], amps[index3], phis[index4], b0s[index5]))\n', (11278, 11346), True, 'import numpy as np\n'), ((11510, 11538), 'numpy.zeros', 'np.zeros', (['(5, 
n_grid_points)'], {}), '((5, n_grid_points))\n', (11518, 11538), True, 'import numpy as np\n'), ((12076, 12122), 'numpy.zeros', 'np.zeros', (['(5, 5, n_grid_points, n_grid_points)'], {}), '((5, 5, n_grid_points, n_grid_points))\n', (12084, 12122), True, 'import numpy as np\n'), ((20725, 20752), 'numpy.hstack', 'np.hstack', (['(fine_x, gain_x)'], {}), '((fine_x, gain_x))\n', (20734, 20752), True, 'import numpy as np\n'), ((20760, 20787), 'numpy.hstack', 'np.hstack', (['(fine_z, gain_z)'], {}), '((fine_z, gain_z))\n', (20769, 20787), True, 'import numpy as np\n'), ((23782, 23867), 'scipy.optimize.curve_fit', 'optimization.curve_fit', (['nonlinear_iq_for_fitter', 'x', 'z_stacked', 'x0'], {'bounds': 'bounds'}), '(nonlinear_iq_for_fitter, x, z_stacked, x0, bounds=bounds\n )\n', (23804, 23867), True, 'import scipy.optimize as optimization\n'), ((24380, 24469), 'scipy.optimize.curve_fit', 'optimization.curve_fit', (['nonlinear_iq_for_fitter', 'x', 'z_stacked', 'x0', 'err'], {'bounds': 'bounds'}), '(nonlinear_iq_for_fitter, x, z_stacked, x0, err,\n bounds=bounds)\n', (24402, 24469), True, 'import scipy.optimize as optimization\n'), ((27617, 27644), 'numpy.hstack', 'np.hstack', (['(fine_x, gain_x)'], {}), '((fine_x, gain_x))\n', (27626, 27644), True, 'import numpy as np\n'), ((27652, 27679), 'numpy.hstack', 'np.hstack', (['(fine_z, gain_z)'], {}), '((fine_z, gain_z))\n', (27661, 27679), True, 'import numpy as np\n'), ((29263, 29278), 'numpy.poly1d', 'np.poly1d', (['poly'], {}), '(poly)\n', (29272, 29278), True, 'import numpy as np\n'), ((29823, 29838), 'numpy.poly1d', 'np.poly1d', (['poly'], {}), '(poly)\n', (29832, 29838), True, 'import numpy as np\n'), ((30627, 30640), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (30637, 30640), True, 'import numpy as np\n'), ((30809, 30837), 'numpy.where', 'np.where', (['(df < fine_df * 1.1)'], {}), '(df < fine_df * 1.1)\n', (30817, 30837), True, 'import numpy as np\n'), ((30938, 30966), 'numpy.where', 'np.where', (['(df > 
fine_df * 1.1)'], {}), '(df > fine_df * 1.1)\n', (30946, 30966), True, 'import numpy as np\n'), ((34016, 34029), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (34026, 34029), True, 'import numpy as np\n'), ((34253, 34281), 'numpy.where', 'np.where', (['(df < fine_df * 1.1)'], {}), '(df < fine_df * 1.1)\n', (34261, 34281), True, 'import numpy as np\n'), ((34382, 34410), 'numpy.where', 'np.where', (['(df > fine_df * 1.1)'], {}), '(df > fine_df * 1.1)\n', (34390, 34410), True, 'import numpy as np\n'), ((38834, 38854), 'numpy.arctan2', 'np.arctan2', (['det', 'dot'], {}), '(det, dot)\n', (38844, 38854), True, 'import numpy as np\n'), ((43894, 43914), 'numpy.arctan2', 'np.arctan2', (['det', 'dot'], {}), '(det, dot)\n', (43904, 43914), True, 'import numpy as np\n'), ((5925, 5940), 'numpy.isscalar', 'np.isscalar', (['fr'], {}), '(fr)\n', (5936, 5940), True, 'import numpy as np\n'), ((5966, 6008), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], 1, 1, 1, 1, 1)'], {}), '(x, (x.shape[0], 1, 1, 1, 1, 1))\n', (5976, 6008), True, 'import numpy as np\n'), ((13446, 13460), 'matplotlib.pyplot.figure', 'plt.figure', (['(-1)'], {}), '(-1)\n', (13456, 13460), True, 'import matplotlib.pyplot as plt\n'), ((18529, 18614), 'scipy.optimize.curve_fit', 'optimization.curve_fit', (['nonlinear_iq_for_fitter', 'x', 'z_stacked', 'x0'], {'bounds': 'bounds'}), '(nonlinear_iq_for_fitter, x, z_stacked, x0, bounds=bounds\n )\n', (18551, 18614), True, 'import scipy.optimize as optimization\n'), ((20819, 20854), 'numpy.hstack', 'np.hstack', (['(fine_z_err, gain_z_err)'], {}), '((fine_z_err, gain_z_err))\n', (20828, 20854), True, 'import numpy as np\n'), ((21097, 21203), 'scipy.optimize.curve_fit', 'optimization.curve_fit', (['nonlinear_iq_for_fitter', 'x', 'z_stacked', 'x0'], {'sigma': 'z_err_stacked', 'bounds': 'bounds'}), '(nonlinear_iq_for_fitter, x, z_stacked, x0, sigma=\n z_err_stacked, bounds=bounds)\n', (21119, 21203), True, 'import scipy.optimize as optimization\n'), ((21224, 21309), 
'scipy.optimize.curve_fit', 'optimization.curve_fit', (['nonlinear_iq_for_fitter', 'x', 'z_stacked', 'x0'], {'bounds': 'bounds'}), '(nonlinear_iq_for_fitter, x, z_stacked, x0, bounds=bounds\n )\n', (21246, 21309), True, 'import scipy.optimize as optimization\n'), ((24240, 24285), 'numpy.sum', 'np.sum', (['((z_stacked - fit_result_stacked) ** 2)'], {}), '((z_stacked - fit_result_stacked) ** 2)\n', (24246, 24285), True, 'import numpy as np\n'), ((24317, 24344), 'numpy.ones', 'np.ones', (['z_stacked.shape[0]'], {}), '(z_stacked.shape[0])\n', (24324, 24344), True, 'import numpy as np\n'), ((24345, 24357), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (24352, 24357), True, 'import numpy as np\n'), ((27711, 27746), 'numpy.hstack', 'np.hstack', (['(fine_z_err, gain_z_err)'], {}), '((fine_z_err, gain_z_err))\n', (27720, 27746), True, 'import numpy as np\n'), ((29223, 29243), 'numpy.abs', 'np.abs', (['z[index_use]'], {}), '(z[index_use])\n', (29229, 29243), True, 'import numpy as np\n'), ((29778, 29803), 'numpy.abs', 'np.abs', (['gain_z[index_use]'], {}), '(gain_z[index_use])\n', (29784, 29803), True, 'import numpy as np\n'), ((31049, 31064), 'numpy.real', 'np.real', (['gain_z'], {}), '(gain_z)\n', (31056, 31064), True, 'import numpy as np\n'), ((31065, 31080), 'numpy.imag', 'np.imag', (['gain_z'], {}), '(gain_z)\n', (31072, 31080), True, 'import numpy as np\n'), ((31132, 31141), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (31138, 31141), True, 'import numpy as np\n'), ((31213, 31227), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (31219, 31227), True, 'import numpy as np\n'), ((31886, 31898), 'numpy.abs', 'np.abs', (['left'], {}), '(left)\n', (31892, 31898), True, 'import numpy as np\n'), ((34493, 34508), 'numpy.real', 'np.real', (['gain_z'], {}), '(gain_z)\n', (34500, 34508), True, 'import numpy as np\n'), ((34509, 34524), 'numpy.imag', 'np.imag', (['gain_z'], {}), '(gain_z)\n', (34516, 34524), True, 'import numpy as np\n'), ((34576, 34585), 'numpy.abs', 
'np.abs', (['z'], {}), '(z)\n', (34582, 34585), True, 'import numpy as np\n'), ((34657, 34671), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (34663, 34671), True, 'import numpy as np\n'), ((35256, 35268), 'numpy.abs', 'np.abs', (['left'], {}), '(left)\n', (35262, 35268), True, 'import numpy as np\n'), ((37134, 37149), 'numpy.real', 'np.real', (['gain_z'], {}), '(gain_z)\n', (37141, 37149), True, 'import numpy as np\n'), ((37150, 37165), 'numpy.imag', 'np.imag', (['gain_z'], {}), '(gain_z)\n', (37157, 37165), True, 'import numpy as np\n'), ((37217, 37231), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (37223, 37231), True, 'import numpy as np\n'), ((37850, 37862), 'numpy.abs', 'np.abs', (['left'], {}), '(left)\n', (37856, 37862), True, 'import numpy as np\n'), ((38407, 38422), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (38414, 38422), True, 'import numpy as np\n'), ((38423, 38438), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (38430, 38438), True, 'import numpy as np\n'), ((38982, 38999), 'numpy.abs', 'np.abs', (['phi_guess'], {}), '(phi_guess)\n', (38988, 38999), True, 'import numpy as np\n'), ((39243, 39267), 'numpy.argmax', 'np.argmax', (['(dist1 + dist2)'], {}), '(dist1 + dist2)\n', (39252, 39267), True, 'import numpy as np\n'), ((42227, 42242), 'numpy.real', 'np.real', (['gain_z'], {}), '(gain_z)\n', (42234, 42242), True, 'import numpy as np\n'), ((42243, 42258), 'numpy.imag', 'np.imag', (['gain_z'], {}), '(gain_z)\n', (42250, 42258), True, 'import numpy as np\n'), ((42310, 42324), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (42316, 42324), True, 'import numpy as np\n'), ((42929, 42941), 'numpy.abs', 'np.abs', (['left'], {}), '(left)\n', (42935, 42941), True, 'import numpy as np\n'), ((43467, 43482), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (43474, 43482), True, 'import numpy as np\n'), ((43483, 43498), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (43490, 43498), True, 'import 
numpy as np\n'), ((44042, 44059), 'numpy.abs', 'np.abs', (['phi_guess'], {}), '(phi_guess)\n', (44048, 44059), True, 'import numpy as np\n'), ((44303, 44327), 'numpy.argmax', 'np.argmax', (['(dist1 + dist2)'], {}), '(dist1 + dist2)\n', (44312, 44327), True, 'import numpy as np\n'), ((2042, 2056), 'numpy.real', 'np.real', (['roots'], {}), '(roots)\n', (2049, 2056), True, 'import numpy as np\n'), ((8503, 8543), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * deltaf * tau)'], {}), '(-1.0j * 2 * np.pi * deltaf * tau)\n', (8509, 8543), True, 'import numpy as np\n'), ((9374, 9414), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * deltaf * tau)'], {}), '(-1.0j * 2 * np.pi * deltaf * tau)\n', (9380, 9414), True, 'import numpy as np\n'), ((10800, 10809), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (10806, 10809), True, 'import numpy as np\n'), ((11089, 11104), 'numpy.min', 'np.min', (['sum_dev'], {}), '(sum_dev)\n', (11095, 11104), True, 'import numpy as np\n'), ((12271, 12294), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (12277, 12294), True, 'import numpy as np\n'), ((12384, 12407), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (12390, 12407), True, 'import numpy as np\n'), ((12497, 12520), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (12503, 12520), True, 'import numpy as np\n'), ((12610, 12633), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (12616, 12633), True, 'import numpy as np\n'), ((12723, 12746), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (12729, 12746), True, 'import numpy as np\n'), ((12836, 12859), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (12842, 12859), True, 'import numpy as np\n'), ((12949, 12972), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(3)'}), '(sum_dev, axis=3)\n', (12955, 12972), True, 'import numpy as np\n'), ((13062, 13085), 'numpy.min', 'np.min', 
(['sum_dev'], {'axis': '(3)'}), '(sum_dev, axis=3)\n', (13068, 13085), True, 'import numpy as np\n'), ((13175, 13198), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(3)'}), '(sum_dev, axis=3)\n', (13181, 13198), True, 'import numpy as np\n'), ((13288, 13311), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(2)'}), '(sum_dev, axis=2)\n', (13294, 13311), True, 'import numpy as np\n'), ((18015, 18025), 'numpy.real', 'np.real', (['z'], {}), '(z)\n', (18022, 18025), True, 'import numpy as np\n'), ((18026, 18036), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (18033, 18036), True, 'import numpy as np\n'), ((20976, 20986), 'numpy.real', 'np.real', (['z'], {}), '(z)\n', (20983, 20986), True, 'import numpy as np\n'), ((20987, 20997), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (20994, 20997), True, 'import numpy as np\n'), ((23744, 23754), 'numpy.real', 'np.real', (['z'], {}), '(z)\n', (23751, 23754), True, 'import numpy as np\n'), ((23755, 23765), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (23762, 23765), True, 'import numpy as np\n'), ((25955, 25964), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (25961, 25964), True, 'import numpy as np\n'), ((29326, 29346), 'numpy.abs', 'np.abs', (['z[index_use]'], {}), '(z[index_use])\n', (29332, 29346), True, 'import numpy as np\n'), ((29922, 29947), 'numpy.abs', 'np.abs', (['gain_z[index_use]'], {}), '(gain_z[index_use])\n', (29928, 29947), True, 'import numpy as np\n'), ((30006, 30031), 'numpy.abs', 'np.abs', (['gain_z[index_use]'], {}), '(gain_z[index_use])\n', (30012, 30031), True, 'import numpy as np\n'), ((30096, 30121), 'numpy.abs', 'np.abs', (['gain_z[index_use]'], {}), '(gain_z[index_use])\n', (30102, 30121), True, 'import numpy as np\n'), ((30731, 30744), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (30738, 30744), True, 'import numpy as np\n'), ((30769, 30786), 'numpy.where', 'np.where', (['(df != 0)'], {}), '(df != 0)\n', (30777, 30786), True, 'import numpy as np\n'), ((31557, 31571), 'numpy.abs', 
'np.abs', (['fine_z'], {}), '(fine_z)\n', (31563, 31571), True, 'import numpy as np\n'), ((31597, 31611), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (31603, 31611), True, 'import numpy as np\n'), ((31671, 31685), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (31677, 31685), True, 'import numpy as np\n'), ((31824, 31837), 'numpy.abs', 'np.abs', (['right'], {}), '(right)\n', (31830, 31837), True, 'import numpy as np\n'), ((32530, 32544), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (32536, 32544), True, 'import numpy as np\n'), ((32554, 32563), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (32560, 32563), True, 'import numpy as np\n'), ((34175, 34188), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (34182, 34188), True, 'import numpy as np\n'), ((34213, 34230), 'numpy.where', 'np.where', (['(df != 0)'], {}), '(df != 0)\n', (34221, 34230), True, 'import numpy as np\n'), ((34927, 34941), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (34933, 34941), True, 'import numpy as np\n'), ((34967, 34981), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (34973, 34981), True, 'import numpy as np\n'), ((35041, 35055), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (35047, 35055), True, 'import numpy as np\n'), ((35194, 35207), 'numpy.abs', 'np.abs', (['right'], {}), '(right)\n', (35200, 35207), True, 'import numpy as np\n'), ((36202, 36216), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (36208, 36216), True, 'import numpy as np\n'), ((37536, 37550), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (37542, 37550), True, 'import numpy as np\n'), ((37576, 37590), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (37582, 37590), True, 'import numpy as np\n'), ((37650, 37664), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (37656, 37664), True, 'import numpy as np\n'), ((37793, 37806), 'numpy.abs', 'np.abs', (['right'], {}), '(right)\n', (37799, 37806), True, 'import numpy as np\n'), ((39984, 
39996), 'numpy.abs', 'np.abs', (['left'], {}), '(left)\n', (39990, 39996), True, 'import numpy as np\n'), ((40577, 40591), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (40583, 40591), True, 'import numpy as np\n'), ((40600, 40614), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (40606, 40614), True, 'import numpy as np\n'), ((41093, 41115), 'numpy.roll', 'np.roll', (['gain_phase', '(1)'], {}), '(gain_phase, 1)\n', (41100, 41115), True, 'import numpy as np\n'), ((41124, 41142), 'numpy.roll', 'np.roll', (['gain_x', '(1)'], {}), '(gain_x, 1)\n', (41131, 41142), True, 'import numpy as np\n'), ((42615, 42629), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (42621, 42629), True, 'import numpy as np\n'), ((42655, 42669), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (42661, 42669), True, 'import numpy as np\n'), ((42729, 42743), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (42735, 42743), True, 'import numpy as np\n'), ((42872, 42885), 'numpy.abs', 'np.abs', (['right'], {}), '(right)\n', (42878, 42885), True, 'import numpy as np\n'), ((45015, 45027), 'numpy.abs', 'np.abs', (['left'], {}), '(left)\n', (45021, 45027), True, 'import numpy as np\n'), ((45904, 45926), 'numpy.roll', 'np.roll', (['gain_phase', '(1)'], {}), '(gain_phase, 1)\n', (45911, 45926), True, 'import numpy as np\n'), ((45935, 45953), 'numpy.roll', 'np.roll', (['gain_x', '(1)'], {}), '(gain_x, 1)\n', (45942, 45953), True, 'import numpy as np\n'), ((1927, 1941), 'numpy.imag', 'np.imag', (['roots'], {}), '(roots)\n', (1934, 1941), True, 'import numpy as np\n'), ((11586, 11609), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (11592, 11609), True, 'import numpy as np\n'), ((11689, 11712), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (11695, 11712), True, 'import numpy as np\n'), ((11792, 11815), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (11798, 11815), True, 'import 
numpy as np\n'), ((11895, 11918), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(4)'}), '(sum_dev, axis=4)\n', (11901, 11918), True, 'import numpy as np\n'), ((11998, 12021), 'numpy.min', 'np.min', (['sum_dev'], {'axis': '(3)'}), '(sum_dev, axis=3)\n', (12004, 12021), True, 'import numpy as np\n'), ((16975, 16984), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (16981, 16984), True, 'import numpy as np\n'), ((17019, 17028), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (17025, 17028), True, 'import numpy as np\n'), ((17031, 17040), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (17037, 17040), True, 'import numpy as np\n'), ((17081, 17090), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (17087, 17090), True, 'import numpy as np\n'), ((19653, 19667), 'numpy.min', 'np.min', (['fine_x'], {}), '(fine_x)\n', (19659, 19667), True, 'import numpy as np\n'), ((19711, 19725), 'numpy.min', 'np.min', (['fine_x'], {}), '(fine_x)\n', (19717, 19725), True, 'import numpy as np\n'), ((19728, 19742), 'numpy.max', 'np.max', (['fine_x'], {}), '(fine_x)\n', (19734, 19742), True, 'import numpy as np\n'), ((19784, 19798), 'numpy.max', 'np.max', (['fine_x'], {}), '(fine_x)\n', (19790, 19798), True, 'import numpy as np\n'), ((21051, 21065), 'numpy.real', 'np.real', (['z_err'], {}), '(z_err)\n', (21058, 21065), True, 'import numpy as np\n'), ((21066, 21080), 'numpy.imag', 'np.imag', (['z_err'], {}), '(z_err)\n', (21073, 21080), True, 'import numpy as np\n'), ((22997, 23006), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (23003, 23006), True, 'import numpy as np\n'), ((23040, 23049), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (23046, 23049), True, 'import numpy as np\n'), ((23052, 23061), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (23058, 23061), True, 'import numpy as np\n'), ((23092, 23101), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (23098, 23101), True, 'import numpy as np\n'), ((23281, 23290), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (23287, 23290), True, 'import numpy as np\n'), 
((25469, 25478), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (25475, 25478), True, 'import numpy as np\n'), ((25512, 25521), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (25518, 25521), True, 'import numpy as np\n'), ((25524, 25533), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (25530, 25533), True, 'import numpy as np\n'), ((25565, 25574), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (25571, 25574), True, 'import numpy as np\n'), ((25754, 25763), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (25760, 25763), True, 'import numpy as np\n'), ((27008, 27022), 'numpy.min', 'np.min', (['fine_x'], {}), '(fine_x)\n', (27014, 27022), True, 'import numpy as np\n'), ((27056, 27070), 'numpy.min', 'np.min', (['fine_x'], {}), '(fine_x)\n', (27062, 27070), True, 'import numpy as np\n'), ((27073, 27087), 'numpy.max', 'np.max', (['fine_x'], {}), '(fine_x)\n', (27079, 27087), True, 'import numpy as np\n'), ((27122, 27136), 'numpy.max', 'np.max', (['fine_x'], {}), '(fine_x)\n', (27128, 27136), True, 'import numpy as np\n'), ((27939, 27948), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (27945, 27948), True, 'import numpy as np\n'), ((28052, 28061), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (28058, 28061), True, 'import numpy as np\n'), ((32822, 32840), 'numpy.real', 'np.real', (['fine_z[0]'], {}), '(fine_z[0])\n', (32829, 32840), True, 'import numpy as np\n'), ((32841, 32860), 'numpy.real', 'np.real', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (32848, 32860), True, 'import numpy as np\n'), ((32885, 32903), 'numpy.imag', 'np.imag', (['fine_z[0]'], {}), '(fine_z[0])\n', (32892, 32903), True, 'import numpy as np\n'), ((32904, 32923), 'numpy.imag', 'np.imag', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (32911, 32923), True, 'import numpy as np\n'), ((33091, 33113), 'numpy.roll', 'np.roll', (['gain_phase', '(1)'], {}), '(gain_phase, 1)\n', (33098, 33113), True, 'import numpy as np\n'), ((33122, 33140), 'numpy.roll', 'np.roll', (['gain_x', '(1)'], {}), '(gain_x, 1)\n', (33129, 33140), True, 
'import numpy as np\n'), ((38574, 38592), 'numpy.real', 'np.real', (['fine_z[0]'], {}), '(fine_z[0])\n', (38581, 38592), True, 'import numpy as np\n'), ((38593, 38612), 'numpy.real', 'np.real', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (38600, 38612), True, 'import numpy as np\n'), ((38618, 38636), 'numpy.imag', 'np.imag', (['fine_z[0]'], {}), '(fine_z[0])\n', (38625, 38636), True, 'import numpy as np\n'), ((38637, 38656), 'numpy.imag', 'np.imag', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (38644, 38656), True, 'import numpy as np\n'), ((39397, 39422), 'numpy.exp', 'np.exp', (['(1.0j * -phi_guess)'], {}), '(1.0j * -phi_guess)\n', (39403, 39422), True, 'import numpy as np\n'), ((39573, 39593), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (39579, 39593), True, 'import numpy as np\n'), ((39623, 39643), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (39629, 39643), True, 'import numpy as np\n'), ((39711, 39731), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (39717, 39731), True, 'import numpy as np\n'), ((39906, 39919), 'numpy.abs', 'np.abs', (['right'], {}), '(right)\n', (39912, 39919), True, 'import numpy as np\n'), ((39931, 39951), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (39937, 39951), True, 'import numpy as np\n'), ((40873, 40891), 'numpy.real', 'np.real', (['fine_z[0]'], {}), '(fine_z[0])\n', (40880, 40891), True, 'import numpy as np\n'), ((40892, 40911), 'numpy.real', 'np.real', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (40899, 40911), True, 'import numpy as np\n'), ((40936, 40954), 'numpy.imag', 'np.imag', (['fine_z[0]'], {}), '(fine_z[0])\n', (40943, 40954), True, 'import numpy as np\n'), ((40955, 40974), 'numpy.imag', 'np.imag', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (40962, 40974), True, 'import numpy as np\n'), ((41198, 41209), 'numpy.isnan', 'np.isnan', (['m'], {}), '(m)\n', (41206, 41209), True, 'import numpy as np\n'), ((43634, 43652), 'numpy.real', 'np.real', 
(['fine_z[0]'], {}), '(fine_z[0])\n', (43641, 43652), True, 'import numpy as np\n'), ((43653, 43672), 'numpy.real', 'np.real', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (43660, 43672), True, 'import numpy as np\n'), ((43678, 43696), 'numpy.imag', 'np.imag', (['fine_z[0]'], {}), '(fine_z[0])\n', (43685, 43696), True, 'import numpy as np\n'), ((43697, 43716), 'numpy.imag', 'np.imag', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (43704, 43716), True, 'import numpy as np\n'), ((44428, 44453), 'numpy.exp', 'np.exp', (['(1.0j * -phi_guess)'], {}), '(1.0j * -phi_guess)\n', (44434, 44453), True, 'import numpy as np\n'), ((44604, 44624), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (44610, 44624), True, 'import numpy as np\n'), ((44654, 44674), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (44660, 44674), True, 'import numpy as np\n'), ((44742, 44762), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (44748, 44762), True, 'import numpy as np\n'), ((44937, 44950), 'numpy.abs', 'np.abs', (['right'], {}), '(right)\n', (44943, 44950), True, 'import numpy as np\n'), ((44962, 44982), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (44968, 44982), True, 'import numpy as np\n'), ((46009, 46020), 'numpy.isnan', 'np.isnan', (['m'], {}), '(m)\n', (46017, 46020), True, 'import numpy as np\n'), ((8598, 8616), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (8604, 8616), True, 'import numpy as np\n'), ((9469, 9487), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (9475, 9487), True, 'import numpy as np\n'), ((10923, 10941), 'numpy.sqrt', 'np.sqrt', (['evaluated'], {}), '(evaluated)\n', (10930, 10941), True, 'import numpy as np\n'), ((10942, 10962), 'numpy.sqrt', 'np.sqrt', (['data_values'], {}), '(data_values)\n', (10949, 10962), True, 'import numpy as np\n'), ((15094, 15109), 'numpy.min', 'np.min', (['sum_dev'], {}), '(sum_dev)\n', (15100, 15109), True, 'import numpy as np\n'), 
((29138, 29150), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (29147, 29150), True, 'import numpy as np\n'), ((29683, 29700), 'numpy.median', 'np.median', (['gain_x'], {}), '(gain_x)\n', (29692, 29700), True, 'import numpy as np\n'), ((32037, 32046), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (32043, 32046), True, 'import numpy as np\n'), ((32068, 32077), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (32074, 32077), True, 'import numpy as np\n'), ((32712, 32726), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (32718, 32726), True, 'import numpy as np\n'), ((32774, 32788), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (32780, 32788), True, 'import numpy as np\n'), ((33204, 33215), 'numpy.isnan', 'np.isnan', (['m'], {}), '(m)\n', (33212, 33215), True, 'import numpy as np\n'), ((35407, 35416), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (35413, 35416), True, 'import numpy as np\n'), ((35438, 35447), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (35444, 35447), True, 'import numpy as np\n'), ((38001, 38015), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (38007, 38015), True, 'import numpy as np\n'), ((38037, 38051), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (38043, 38051), True, 'import numpy as np\n'), ((39783, 39803), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (39789, 39803), True, 'import numpy as np\n'), ((39851, 39871), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (39857, 39871), True, 'import numpy as np\n'), ((40763, 40777), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (40769, 40777), True, 'import numpy as np\n'), ((40825, 40839), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (40831, 40839), True, 'import numpy as np\n'), ((43080, 43094), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (43086, 43094), True, 'import numpy as np\n'), ((43116, 43130), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (43122, 43130), True, 'import numpy as 
np\n'), ((44814, 44834), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (44820, 44834), True, 'import numpy as np\n'), ((44882, 44902), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (44888, 44902), True, 'import numpy as np\n'), ((45650, 45664), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (45656, 45664), True, 'import numpy as np\n'), ((45672, 45686), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (45678, 45686), True, 'import numpy as np\n'), ((45743, 45757), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (45749, 45757), True, 'import numpy as np\n'), ((45769, 45783), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (45775, 45783), True, 'import numpy as np\n'), ((8552, 8570), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (8558, 8570), True, 'import numpy as np\n'), ((9423, 9441), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (9429, 9441), True, 'import numpy as np\n'), ((27790, 27800), 'numpy.real', 'np.real', (['z'], {}), '(z)\n', (27797, 27800), True, 'import numpy as np\n'), ((27824, 27834), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (27831, 27834), True, 'import numpy as np\n'), ((35977, 35991), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (35983, 35991), True, 'import numpy as np\n'), ((35999, 36013), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (36005, 36013), True, 'import numpy as np\n'), ((36114, 36128), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (36120, 36128), True, 'import numpy as np\n'), ((36136, 36150), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (36142, 36150), True, 'import numpy as np\n'), ((39031, 39049), 'numpy.real', 'np.real', (['fine_z[0]'], {}), '(fine_z[0])\n', (39038, 39049), True, 'import numpy as np\n'), ((39050, 39065), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (39057, 39065), True, 'import numpy as np\n'), ((39071, 39089), 'numpy.imag', 'np.imag', (['fine_z[0]'], 
{}), '(fine_z[0])\n', (39078, 39089), True, 'import numpy as np\n'), ((39090, 39105), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (39097, 39105), True, 'import numpy as np\n'), ((39136, 39155), 'numpy.real', 'np.real', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (39143, 39155), True, 'import numpy as np\n'), ((39156, 39171), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (39163, 39171), True, 'import numpy as np\n'), ((39177, 39196), 'numpy.imag', 'np.imag', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (39184, 39196), True, 'import numpy as np\n'), ((39197, 39212), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (39204, 39212), True, 'import numpy as np\n'), ((40155, 40169), 'numpy.abs', 'np.abs', (['gain_z'], {}), '(gain_z)\n', (40161, 40169), True, 'import numpy as np\n'), ((40191, 40211), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (40197, 40211), True, 'import numpy as np\n'), ((44091, 44109), 'numpy.real', 'np.real', (['fine_z[0]'], {}), '(fine_z[0])\n', (44098, 44109), True, 'import numpy as np\n'), ((44110, 44125), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (44117, 44125), True, 'import numpy as np\n'), ((44131, 44149), 'numpy.imag', 'np.imag', (['fine_z[0]'], {}), '(fine_z[0])\n', (44138, 44149), True, 'import numpy as np\n'), ((44150, 44165), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (44157, 44165), True, 'import numpy as np\n'), ((44196, 44215), 'numpy.real', 'np.real', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (44203, 44215), True, 'import numpy as np\n'), ((44216, 44231), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (44223, 44231), True, 'import numpy as np\n'), ((44237, 44256), 'numpy.imag', 'np.imag', (['fine_z[-1]'], {}), '(fine_z[-1])\n', (44244, 44256), True, 'import numpy as np\n'), ((44257, 44272), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (44264, 44272), True, 'import numpy as np\n'), ((45186, 45200), 'numpy.abs', 'np.abs', (['gain_z'], {}), 
'(gain_z)\n', (45192, 45200), True, 'import numpy as np\n'), ((45222, 45242), 'numpy.abs', 'np.abs', (['fine_z_derot'], {}), '(fine_z_derot)\n', (45228, 45242), True, 'import numpy as np\n'), ((2126, 2140), 'numpy.imag', 'np.imag', (['roots'], {}), '(roots)\n', (2133, 2140), True, 'import numpy as np\n'), ((4770, 4788), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (4776, 4788), True, 'import numpy as np\n'), ((6102, 6120), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (6108, 6120), True, 'import numpy as np\n'), ((13940, 13955), 'numpy.min', 'np.min', (['sum_dev'], {}), '(sum_dev)\n', (13946, 13955), True, 'import numpy as np\n'), ((14103, 14118), 'numpy.min', 'np.min', (['sum_dev'], {}), '(sum_dev)\n', (14109, 14118), True, 'import numpy as np\n'), ((27772, 27786), 'numpy.real', 'np.real', (['z_err'], {}), '(z_err)\n', (27779, 27786), True, 'import numpy as np\n'), ((27806, 27820), 'numpy.imag', 'np.imag', (['z_err'], {}), '(z_err)\n', (27813, 27820), True, 'import numpy as np\n'), ((4724, 4742), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (4730, 4742), True, 'import numpy as np\n'), ((6052, 6070), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (6058, 6070), True, 'import numpy as np\n'), ((21917, 21936), 'numpy.real', 'np.real', (['fine_z_err'], {}), '(fine_z_err)\n', (21924, 21936), True, 'import numpy as np\n'), ((21937, 21956), 'numpy.imag', 'np.imag', (['fine_z_err'], {}), '(fine_z_err)\n', (21944, 21956), True, 'import numpy as np\n'), ((28514, 28528), 'numpy.abs', 'np.abs', (['fine_z'], {}), '(fine_z)\n', (28520, 28528), True, 'import numpy as np\n'), ((21785, 21800), 'numpy.real', 'np.real', (['fine_z'], {}), '(fine_z)\n', (21792, 21800), True, 'import numpy as np\n'), ((21801, 21816), 'numpy.imag', 'np.imag', (['fine_z'], {}), '(fine_z)\n', (21808, 21816), True, 'import numpy as np\n')] |
from concurrent.futures import ThreadPoolExecutor, wait
import traceback
import os
import sys
import time

nworkers = int(sys.argv[1])  # number of worker threads, from the command line
n = 40000                    # matrix dimension
nruns = 11                   # timed repetitions

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as sla
from test_data import discrete_laplacian

base_array = discrete_laplacian(n)
#print(sla.eigsh(base_array, 25, which = 'LM')[0])
# Store underlying buffers as memoryviews so every worker can rebuild its
# own CSR matrix from shared, read-only memory.
data = memoryview(base_array.data)
indices = memoryview(base_array.indices)
indptr = memoryview(base_array.indptr)
np.random.seed(0)
v0 = memoryview(np.random.rand(n))


def call_arpack(worker_id):
    """Rebuild the sparse Laplacian from the shared buffers and run ARPACK.

    Defined once at module level (the original re-defined it on every timing
    iteration). Exceptions are printed and re-raised so a failing worker is
    visible even though its future's result is never inspected.
    """
    try:
        a = sparse.csr_matrix((data, indices, indptr), shape=(n, n))
        sla.eigsh(a, 25, which='LM', v0=np.asarray(v0))
    except Exception:  # narrowed from bare except: let KeyboardInterrupt through
        traceback.print_exc()
        raise


pool = ThreadPoolExecutor(max_workers=nworkers)
for i in range(nruns):
    start = time.perf_counter()
    # one eigsh job per worker; 'j' avoids shadowing the run counter 'i'
    futures = [pool.submit(call_arpack, j) for j in range(nworkers)]
    wait(futures)
    stop = time.perf_counter()
    print(stop - start, flush=True)
pool.shutdown()  # release worker threads once the benchmark is done
| [
"numpy.random.rand",
"concurrent.futures.ThreadPoolExecutor",
"test_data.discrete_laplacian",
"numpy.asarray",
"time.perf_counter",
"scipy.sparse.csr_matrix",
"numpy.random.seed",
"concurrent.futures.wait",
"traceback.print_exc"
] | [((297, 318), 'test_data.discrete_laplacian', 'discrete_laplacian', (['n'], {}), '(n)\n', (315, 318), False, 'from test_data import discrete_laplacian\n'), ((562, 579), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (576, 579), True, 'import numpy as np\n'), ((623, 663), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'nworkers'}), '(max_workers=nworkers)\n', (641, 663), False, 'from concurrent.futures import ThreadPoolExecutor, wait\n'), ((596, 613), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (610, 613), True, 'import numpy as np\n'), ((971, 990), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (988, 990), False, 'import time\n'), ((1064, 1077), 'concurrent.futures.wait', 'wait', (['futures'], {}), '(futures)\n', (1068, 1077), False, 'from concurrent.futures import ThreadPoolExecutor, wait\n'), ((1089, 1108), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1106, 1108), False, 'import time\n'), ((761, 817), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(data, indices, indptr)'], {'shape': '(n, n)'}), '((data, indices, indptr), shape=(n, n))\n', (778, 817), True, 'import scipy.sparse as sparse\n'), ((918, 939), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (937, 939), False, 'import traceback\n'), ((874, 888), 'numpy.asarray', 'np.asarray', (['v0'], {}), '(v0)\n', (884, 888), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Project: silx (originally pyFAI)
# https://github.com/silx-kit/silx
#
# Copyright (C) 2012-2017 European Synchrotron Radiation Facility, Grenoble, France
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "02/08/2016"
import unittest
import numpy
import logging
logger = logging.getLogger(__name__)
from ..bilinear import BilinearImage
class TestBilinear(unittest.TestCase):
    """basic maximum search test"""
    N = 1000

    def test_max_search_round(self):
        """test maximum search using random points: maximum is at the pixel center"""
        ax = numpy.arange(100) - 40.
        ay = numpy.arange(100) - 60.
        # separable 2d gaussian peaked exactly at pixel (40, 60)
        image = numpy.outer(numpy.exp(-ax * ax / 4000), numpy.exp(-ay * ay / 6000))
        interp = BilinearImage(image)
        ok = 0
        for _ in range(self.N):
            i, j = numpy.random.randint(100), numpy.random.randint(100)
            k, l = interp.local_maxi((i, j))
            if abs(k - 40) > 1e-4 or abs(l - 60) > 1e-4:
                logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
            else:
                logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
                ok += 1
        logger.debug("Success rate: %.1f", 100. * ok / self.N)
        self.assertEqual(ok, self.N, "Maximum is always found")

    def test_max_search_half(self):
        """test maximum search using random points: maximum is at a pixel edge"""
        ax = numpy.arange(100) - 40.5
        ay = numpy.arange(100) - 60.5
        # gaussian whose true maximum sits between pixels, at (40.5, 60.5)
        image = numpy.outer(numpy.exp(-ax * ax / 4000), numpy.exp(-ay * ay / 6000))
        interp = BilinearImage(image)
        ok = 0
        for _ in range(self.N):
            i, j = numpy.random.randint(100), numpy.random.randint(100)
            k, l = interp.local_maxi((i, j))
            if abs(k - 40.5) > 0.5 or abs(l - 60.5) > 0.5:
                logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
            else:
                logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
                ok += 1
        logger.debug("Success rate: %.1f", 100. * ok / self.N)
        self.assertEqual(ok, self.N, "Maximum is always found")

    def test_map(self):
        """map_coordinates reproduces a linear ramp at corners, edge midpoints and pixel centers"""
        N = 100
        y, x = numpy.ogrid[:N, :N + 10]
        img = x + y
        interp = BilinearImage(img)
        # sample exactly at the pixel corners
        cols = x + numpy.zeros_like(y)
        rows = y + numpy.zeros_like(x)
        sampled = interp.map_coordinates((rows, cols))
        self.assertEqual(abs(sampled - img).max(), 0, "images are the same (corners)")
        # sample halfway between pixels along x
        cols = (x[:, :-1] + 0.5) + numpy.zeros_like(y)
        rows = y + numpy.zeros_like(x[:, :-1])
        sampled = interp.map_coordinates((rows, cols))
        self.assertEqual(abs(sampled - img[:, :-1] - 0.5).max(), 0, "images are the same (middle)")
        # sample at the center of each 2x2 block of pixels
        cols = (x[:, :-1] + 0.5) + numpy.zeros_like(y[:-1, :])
        rows = (y[:-1, :] + 0.5) + numpy.zeros_like(x[:, :-1])
        sampled = interp.map_coordinates((rows, cols))
        self.assertEqual(abs(sampled - img[:-1, 1:]).max(), 0, "images are the same (center)")

    def test_profile_grad(self):
        """a profile taken along the diagonal of a linear ramp is itself linear"""
        N = 100
        ramp = numpy.arange(N * N).reshape(N, N)
        interp = BilinearImage(ramp)
        profile = interp.profile_line((0, 0), (N - 1, N - 1))
        expected_len = numpy.ceil(numpy.sqrt(2) * N)
        self.assertEqual(len(profile), expected_len, "Profile has correct length")
        self.assertLess((profile[:-2] - profile[1:-1]).std(), 1e-3, "profile is linear (excluding last point)")

    def test_profile_gaus(self):
        """horizontal/vertical profiles through a separable gaussian match the 1d gaussian"""
        N = 100
        u = numpy.arange(N) - N // 2.0
        gauss = numpy.exp(-u * u / (N * N))
        img = numpy.outer(gauss, gauss)
        interp = BilinearImage(img)
        res_hor = interp.profile_line((N // 2, 0), (N // 2, N - 1))
        res_ver = interp.profile_line((0, N // 2), (N - 1, N // 2))
        self.assertEqual(len(res_hor), N, "Profile has correct length")
        self.assertEqual(len(res_ver), N, "Profile has correct length")
        self.assertLess(abs(res_hor - gauss).max(), 1e-5, "correct horizontal profile")
        self.assertLess(abs(res_ver - gauss).max(), 1e-5, "correct vertical profile")
        # Profile with linewidth=3: averaged over the 3 central columns/rows
        expected_profile = img[:, N // 2 - 1:N // 2 + 2].mean(axis=1)
        res_hor = interp.profile_line((N // 2, 0), (N // 2, N - 1), linewidth=3)
        res_ver = interp.profile_line((0, N // 2), (N - 1, N // 2), linewidth=3)
        self.assertEqual(len(res_hor), N, "Profile has correct length")
        self.assertEqual(len(res_ver), N, "Profile has correct length")
        self.assertLess(abs(res_hor - expected_profile).max(), 1e-5,
                        "correct horizontal profile")
        self.assertLess(abs(res_ver - expected_profile).max(), 1e-5,
                        "correct vertical profile")
def suite():
    """Assemble and return the unittest suite for the bilinear tests."""
    testsuite = unittest.TestSuite()
    # same test methods, in the same order as before
    for method_name in ("test_max_search_round",
                        "test_max_search_half",
                        "test_map",
                        "test_profile_grad",
                        "test_profile_gaus"):
        testsuite.addTest(TestBilinear(method_name))
    return testsuite
| [
"logging.getLogger",
"unittest.TestSuite",
"numpy.sqrt",
"numpy.exp",
"numpy.random.randint",
"numpy.outer",
"numpy.zeros_like",
"numpy.arange"
] | [((1380, 1407), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1397, 1407), False, 'import logging\n'), ((5872, 5892), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (5890, 5892), False, 'import unittest\n'), ((1744, 1768), 'numpy.exp', 'numpy.exp', (['(-a * a / 4000)'], {}), '(-a * a / 4000)\n', (1753, 1768), False, 'import numpy\n'), ((1782, 1806), 'numpy.exp', 'numpy.exp', (['(-b * b / 6000)'], {}), '(-b * b / 6000)\n', (1791, 1806), False, 'import numpy\n'), ((1820, 1839), 'numpy.outer', 'numpy.outer', (['ga', 'gb'], {}), '(ga, gb)\n', (1831, 1839), False, 'import numpy\n'), ((2636, 2660), 'numpy.exp', 'numpy.exp', (['(-a * a / 4000)'], {}), '(-a * a / 4000)\n', (2645, 2660), False, 'import numpy\n'), ((2674, 2698), 'numpy.exp', 'numpy.exp', (['(-b * b / 6000)'], {}), '(-b * b / 6000)\n', (2683, 2698), False, 'import numpy\n'), ((2712, 2731), 'numpy.outer', 'numpy.outer', (['ga', 'gb'], {}), '(ga, gb)\n', (2723, 2731), False, 'import numpy\n'), ((4667, 4694), 'numpy.exp', 'numpy.exp', (['(-x * x / (N * N))'], {}), '(-x * x / (N * N))\n', (4676, 4694), False, 'import numpy\n'), ((4709, 4726), 'numpy.outer', 'numpy.outer', (['g', 'g'], {}), '(g, g)\n', (4720, 4726), False, 'import numpy\n'), ((1671, 1688), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (1683, 1688), False, 'import numpy\n'), ((1707, 1724), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (1719, 1724), False, 'import numpy\n'), ((2561, 2578), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (2573, 2578), False, 'import numpy\n'), ((2598, 2615), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (2610, 2615), False, 'import numpy\n'), ((3470, 3489), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (3486, 3489), False, 'import numpy\n'), ((3508, 3527), 'numpy.zeros_like', 'numpy.zeros_like', (['x'], {}), '(x)\n', (3524, 3527), False, 'import numpy\n'), ((3676, 3695), 'numpy.zeros_like', 
'numpy.zeros_like', (['y'], {}), '(y)\n', (3692, 3695), False, 'import numpy\n'), ((3730, 3757), 'numpy.zeros_like', 'numpy.zeros_like', (['x[:, :-1]'], {}), '(x[:, :-1])\n', (3746, 3757), False, 'import numpy\n'), ((3919, 3946), 'numpy.zeros_like', 'numpy.zeros_like', (['y[:-1, :]'], {}), '(y[:-1, :])\n', (3935, 3946), False, 'import numpy\n'), ((3981, 4008), 'numpy.zeros_like', 'numpy.zeros_like', (['x[:, :-1]'], {}), '(x[:, :-1])\n', (3997, 4008), False, 'import numpy\n'), ((4628, 4643), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (4640, 4643), False, 'import numpy\n'), ((1936, 1961), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (1956, 1961), False, 'import numpy\n'), ((1963, 1988), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (1983, 1988), False, 'import numpy\n'), ((2828, 2853), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (2848, 2853), False, 'import numpy\n'), ((2855, 2880), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (2875, 2880), False, 'import numpy\n'), ((4230, 4249), 'numpy.arange', 'numpy.arange', (['(N * N)'], {}), '(N * N)\n', (4242, 4249), False, 'import numpy\n'), ((4372, 4385), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (4382, 4385), False, 'import numpy\n')] |
## http://weinbe58.github.io/QuSpin/generated/quspin.basis.spin_basis_general.html#quspin.basis.spin_basis_general
## https://doi.org/10.1103/PhysRevX.8.021069
## https://doi.org/10.1103/PhysRevX.8.021070
## consider nearest neighbor Ising
from __future__ import print_function, division
from quspin.operators import hamiltonian # operators
from quspin.basis import spin_basis_general # spin basis constructor
import numpy as np # general math functions
def exact_diag(J, Hx, Hz, Lx, Ly):
    """Exactly diagonalize the 2d Ising model in transverse and longitudinal fields.

    Builds H = J * sum_<ij> Sz_i.Sz_j - Hx * sum_i Sx_i - Hz * sum_i Sz_i on an
    Lx x Ly square lattice with periodic boundaries (nearest neighbours via the
    translation maps), in the zero-momentum symmetry sector, and returns
    (ene, mx, mz, mz0mz1): the two lowest energies, the per-site <Sx> and <Sz>
    magnetizations, and the per-site nearest-neighbour <Sz.Sz> correlation.
    """
    N_2d = Lx * Ly  # number of sites
    ###### setting up user-defined symmetry transformations for 2d lattice ######
    sites = np.arange(N_2d)          # site labels [0,1,2,....]
    x = sites % Lx                    # x positions for sites
    y = sites // Lx                   # y positions for sites
    T_x = (x + 1) % Lx + Lx * y       # translation along x-direction
    T_y = x + Lx * ((y + 1) % Ly)     # translation along y-direction
    P_x = x + Lx * (Ly - y - 1)       # reflection about x-axis
    P_y = (Lx - x - 1) + Lx * y       # reflection about y-axis
    Z = -(sites + 1)                  # spin inversion
    ###### setting up bases ######
    # zero-momentum sector in both directions
    basis_2d = spin_basis_general(N=N_2d, S="1/2", pauli=0,
                                  kxblock=(T_x, 0), kyblock=(T_y, 0))
    ###### setting up hamiltonian ######
    # site-coupling lists: nearest-neighbour zz bonds plus on-site fields
    Jzzs = [[J, i, T_x[i]] for i in range(N_2d)] + [[J, i, T_y[i]] for i in range(N_2d)]
    Hxs = [[-Hx, i] for i in range(N_2d)]
    Hzs = [[-Hz, i] for i in range(N_2d)]
    static = [["zz", Jzzs], ["x", Hxs], ["z", Hzs]]
    # build hamiltonian; symmetry/particle/hermiticity checks skipped for speed
    no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
    H = hamiltonian(static, [], static_fmt="csr", basis=basis_2d,
                    dtype=np.float64, **no_checks)
    # two lowest eigenpairs ("SA": smallest algebraic)
    ene, vec = H.eigsh(time=0.0, which="SA", k=2)
    gs = vec[:, 0]
    norm2 = np.linalg.norm(gs) ** 2
    # uniform magnetizations per site
    int_mx = [[1.0, i] for i in range(N_2d)]
    int_mz = [[1.0, i] for i in range(N_2d)]
    op_mx = hamiltonian([["x", int_mx]], [], static_fmt="csr", basis=basis_2d,
                        dtype=np.float64, **no_checks).tocsr(time=0)
    op_mz = hamiltonian([["z", int_mz]], [], static_fmt="csr", basis=basis_2d,
                        dtype=np.float64, **no_checks).tocsr(time=0)
    mx = (np.conjugate(gs).dot(op_mx.dot(gs)) / norm2).real / N_2d
    mz = (np.conjugate(gs).dot(op_mz.dot(gs)) / norm2).real / N_2d
    # nearest-neighbour sz.sz correlation per site
    int_mz0mz1 = [[1.0, i, T_x[i]] for i in range(N_2d)] + [[1.0, i, T_y[i]] for i in range(N_2d)]
    op_mz0mz1 = hamiltonian([["zz", int_mz0mz1]], [], static_fmt="csr", basis=basis_2d,
                            dtype=np.float64, **no_checks).tocsr(time=0)
    mz0mz1 = (np.conjugate(gs).dot(op_mz0mz1.dot(gs)) / norm2).real / N_2d
    return ene, mx, mz, mz0mz1
def main():
    """Sweep the longitudinal field Hz and print ground-state observables per site."""
    ###### define model parameters ######
    Lx, Ly = 4, 4   # linear dimension of 2d lattice
    N_2d = Lx * Ly  # number of sites
    J = 1.0         # AF Ising coupling
    Hx = 0.10       # transverse field
    # longitudinal field sweep: 401 points on [0, 4]
    for Hz in np.linspace(0.0, 4.0, 401):
        ene, mx, mz, mz0mz1 = exact_diag(J, Hx, Hz, Lx, Ly)
        print(J, Hz, Hx, Lx, Ly, ene[0] / N_2d, mx, mz, mz0mz1)


if __name__ == "__main__":
    main()
| [
"numpy.conjugate",
"quspin.operators.hamiltonian",
"numpy.linspace",
"quspin.basis.spin_basis_general",
"numpy.linalg.norm",
"numpy.arange"
] | [((611, 626), 'numpy.arange', 'np.arange', (['N_2d'], {}), '(N_2d)\n', (620, 626), True, 'import numpy as np\n'), ((1087, 1172), 'quspin.basis.spin_basis_general', 'spin_basis_general', ([], {'N': 'N_2d', 'S': '"""1/2"""', 'pauli': '(0)', 'kxblock': '(T_x, 0)', 'kyblock': '(T_y, 0)'}), "(N=N_2d, S='1/2', pauli=0, kxblock=(T_x, 0), kyblock=(T_y, 0)\n )\n", (1105, 1172), False, 'from quspin.basis import spin_basis_general\n'), ((1640, 1732), 'quspin.operators.hamiltonian', 'hamiltonian', (['static', '[]'], {'static_fmt': '"""csr"""', 'basis': 'basis_2d', 'dtype': 'np.float64'}), "(static, [], static_fmt='csr', basis=basis_2d, dtype=np.float64,\n **no_checks)\n", (1651, 1732), False, 'from quspin.operators import hamiltonian\n'), ((3110, 3136), 'numpy.linspace', 'np.linspace', (['(0.0)', '(4.0)', '(401)'], {}), '(0.0, 4.0, 401)\n', (3121, 3136), True, 'import numpy as np\n'), ((1893, 1918), 'numpy.linalg.norm', 'np.linalg.norm', (['vec[:, 0]'], {}), '(vec[:, 0])\n', (1907, 1918), True, 'import numpy as np\n'), ((2121, 2217), 'quspin.operators.hamiltonian', 'hamiltonian', (['static_mx', '[]'], {'static_fmt': '"""csr"""', 'basis': 'basis_2d', 'dtype': 'np.float64'}), "(static_mx, [], static_fmt='csr', basis=basis_2d, dtype=np.\n float64, **no_checks)\n", (2132, 2217), False, 'from quspin.operators import hamiltonian\n'), ((2234, 2330), 'quspin.operators.hamiltonian', 'hamiltonian', (['static_mz', '[]'], {'static_fmt': '"""csr"""', 'basis': 'basis_2d', 'dtype': 'np.float64'}), "(static_mz, [], static_fmt='csr', basis=basis_2d, dtype=np.\n float64, **no_checks)\n", (2245, 2330), False, 'from quspin.operators import hamiltonian\n'), ((2681, 2781), 'quspin.operators.hamiltonian', 'hamiltonian', (['static_mz0mz1', '[]'], {'static_fmt': '"""csr"""', 'basis': 'basis_2d', 'dtype': 'np.float64'}), "(static_mz0mz1, [], static_fmt='csr', basis=basis_2d, dtype=np.\n float64, **no_checks)\n", (2692, 2781), False, 'from quspin.operators import hamiltonian\n'), ((2345, 2368), 
'numpy.conjugate', 'np.conjugate', (['vec[:, 0]'], {}), '(vec[:, 0])\n', (2357, 2368), True, 'import numpy as np\n'), ((2424, 2447), 'numpy.conjugate', 'np.conjugate', (['vec[:, 0]'], {}), '(vec[:, 0])\n', (2436, 2447), True, 'import numpy as np\n'), ((2800, 2823), 'numpy.conjugate', 'np.conjugate', (['vec[:, 0]'], {}), '(vec[:, 0])\n', (2812, 2823), True, 'import numpy as np\n')] |
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt

# To draw from a normal distribution with arbitrary parameters,
# scale and shift standard-normal samples:
#   X = sigma * Z + mu,  Z ~ N(0, 1)
# (np.random.randn alone would give mean 0 / std 1)
mu, sigma = 5, 10
samples = sigma * np.random.randn(10000) + mu  # mean 5 and standard deviation 10
plt.hist(samples, bins=100)
plt.show() | [
"numpy.random.randn",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show"
] | [((290, 311), 'matplotlib.pyplot.hist', 'plt.hist', (['r'], {'bins': '(100)'}), '(r, bins=100)\n', (298, 311), True, 'import matplotlib.pyplot as plt\n'), ((312, 322), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (320, 322), True, 'import matplotlib.pyplot as plt\n'), ((226, 248), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (241, 248), True, 'import numpy as np\n')] |
""" Set of classes and methods specific to GE scanning environments
"""
import os
from os.path import join
import sys
import time
import re
import logging
import json
import textwrap
from threading import Thread
from queue import Queue
import numpy as np
import pydicom
import nibabel as nib
import zmq
# regEx for GE style file naming
GE_filePattern = re.compile(r'i\d*.MRDC.\d*')
class GE_DirStructure():
    """ Locate and monitor series directories in a GE scanning environment.

    GE scanners write every series (i.e. each unique scan) into its own
    's###' directory. All series directories for a session live inside a
    session directory structured like:

        sessionDir = [baseDir]/p###/e###

    The full path to the session directory must be supplied by the user via
    the `scannerSessionDir` setting in `scannerConfig.yaml` in the
    pyneal_scanner directory.
    """

    def __init__(self, scannerSettings):
        """ Initialize the class

        Parameters
        ----------
        scannerSettings : object
            class attributes represent all of the settings unique to the
            current scanning environment (many of them read from
            `scannerConfig.yaml`)

        See Also
        --------
        general_utils.ScannerSettings
        """
        # the session dir must be configured; bail out otherwise
        if 'scannerSessionDir' not in scannerSettings.allSettings:
            raise Exception("""
            No scannerSessionDir found in scannerConfig.yaml file. Please update that file and try again
            """)
        self.sessionDir = scannerSettings.allSettings['scannerSessionDir']

        # the configured session dir must also exist on disk
        if not os.path.isdir(self.sessionDir):
            raise Exception(f"""
            The specified session dir does not exist:
            {self.sessionDir}
            Please update the scannerConfig.yaml file with a valid directory and try again
            """)

        self.seriesDirs = None

    def print_currentSeries(self):
        """ Print every series dir currently in the sessionDir, along with
        its size on disk and the time elapsed since last modification
        """
        seriesDirs = self._findAllSubdirs(self.sessionDir)
        if seriesDirs is not None:
            # order by modification time, oldest first
            seriesDirs.sort(key=lambda entry: entry[1])
            print('Existing Series Dirs: ')
            now = int(time.time())
            for dirPath, mTime in seriesDirs:
                # directory name only (drop the leading path)
                name = dirPath.split('/')[-1]
                # total size of the files directly inside this series dir
                nBytes = sum(os.path.getsize(join(dirPath, f)) for f in os.listdir(dirPath))
                if nBytes < 1000:
                    size_string = '{:5.1f} bytes'.format(nBytes)
                elif nBytes < 1000000:
                    size_string = '{:5.1f} kB'.format(nBytes / 1000)
                else:
                    size_string = '{:5.1f} MB'.format(nBytes / 1000000)
                # elapsed time since last modification, shown as min/sec
                mins, secs = divmod(now - mTime, 60)
                time_string = '{} min, {} s ago'.format(int(mins), int(secs))
                print('  {}\t{}\t{}'.format(name, size_string, time_string))
            print('\n')

    def _findAllSubdirs(self, parentDir):
        """ Return a list of all subdirectories within the specified
        parentDir, along with the modification time for each

        Parameters
        ----------
        parentDir : string
            full path to the parent directory you want to search

        Returns
        -------
        subDirs : list or None
            each item is a 2-item list of [subDir_path, subDir_modTime];
            None if `parentDir` contains no subdirectories
        """
        paths = [join(parentDir, name) for name in os.listdir(parentDir)
                 if os.path.isdir(join(parentDir, name))]
        if not paths:
            return None
        # pair each path with its last modification time
        return [[p, os.stat(p).st_mtime] for p in paths]

    def waitForSeriesDir(self, interval=.1):
        """ Listen for the creation of a new series directory.

        Once a scan starts, a new series directory will be created in the
        `sessionDir`; block until that happens and return its path.

        Parameters
        ----------
        interval : float, optional
            time, in seconds, to wait between polling for a new directory

        Returns
        -------
        seriesDir : string
            full path to the newly created directory
        """
        startTime = int(time.time())  # timestamp marking the start of the wait
        seriesDir = None
        while seriesDir is None:
            # snapshot of all current subdirectories of the sessionDir
            candidates = [join(self.sessionDir, name) for name in os.listdir(self.sessionDir)
                          if os.path.isdir(join(self.sessionDir, name))]
            # anything modified after startTime is taken to be the new series
            for candidate in candidates:
                if os.path.getmtime(candidate) > startTime:
                    seriesDir = candidate
                    break
            # pause before polling the directories again
            time.sleep(interval)
        return seriesDir

    def get_seriesDirs(self):
        """ Refresh and return the list of series directory names (names
        ONLY, no paths) currently found within the `sessionDir`; sets the
        `seriesDirs` attribute as a side effect

        Returns
        -------
        seriesDirs : list or None
            list of all series directory names, or None if there are none
        """
        subDirs = self._findAllSubdirs(self.sessionDir)
        if subDirs is None:
            self.seriesDirs = None
        else:
            # keep just the directory name from each [path, mtime] pair
            self.seriesDirs = [entry[0].split('/')[-1] for entry in subDirs]
        return self.seriesDirs

    def get_sessionDir(self):
        """ Return the current `sessionDir` for the current session """
        return self.sessionDir
class GE_BuildNifti():
    """ Tools to build a 3D or 4D Nifti image from all of the dicom slice
    images in a directory.

    Input is a path to a series directory containing dicom slices. Image
    parameters, like voxel spacing and dimensions, are obtained automatically
    from info in the dicom tags.

    End result is a Nifti1 formatted 3D (anat) or 4D (func) file in RAS+
    orientation.
    """
    def __init__(self, seriesDir):
        """ Initialize class, and set/obtain basic class attributes like file
        paths and scan parameters

        Parameters
        ----------
        seriesDir : string
            full path to the directory containing the raw dicom slices you
            want to build a Nifti image from
        """
        # initialize attributes
        self.seriesDir = seriesDir
        self.niftiImage = None
        self.affine = None
        self.pixelSpacing = None  # pixel spacing attribute from dicom tag
        self.firstSlice_IOP = None  # first slice ImageOrientationPatient tag
        self.firstSlice_IPP = None  # first slice ImagePositionPatient tag
        self.lastSlice_IPP = None  # last slice ImagePositionPatient tag
        self.nSlicesPerVol = None  # number of slices per volume

        # make a list of all of the dicoms in this dir; only files matching
        # the GE naming pattern (i<...>.MRDC.<...>) are kept
        self.rawDicoms = [f for f in os.listdir(self.seriesDir) if GE_filePattern.match(f)]

        # figure out what type of image this is, 4d or 3d
        self.scanType = self._determineScanType(self.rawDicoms[0])

        # build the image; note: if scanType is anything other than
        # 'anat'/'func', niftiImage simply remains None
        if self.scanType == 'anat':
            self.niftiImage = self.buildAnat(self.rawDicoms)
        elif self.scanType == 'func':
            self.niftiImage = self.buildFunc(self.rawDicoms)

    def buildAnat(self, dicomFiles):
        """ Build a 3D structural/anatomical image from list of dicom files

        Given a list of `dicomFiles`, build a 3D anatomical image from them.
        Figure out the image dimensions and affine transformation to map
        from voxels to mm from the dicom tags

        Parameters
        ----------
        dicomFiles : list
            list containing the file names (file names ONLY, no path) of all
            dicom slice images to be used in constructing the final nifti image

        Returns
        -------
        anatImage_RAS : Nifti1Image
            nifti-1 formated image of the 3D anatomical data, oriented in
            RAS+

        See Also
        --------
        nibabel.nifti1.Nifti1Image()
        """
        # read the first dicom in the list to get overall image dimensions
        dcm = pydicom.dcmread(join(self.seriesDir, dicomFiles[0]),
                              stop_before_pixels=1)
        sliceDims = (getattr(dcm, 'Columns'), getattr(dcm, 'Rows'))
        self.nSlicesPerVol = getattr(dcm, 'ImagesInAcquisition')
        # NOTE(review): sliceThickness and voxSize are read here but never
        # used in this method; the affine is built from PixelSpacing re-read
        # from dcm_first further below
        sliceThickness = getattr(dcm, 'SliceThickness')
        voxSize = getattr(dcm, 'PixelSpacing')

        ### Build 3D array of voxel data
        # create an empty array to store the slice data
        imageMatrix = np.zeros(shape=(
            sliceDims[0],
            sliceDims[1],
            self.nSlicesPerVol),
            dtype='int16')

        # With anatomical data, the dicom tag 'InStackPositionNumber'
        # seems to correspond to the slice index (one-based indexing).
        # But with anatomical data, there are 'InStackPositionNumbers'
        # that may start at 2, and go past the total number of slices.
        # To correct, we first pull out all of the InStackPositionNumbers,
        # and create a dictionary with InStackPositionNumbers:dicomPath
        # keys. Sort by the position numbers, and assemble the image in order
        sliceDict = {}
        for s in dicomFiles:
            dcm = pydicom.dcmread(join(self.seriesDir, s))
            sliceDict[dcm.InStackPositionNumber] = join(self.seriesDir, s)

        # sort by InStackPositionNumber and assemble the image
        for sliceIdx, ISPN in enumerate(sorted(sliceDict.keys())):
            dcm = pydicom.dcmread(sliceDict[ISPN])

            # grab the slices necessary for creating the affine transformation
            # NOTE(review): firstSliceDcm/lastSliceDcm are captured but never
            # used; dcm_first/dcm_last are re-read from disk below instead
            if sliceIdx == 0:
                firstSliceDcm = dcm
            if sliceIdx == self.nSlicesPerVol - 1:
                lastSliceDcm = dcm

            # extract the pixel data as a numpy array. Transpose
            # so that the axes order go [cols, rows]
            pixel_array = dcm.pixel_array.T

            # place in the image matrix
            imageMatrix[:, :, sliceIdx] = pixel_array

        ### create the affine transformation to map from vox to mm space
        # in order to do this, we need to get some values from the first and
        # last slices in the volume.
        firstSlice = sliceDict[sorted(sliceDict.keys())[0]]
        lastSlice = sliceDict[sorted(sliceDict.keys())[-1]]

        dcm_first = pydicom.dcmread(firstSlice)
        dcm_last = pydicom.dcmread(lastSlice)
        self.pixelSpacing = getattr(dcm_first, 'PixelSpacing')
        self.firstSlice_IOP = np.array(getattr(dcm_first,
                                          'ImageOrientationPatient'))
        self.firstSlice_IPP = np.array(getattr(dcm_first,
                                          'ImagePositionPatient'))
        self.lastSlice_IPP = np.array(getattr(dcm_last,
                                         'ImagePositionPatient'))

        # now we can build the affine
        affine = self.buildAffine()

        ### Build a Nifti object, reorder it to RAS+
        anatImage = nib.Nifti1Image(imageMatrix, affine=affine)
        anatImage_RAS = nib.as_closest_canonical(anatImage)  # reorder to RAS+
        print('Nifti image dims: {}'.format(anatImage_RAS.shape))

        return anatImage_RAS

    def buildFunc(self, dicomFiles):
        """ Build a 4D functional image from list of dicom files

        Given a list of `dicomFiles`, build a 4D functional image from them.
        Figure out the image dimensions and affine transformation to map
        from voxels to mm from the dicom tags

        Parameters
        ----------
        dicomFiles : list
            list containing the file names (file names ONLY, no path) of all
            dicom slice images to be used in constructing the final nifti image

        Returns
        -------
        funcImage_RAS : Nifti1Image
            nifti-1 formated image of the 4D functional data, oriented in
            RAS+

        See Also
        --------
        nibabel.nifti1.Nifti1Image()
        """
        # read the first dicom in the list to get overall image dimensions
        dcm = pydicom.dcmread(join(self.seriesDir, dicomFiles[0]),
                              stop_before_pixels=1)
        sliceDims = (getattr(dcm, 'Columns'),
                     getattr(dcm, 'Rows'))
        self.nSlicesPerVol = getattr(dcm, 'ImagesInAcquisition')
        nVols = getattr(dcm, 'NumberOfTemporalPositions')
        # NOTE(review): sliceThickness and voxSize are read but unused here
        sliceThickness = getattr(dcm, 'SliceThickness')
        voxSize = getattr(dcm, 'PixelSpacing')
        TR = getattr(dcm, 'RepetitionTime') / 1000  # dicom stores TR in ms

        ### Build 4D array of voxel data
        # create an empty array to store the slice data
        imageMatrix = np.zeros(shape=(
            sliceDims[0],
            sliceDims[1],
            self.nSlicesPerVol,
            nVols), dtype='int16')
        print('Nifti image dims: {}'.format(imageMatrix.shape))

        ### Assemble 4D matrix
        # loop over every dicom file
        for s in dicomFiles:
            # read in the dcm file
            dcm = pydicom.dcmread(join(self.seriesDir, s))

            # The dicom tag 'InStackPositionNumber' will tell
            # what slice number within a volume this dicom is.
            # Note: InStackPositionNumber uses one-based indexing
            sliceIdx = getattr(dcm, 'InStackPositionNumber') - 1

            # Get the tags needed for the affine transform, if this is
            # either the first or last slice
            if sliceIdx == 0 and self.firstSlice_IOP is None:
                self.pixelSpacing = getattr(dcm, 'PixelSpacing')
                self.firstSlice_IOP = np.array(getattr(dcm,
                                               'ImageOrientationPatient'))
                self.firstSlice_IPP = np.array(getattr(dcm,
                                               'ImagePositionPatient'))
            if sliceIdx == (self.nSlicesPerVol - 1) and self.lastSlice_IPP is None:
                self.lastSlice_IPP = np.array(getattr(dcm,
                                              'ImagePositionPatient'))

            # We can figure out the volume index using the dicom
            # tags "InstanceNumber" (# out of all images), and
            # "ImagesInAcquisition" (# of slices in a single vol).
            # Divide InstanceNumber by ImagesInAcquisition and drop
            # the remainder. Note: InstanceNumber is also one-based index
            instanceIdx = getattr(dcm, 'InstanceNumber') - 1
            volIdx = int(np.floor(instanceIdx / self.nSlicesPerVol))

            # We need our data to be an array that is indexed like [x,y,z,t],
            # so we need to transpose each slice from [row,col] to [col,row]
            # before adding to the full dataset
            imageMatrix[:, :, sliceIdx, volIdx] = dcm.pixel_array.T

        ### create the affine transformation to map from vox to mm space
        affine = self.buildAffine()

        ### Build a Nifti object, reorder it to RAS+
        funcImage = nib.Nifti1Image(imageMatrix, affine=affine)
        funcImage_RAS = nib.as_closest_canonical(funcImage)  # reorder to RAS+

        # add the correct TR to the header (4th zoom is the time dimension)
        pixDims = np.array(funcImage_RAS.header.get_zooms())
        pixDims[3] = TR
        funcImage_RAS.header.set_zooms(pixDims)

        return funcImage_RAS

    def buildAffine(self):
        """ Build the affine matrix that will transform the data to RAS+.

        This function should only be called once the required data has been
        extracted from the dicom tags from the relevant slices. The affine
        matrix is constructed by using the information in the
        ImageOrientationPatient and ImagePositionPatient tags from the first
        and last slices in a volume.

        However, note that those tags will tell you how to orient the image to
        DICOM reference coordinate space, which is LPS+. In order to to get to
        RAS+ we have to invert the first two axes.

        Notes
        -----
        For more info on building this affine, please see the documentation at:
        http://nipy.org/nibabel/dicom/dicom_orientation.html
        http://nipy.org/nibabel/coordinate_systems.html
        """
        ### Get the ImageOrientation values from the first slice,
        # split the row-axis values (0:3) and col-axis values (3:6)
        # and then invert the first and second values of each
        rowAxis_orient = self.firstSlice_IOP[0:3] * np.array([-1, -1, 1])
        colAxis_orient = self.firstSlice_IOP[3:6] * np.array([-1, -1, 1])

        ### Get the voxel size along Row and Col axis
        voxSize_row = float(self.pixelSpacing[0])
        voxSize_col = float(self.pixelSpacing[1])

        ### Figure out the change along the 3rd axis by subtracting the
        # ImagePosition of the last slice from the ImagePosition of the first,
        # then dividing by 1/(total number of slices-1), then invert to
        # make it go from LPS+ to RAS+
        slAxis_orient = (self.firstSlice_IPP - self.lastSlice_IPP) / (1 - self.nSlicesPerVol)
        slAxis_orient = slAxis_orient * np.array([-1, -1, 1])

        ### Invert the first two values of the firstSlice ImagePositionPatient.
        # This tag represents the translation needed to take the origin of our 3D voxel
        # array to the origin of the LPS+ reference coordinate system. Since we want
        # RAS+, need to invert those first two axes
        voxTranslations = self.firstSlice_IPP * np.array([-1, -1, 1])

        ### Assemble the affine matrix
        # NOTE(review): np.matrix is deprecated in favor of np.array; kept
        # here unchanged to preserve the exact returned type
        affine = np.matrix([
            [rowAxis_orient[0] * voxSize_row, colAxis_orient[0] * voxSize_col, slAxis_orient[0], voxTranslations[0]],
            [rowAxis_orient[1] * voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]],
            [rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col, slAxis_orient[2], voxTranslations[2]],
            [0, 0, 0, 1]])

        return affine

    def _determineScanType(self, sliceDcm):
        """ Figure out what type of scan this is, anat or func

        This tool will determine the scan type from a given dicom file.
        Possible scan types are either single 3D volume (anat), or a 4D dataset
        built up of 2D slices (func). The scan type is determined by reading
        the `MRAcquisitionType` tag from the dicom file

        Parameters
        ----------
        sliceDcm : string
            file name of slice dicom file from the current session that you
            would like to open to read the imaging parameters from

        Returns
        -------
        scanType : string
            either 'anat' or 'func' depending on scan type stored in dicom tag
        """
        # read the dicom file (header only; pixel data not needed here)
        dcm = pydicom.dcmread(join(self.seriesDir, sliceDcm),
                              stop_before_pixels=1)

        if getattr(dcm, 'MRAcquisitionType') == '3D':
            scanType = 'anat'
        elif getattr(dcm, 'MRAcquisitionType') == '2D':
            scanType = 'func'
        else:
            # unrecognized acquisition type: abort the whole program
            print('Cannot determine a scan type from this image!')
            sys.exit()

        return scanType

    def get_scanType(self):
        """ Return the scan type """
        return self.scanType

    def get_niftiImage(self):
        """ Return the constructed Nifti Image """
        return self.niftiImage

    def write_nifti(self, outputPath):
        """ Write the nifti file to disk

        Parameters
        ----------
        outputPath : string
            full path, including filename, you want to use to save the nifti
            image
        """
        nib.save(self.niftiImage, outputPath)
        print('Image saved at: {}'.format(outputPath))
class GE_monitorSeriesDir(Thread):
    """ Class to monitor for new slices images to appear in the seriesDir.

    This class will run independently in a separate thread, monitoring a
    specified directory for the appearance of new dicom slice files. Each new
    dicom slice file that appears will be added to the Queue for further
    processing
    """
    def __init__(self, seriesDir, dicomQ, interval=.2):
        """ Initialize the class, and set basic class attributes

        Parameters
        ----------
        seriesDir : string
            full path to the series directory where new dicom files will appear
        dicomQ : object
            instance of python queue class to hold new dicom files before they
            have been processed. This class will add items to that queue.
        interval : float, optional
            time, in seconds, to wait before repolling the seriesDir to check
            for any new files
        """
        # start the thread upon creation
        Thread.__init__(self)

        # set up logger
        self.logger = logging.getLogger(__name__)

        # initialize class parameters
        self.interval = interval            # interval for polling for new files (s)
        self.seriesDir = seriesDir          # full path to series directory
        self.dicomQ = dicomQ                # queue to store dicom files
        self.alive = True                   # thread status
        self.numSlicesAdded = 0             # counter of # of slices added overall
        self.queued_dicom_files = set()     # names of files already placed on queue

    def run(self):
        # Poll the series directory until stop() is called, queuing every
        # file that has not already been queued
        while self.alive:
            # create a set of all files currently in the series directory
            currentDicoms = set(os.listdir(self.seriesDir))

            # grab only the dicoms which haven't already been added to the queue
            newDicoms = [f for f in currentDicoms if f not in self.queued_dicom_files]

            # loop over each of the newDicoms and add them to queue
            for f in newDicoms:
                dicom_fname = join(self.seriesDir, f)
                try:
                    self.dicomQ.put(dicom_fname)
                except Exception:
                    # BUGFIX: was a bare `except:`, which would also swallow
                    # SystemExit/KeyboardInterrupt; catch Exception instead
                    self.logger.error('failed on: {}'.format(dicom_fname))
                    print(sys.exc_info())
                    sys.exit()
            if len(newDicoms) > 0:
                self.logger.debug('Put {} new slices on the queue'.format(len(newDicoms)))
            self.numSlicesAdded += len(newDicoms)

            # now update the set of dicoms added to the queue
            self.queued_dicom_files.update(set(newDicoms))

            # pause before polling again
            time.sleep(self.interval)

    def get_numSlicesAdded(self):
        """ Return the cumulative number of slices added to the queue thus far """
        return self.numSlicesAdded

    def stop(self):
        """ Set the `alive` flag to False, stopping thread """
        self.alive = False
class GE_processSlice(Thread):
    """ Class to process each dicom slice in the dicom queue.

    This class will run in it's own separate thread. While running, it will
    pull slice file names off of the `dicomQ` and process each slice.

    Processing each slice will include reading the dicom file and extracting
    the pixel array and any relevant header information. The pixel array from
    each slice will be stored in an 4d image matrix. Whenever all of the slices
    from a single volume have arrived, that volume will be reformatted
    so that its axes correspond to RAS+. The volume, along with a JSON header
    containing metadata on that volume, will be sent out over the socket
    connection to Pyneal
    """
    def __init__(self, dicomQ, pynealSocket, interval=.2):
        """ Initialize the class

        Parameters
        ----------
        dicomQ : object
            instance of python queue class that will store the dicom slice file
            names. This class will pull items from that queue.
        pynealSocket : object
            instance of ZMQ style socket that will be used to communicate with
            Pyneal. This class will use this socket to send image data and
            headers to Pyneal during the real-time scan.
            See also: general_utils.create_pynealSocket()
        interval : float, optional
            time, in seconds, to wait before repolling the queue to see if
            there are any new file names to process
        """
        # start the thread upon creation
        Thread.__init__(self)

        # set up logger
        self.logger = logging.getLogger(__name__)

        # initialize class parameters
        self.dicomQ = dicomQ
        self.interval = interval
        self.alive = True
        self.pynealSocket = pynealSocket
        self.totalProcessed = 0  # counter for total number of slices processed
        self.volCounter = 0      # index of the next volume expected to complete

        # parameters we'll build once dicom data starts arriving
        self.firstSliceHasArrived = False
        self.nSlicesPerVol = None
        self.sliceDims = None
        self.nVols = None
        self.pixelSpacing = None
        self.completedSlices = None  # store which slices have arrived
        self.imageMatrix = None  # 4D image matrix where new slices stored
        self.affine = None  # var to store RAS+ affine, once created
        self.firstSlice_IOP = None  # first slice ImageOrientationPatient tag
        self.firstSlice_IPP = None  # first slice ImagePositionPatient tag
        self.lastSlice_IPP = None  # last slice ImagePositionPatient tag

    def run(self):
        self.logger.debug('GE_processSlice thread started')

        # function to run on loop
        while self.alive:

            # if there are any slices in the queue, process them
            if not self.dicomQ.empty():
                numSlicesInQueue = self.dicomQ.qsize()

                # loop through all slices currently in queue & process
                for s in range(numSlicesInQueue):
                    dcm_fname = self.dicomQ.get(True, 2)  # retrieve the filename from the queue

                    # ensure the file has copied completely: keep re-stat-ing
                    # until the size stops growing
                    # NOTE(review): there is no sleep between os.stat calls,
                    # so two consecutive stats may see identical sizes and
                    # break out before a slow copy has actually finished —
                    # TODO confirm this is safe on the scanner's filesystem
                    file_size = 0
                    while True:
                        file_info = os.stat(dcm_fname)
                        if file_info.st_size == 0 or file_info.st_size > file_size:
                            file_size = file_info.st_size
                        else:
                            break

                    # process this slice
                    self.processDcmSlice(dcm_fname)

                    # complete this task, thereby clearing it from the queue
                    self.dicomQ.task_done()

                # log how many were processed
                self.totalProcessed += numSlicesInQueue
                self.logger.debug('Processed {} tasks from the queue ({} total)'.format(numSlicesInQueue, self.totalProcessed))

            # pause for a bit
            time.sleep(self.interval)

    def processDcmSlice(self, dcm_fname):
        """ Process a given dicom slice file

        This method will read the slice dicom file, extract the data and
        relevant image parameters, and add the image data to the master image
        matrix.

        Parameters
        ----------
        dcm_fname : string
            full path to the dicom slice file that you want to process
        """
        # if this is the first slice to have arrived, read the dcm header
        # to get relevent information about the series, and to construct
        # the imageMatrix and completedSlices table
        if not self.firstSliceHasArrived:
            self.processFirstSlice(dcm_fname)

        # read in the dicom file
        dcm = pydicom.dcmread(dcm_fname)

        ### Get the Slice Number
        # The dicom tag 'InStackPositionNumber' will tell
        # what slice number within a volume this dicom is.
        # Note: InStackPositionNumber uses one-based indexing,
        # and we want sliceIdx to reflect 0-based indexing
        sliceIdx = getattr(dcm, 'InStackPositionNumber') - 1

        ### Check if you can build the affine using the information that is
        # currently available. We need info from the dicom tags for the first
        # and last slice from any of the 3D volumes
        if self.affine is None and sliceIdx in [0, (self.nSlicesPerVol - 1)]:
            if sliceIdx == 0:
                # store the relevent data from the first slice
                self.firstSlice_IOP = np.array(getattr(dcm,
                                               'ImageOrientationPatient'))
                self.firstSlice_IPP = np.array(getattr(dcm,
                                               'ImagePositionPatient'))
            if sliceIdx == (self.nSlicesPerVol - 1):
                # store the relevent data from the last slice
                self.lastSlice_IPP = np.array(getattr(dcm,
                                              'ImagePositionPatient'))

            # See if you have valid values for all required parameters for the affine
            if all(x is not None for x in [self.firstSlice_IOP, self.firstSlice_IPP, self.lastSlice_IPP, self.pixelSpacing]):
                self.buildAffine()

        ### Get the volume number
        # We can figure out the volume index using the dicom
        # tags "InstanceNumber" (# out of all images), and
        # the total number of slices.
        # Divide InstanceNumber by ImagesInAcquisition and drop
        # the remainder. Note: InstanceNumber is also one-based index
        volIdx = int(int(getattr(dcm, 'InstanceNumber') - 1) / self.nSlicesPerVol)

        ### Place pixel data in imageMatrix
        # transpose the data from numpy standard [row,col] to [col,row]
        self.imageMatrix[:, :, sliceIdx, volIdx] = dcm.pixel_array.T

        # update this slice location in completedSlices
        self.completedSlices[sliceIdx, volIdx] = True

        ### Check if full volume is here, and process if so
        if self.completedSlices[:, self.volCounter].all():
            self.processVolume(self.volCounter)

            # increment volCounter; once every expected volume has been
            # processed, stop the thread
            self.volCounter += 1
            if self.volCounter >= self.nVols:
                self.stop()

    def processFirstSlice(self, dcm_fname):
        """ Extract relevant scanning parameters from the first slice to arrive

        Read the dicom header from the supplied slice to get relevant info
        that pertains to the whole scan series. This only needs to be done once
        per series. Build the imageMatrix and completedSlice table to store
        subsequent slice data as it arrives

        Parameters
        ----------
        dcm_fname : string
            full path to the dicom slice file you want to read to extract info
            from
        """
        # Read the header dicom tags only
        dcmHdr = pydicom.dcmread(dcm_fname, stop_before_pixels=True)

        ### Get series parameters from the dicom tags
        self.nSlicesPerVol = getattr(dcmHdr, 'ImagesInAcquisition')
        self.nVols = getattr(dcmHdr, 'NumberOfTemporalPositions')
        self.pixelSpacing = getattr(dcmHdr, 'PixelSpacing')
        self.tr = getattr(dcmHdr, 'RepetitionTime') / 1000  # convert to sec

        # Note: [cols, rows] to match the order of the transposed pixel_array later on
        self.sliceDims = np.array([getattr(dcmHdr, 'Columns'),
                                   getattr(dcmHdr, 'Rows')])

        ### Build the image matrix and completed slices table
        # NOTE(review): dtype here is np.uint16 while GE_BuildNifti uses
        # 'int16' for the same kind of matrix — confirm which is intended
        self.imageMatrix = np.zeros(shape=(self.sliceDims[0],
                                           self.sliceDims[1],
                                           self.nSlicesPerVol,
                                           self.nVols), dtype=np.uint16)
        self.completedSlices = np.zeros(shape=(self.nSlicesPerVol,
                                               self.nVols), dtype=bool)
        self.logger.debug('Incoming 4D series dimensions: {}'.format(self.imageMatrix.shape))

        ### Update the flow control flag
        self.firstSliceHasArrived = True

    def buildAffine(self):
        """ Build the affine matrix that will transform the data to RAS+.

        This function should only be called once the required data has been
        extracted from the dicom tags from the relevant slices. The affine
        matrix is constructed by using the information in the
        ImageOrientationPatient and ImagePositionPatient tags from the first
        and last slices in a volume.

        However, note that those tags will tell you how to orient the image to
        DICOM reference coordinate space, which is LPS+. In order to to get to
        RAS+ we have to invert the first two axes.

        Notes
        -----
        For more info on building this affine, please see the documentation at:
        http://nipy.org/nibabel/dicom/dicom_orientation.html
        http://nipy.org/nibabel/coordinate_systems.html
        """
        ### Get the ImageOrientation values from the first slice,
        # split the row-axis values (0:3) and col-axis values (3:6)
        # and then invert the first and second values of each
        rowAxis_orient = self.firstSlice_IOP[0:3] * np.array([-1, -1, 1])
        colAxis_orient = self.firstSlice_IOP[3:6] * np.array([-1, -1, 1])

        ### Get the voxel size along Row and Col axis
        voxSize_row = float(self.pixelSpacing[0])
        voxSize_col = float(self.pixelSpacing[1])

        ### Figure out the change along the 3rd axis by subtracting the
        # ImagePosition of the last slice from the ImagePosition of the first,
        # then dividing by 1/(total number of slices-1), then invert to
        # make it go from LPS+ to RAS+
        slAxis_orient = (self.firstSlice_IPP - self.lastSlice_IPP) / (1 - self.nSlicesPerVol)
        slAxis_orient = slAxis_orient * np.array([-1, -1, 1])

        ### Invert the first two values of the firstSlice ImagePositionPatient.
        # This tag represents the translation needed to take the origin of our 3D voxel
        # array to the origin of the LPS+ reference coordinate system. Since we want
        # RAS+, need to invert those first two axes
        voxTranslations = self.firstSlice_IPP * np.array([-1, -1, 1])

        ### Assemble the affine matrix
        # NOTE(review): np.matrix is deprecated in favor of np.array; kept
        # here unchanged to preserve the exact stored type
        self.affine = np.matrix([
            [rowAxis_orient[0] * voxSize_row, colAxis_orient[0] * voxSize_col, slAxis_orient[0], voxTranslations[0]],
            [rowAxis_orient[1] * voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]],
            [rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col, slAxis_orient[2], voxTranslations[2]],
            [0, 0, 0, 1]])

    def processVolume(self, volIdx):
        """ Process a single 3D timepoint from the series

        Extract the 3D numpy array of voxel data for the current volume (set by
        self.volCounter attribute). Reorder the voxel data so that it is RAS+,
        build a header JSON object, and then send both the header and the voxel
        data out over the socket connection to Pyneal

        Parameters
        ----------
        volIdx : int
            index (0-based) of the volume you want to process
        """
        self.logger.info('Volume {} processing'.format(volIdx))

        ### Prep the voxel data by extracting this vol from the imageMatrix,
        # and then converting to a Nifti1 object in order to set the voxel
        # order to RAS+, then get the voxel data as contiguous numpy array
        thisVol = self.imageMatrix[:, :, :, volIdx]
        thisVol_nii = nib.Nifti1Image(thisVol, self.affine)
        thisVol_RAS = nib.as_closest_canonical(thisVol_nii)  # make RAS+
        thisVol_RAS_data = np.ascontiguousarray(thisVol_RAS.get_fdata())

        ### Create a header with metadata info
        volHeader = {
            'volIdx': volIdx,
            'TR': str(self.tr),
            'dtype': str(thisVol_RAS_data.dtype),
            'shape': thisVol_RAS_data.shape,
            'affine': json.dumps(thisVol_RAS.affine.tolist())}

        ### Send the voxel array and header to the pynealSocket
        self.sendVolToPynealSocket(volHeader, thisVol_RAS_data)

    def sendVolToPynealSocket(self, volHeader, voxelArray):
        """ Send the volume data to Pyneal

        Send the image data and header information for the specified volume to
        Pyneal via the `pynealSocket`.

        Parameters
        ----------
        volHeader : dict
            key:value pairs for all of the relevant metadata for this volume
        voxelArray : numpy array
            3D numpy array of voxel data from the volume, reoriented to RAS+
        """
        self.logger.debug('TO pynealSocket: vol {}'.format(volHeader['volIdx']))

        ### Send data out the socket, listen for response
        # header goes first (as a multipart message), then the raw array
        self.pynealSocket.send_json(volHeader, zmq.SNDMORE)  # header as json
        self.pynealSocket.send(voxelArray, flags=0, copy=False, track=False)
        pynealSocketResponse = self.pynealSocket.recv_string()

        # log the success
        self.logger.debug('FROM pynealSocket: {}'.format(pynealSocketResponse))

    def stop(self):
        """ set the `alive` flag to False, stopping the thread """
        self.alive = False
def GE_launch_rtfMRI(scannerSettings, scannerDirs):
    """ Launch a real-time session in a GE environment.

    This method should be called from pynealScanner.py before starting the
    scanner. Once called, this method will take care of:
        - monitoring the `sessionDir` for a new series directory to appear (and
        then returing the name of the new series dir)
        - set up the `pynealSocket` -- socket connection to send volume data to
        Pyneal
        - creating a Queue to store newly arriving DICOM files
        - start a separate thread to monitor the new `seriesDir`
        - start a separate thread to process DICOMs that are in the Queue

    Parameters
    ----------
    scannerSettings : object
        class attributes represent all of the settings unique to the
        current scanning environment (many of them read from
        `scannerConfig.yaml`). Returned from `general_utils.ScannerSettings()``
    scannerDirs : object
        instance of `GE_utils.GE_DirStructure`. Has attributes for the relvant
        paths for the current session. `scannerDirs` is one of the variables
        returned by running `general_utils.initializeSession()`

    See Also
    --------
    general_utils.ScannerSettings()
    general_utils.initializeSession()
    """
    # Create a reference to the logger. This assumes the logger has already
    # been created and customized by pynealScanner.py
    logger = logging.getLogger(__name__)

    #### SET UP PYNEAL SOCKET (this is what we'll use to
    #### send data (e.g. header, volume voxel data) to remote connections)
    # figure out host and port number to use
    host = scannerSettings.get_pynealSocketHost()
    port = scannerSettings.get_pynealSocketPort()
    logger.debug('Pyneal Host: {}'.format(host))
    logger.debug('Pyneal Socket Port: {}'.format(port))

    # create a socket connection
    # (imported here, at function scope, to avoid a circular import with
    # general_utils at module load time — presumably; verify before moving)
    from .general_utils import create_pynealSocket
    pynealSocket = create_pynealSocket(host, port)
    logger.debug('Created pynealSocket')

    # wait for remote to connect on pynealSocket
    # NOTE(review): this handshake loop has no timeout and will block
    # indefinitely if Pyneal is not listening on the other end
    logger.info('Connecting to pynealSocket...')
    while True:
        msg = 'hello from pyneal_scanner'
        pynealSocket.send_string(msg)
        msgResponse = pynealSocket.recv_string()
        if msgResponse == msg:
            break
    logger.info('pynealSocket connected')

    ### Wait for a new series directory appear
    logger.info('Waiting for new seriesDir...')
    seriesDir = scannerDirs.waitForSeriesDir()
    logger.info('New Series Directory: {}'.format(seriesDir))

    ### Start threads to A) watch for new slices, and B) process
    # volumes as they appear
    # initialize the dicom queue to keep store newly arrived
    # dicom slices, and keep track of which have been processed
    dicomQ = Queue()

    # create instance of class that will monitor seriesDir. Pass in
    # a copy of the dicom queue. Start the thread going
    scanWatcher = GE_monitorSeriesDir(seriesDir, dicomQ)
    scanWatcher.start()

    # create an instance of the class that will grab slice dicoms
    # from the queue, reformat the data, and pass over the socket
    # to pyneal. Start the thread going
    sliceProcessor = GE_processSlice(dicomQ, pynealSocket)
    sliceProcessor.start()
| [
"logging.getLogger",
"re.compile",
"nibabel.as_closest_canonical",
"time.sleep",
"numpy.array",
"sys.exc_info",
"sys.exit",
"threading.Thread.__init__",
"os.listdir",
"pydicom.dcmread",
"os.path.isdir",
"numpy.matrix",
"nibabel.save",
"numpy.floor",
"nibabel.Nifti1Image",
"os.path.getm... | [((356, 385), 're.compile', 're.compile', (['"""i\\\\d*.MRDC.\\\\d*"""'], {}), "('i\\\\d*.MRDC.\\\\d*')\n", (366, 385), False, 'import re\n'), ((40994, 41021), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (41011, 41021), False, 'import logging\n'), ((42355, 42362), 'queue.Queue', 'Queue', ([], {}), '()\n', (42360, 42362), False, 'from queue import Queue\n'), ((10831, 10910), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sliceDims[0], sliceDims[1], self.nSlicesPerVol)', 'dtype': '"""int16"""'}), "(shape=(sliceDims[0], sliceDims[1], self.nSlicesPerVol), dtype='int16')\n", (10839, 10910), True, 'import numpy as np\n'), ((12732, 12759), 'pydicom.dcmread', 'pydicom.dcmread', (['firstSlice'], {}), '(firstSlice)\n', (12747, 12759), False, 'import pydicom\n'), ((12779, 12805), 'pydicom.dcmread', 'pydicom.dcmread', (['lastSlice'], {}), '(lastSlice)\n', (12794, 12805), False, 'import pydicom\n'), ((13384, 13427), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['imageMatrix'], {'affine': 'affine'}), '(imageMatrix, affine=affine)\n', (13399, 13427), True, 'import nibabel as nib\n'), ((13452, 13487), 'nibabel.as_closest_canonical', 'nib.as_closest_canonical', (['anatImage'], {}), '(anatImage)\n', (13476, 13487), True, 'import nibabel as nib\n'), ((15049, 15139), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sliceDims[0], sliceDims[1], self.nSlicesPerVol, nVols)', 'dtype': '"""int16"""'}), "(shape=(sliceDims[0], sliceDims[1], self.nSlicesPerVol, nVols),\n dtype='int16')\n", (15057, 15139), True, 'import numpy as np\n'), ((17425, 17468), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['imageMatrix'], {'affine': 'affine'}), '(imageMatrix, affine=affine)\n', (17440, 17468), True, 'import nibabel as nib\n'), ((17493, 17528), 'nibabel.as_closest_canonical', 'nib.as_closest_canonical', (['funcImage'], {}), '(funcImage)\n', (17517, 17528), True, 'import nibabel as nib\n'), ((19984, 20344), 'numpy.matrix', 'np.matrix', (['[[rowAxis_orient[0] * 
voxSize_row, colAxis_orient[0] * voxSize_col,\n slAxis_orient[0], voxTranslations[0]], [rowAxis_orient[1] * voxSize_row,\n colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]],\n [rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col,\n slAxis_orient[2], voxTranslations[2]], [0, 0, 0, 1]]'], {}), '([[rowAxis_orient[0] * voxSize_row, colAxis_orient[0] *\n voxSize_col, slAxis_orient[0], voxTranslations[0]], [rowAxis_orient[1] *\n voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1],\n voxTranslations[1]], [rowAxis_orient[2] * voxSize_row, colAxis_orient[2\n ] * voxSize_col, slAxis_orient[2], voxTranslations[2]], [0, 0, 0, 1]])\n', (19993, 20344), True, 'import numpy as np\n'), ((22069, 22106), 'nibabel.save', 'nib.save', (['self.niftiImage', 'outputPath'], {}), '(self.niftiImage, outputPath)\n', (22077, 22106), True, 'import nibabel as nib\n'), ((23171, 23192), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (23186, 23192), False, 'from threading import Thread\n'), ((23240, 23267), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (23257, 23267), False, 'import logging\n'), ((26765, 26786), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (26780, 26786), False, 'from threading import Thread\n'), ((26834, 26861), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (26851, 26861), False, 'import logging\n'), ((30007, 30033), 'pydicom.dcmread', 'pydicom.dcmread', (['dcm_fname'], {}), '(dcm_fname)\n', (30022, 30033), False, 'import pydicom\n'), ((33160, 33211), 'pydicom.dcmread', 'pydicom.dcmread', (['dcm_fname'], {'stop_before_pixels': '(True)'}), '(dcm_fname, stop_before_pixels=True)\n', (33175, 33211), False, 'import pydicom\n'), ((33839, 33946), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.sliceDims[0], self.sliceDims[1], self.nSlicesPerVol, self.nVols)', 'dtype': 'np.uint16'}), '(shape=(self.sliceDims[0], 
self.sliceDims[1], self.nSlicesPerVol,\n self.nVols), dtype=np.uint16)\n', (33847, 33946), True, 'import numpy as np\n'), ((34082, 34142), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.nSlicesPerVol, self.nVols)', 'dtype': 'bool'}), '(shape=(self.nSlicesPerVol, self.nVols), dtype=bool)\n', (34090, 34142), True, 'import numpy as np\n'), ((36596, 36956), 'numpy.matrix', 'np.matrix', (['[[rowAxis_orient[0] * voxSize_row, colAxis_orient[0] * voxSize_col,\n slAxis_orient[0], voxTranslations[0]], [rowAxis_orient[1] * voxSize_row,\n colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]],\n [rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col,\n slAxis_orient[2], voxTranslations[2]], [0, 0, 0, 1]]'], {}), '([[rowAxis_orient[0] * voxSize_row, colAxis_orient[0] *\n voxSize_col, slAxis_orient[0], voxTranslations[0]], [rowAxis_orient[1] *\n voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1],\n voxTranslations[1]], [rowAxis_orient[2] * voxSize_row, colAxis_orient[2\n ] * voxSize_col, slAxis_orient[2], voxTranslations[2]], [0, 0, 0, 1]])\n', (36605, 36956), True, 'import numpy as np\n'), ((37879, 37916), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['thisVol', 'self.affine'], {}), '(thisVol, self.affine)\n', (37894, 37916), True, 'import nibabel as nib\n'), ((37939, 37976), 'nibabel.as_closest_canonical', 'nib.as_closest_canonical', (['thisVol_nii'], {}), '(thisVol_nii)\n', (37963, 37976), True, 'import nibabel as nib\n'), ((2476, 2506), 'os.path.isdir', 'os.path.isdir', (['self.sessionDir'], {}), '(self.sessionDir)\n', (2489, 2506), False, 'import os\n'), ((5108, 5126), 'os.path.join', 'join', (['parentDir', 'd'], {}), '(parentDir, d)\n', (5112, 5126), False, 'from os.path import join\n'), ((6054, 6065), 'time.time', 'time.time', ([], {}), '()\n', (6063, 6065), False, 'import time\n'), ((6715, 6735), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (6725, 6735), False, 'import time\n'), ((10386, 10421), 'os.path.join', 
'join', (['self.seriesDir', 'dicomFiles[0]'], {}), '(self.seriesDir, dicomFiles[0])\n', (10390, 10421), False, 'from os.path import join\n'), ((11707, 11730), 'os.path.join', 'join', (['self.seriesDir', 's'], {}), '(self.seriesDir, s)\n', (11711, 11730), False, 'from os.path import join\n'), ((11880, 11912), 'pydicom.dcmread', 'pydicom.dcmread', (['sliceDict[ISPN]'], {}), '(sliceDict[ISPN])\n', (11895, 11912), False, 'import pydicom\n'), ((14474, 14509), 'os.path.join', 'join', (['self.seriesDir', 'dicomFiles[0]'], {}), '(self.seriesDir, dicomFiles[0])\n', (14478, 14509), False, 'from os.path import join\n'), ((18881, 18902), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (18889, 18902), True, 'import numpy as np\n'), ((18955, 18976), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (18963, 18976), True, 'import numpy as np\n'), ((19529, 19550), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (19537, 19550), True, 'import numpy as np\n'), ((19905, 19926), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (19913, 19926), True, 'import numpy as np\n'), ((21213, 21243), 'os.path.join', 'join', (['self.seriesDir', 'sliceDcm'], {}), '(self.seriesDir, sliceDcm)\n', (21217, 21243), False, 'from os.path import join\n'), ((24920, 24945), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (24930, 24945), False, 'import time\n'), ((29240, 29265), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (29250, 29265), False, 'import time\n'), ((35488, 35509), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (35496, 35509), True, 'import numpy as np\n'), ((35562, 35583), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (35570, 35583), True, 'import numpy as np\n'), ((36136, 36157), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (36144, 36157), True, 'import numpy as np\n'), ((36512, 36533), 
'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (36520, 36533), True, 'import numpy as np\n'), ((3377, 3388), 'time.time', 'time.time', ([], {}), '()\n', (3386, 3388), False, 'import time\n'), ((5136, 5157), 'os.listdir', 'os.listdir', (['parentDir'], {}), '(parentDir)\n', (5146, 5157), False, 'import os\n'), ((6231, 6255), 'os.path.join', 'join', (['self.sessionDir', 'd'], {}), '(self.sessionDir, d)\n', (6235, 6255), False, 'from os.path import join\n'), ((6469, 6494), 'os.path.getmtime', 'os.path.getmtime', (['thisDir'], {}), '(thisDir)\n', (6485, 6494), False, 'import os\n'), ((9125, 9151), 'os.listdir', 'os.listdir', (['self.seriesDir'], {}), '(self.seriesDir)\n', (9135, 9151), False, 'import os\n'), ((11631, 11654), 'os.path.join', 'join', (['self.seriesDir', 's'], {}), '(self.seriesDir, s)\n', (11635, 11654), False, 'from os.path import join\n'), ((15493, 15516), 'os.path.join', 'join', (['self.seriesDir', 's'], {}), '(self.seriesDir, s)\n', (15497, 15516), False, 'from os.path import join\n'), ((16925, 16967), 'numpy.floor', 'np.floor', (['(instanceIdx / self.nSlicesPerVol)'], {}), '(instanceIdx / self.nSlicesPerVol)\n', (16933, 16967), True, 'import numpy as np\n'), ((21561, 21571), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21569, 21571), False, 'import sys\n'), ((23991, 24017), 'os.listdir', 'os.listdir', (['self.seriesDir'], {}), '(self.seriesDir)\n', (24001, 24017), False, 'import os\n'), ((24323, 24346), 'os.path.join', 'join', (['self.seriesDir', 'f'], {}), '(self.seriesDir, f)\n', (24327, 24346), False, 'from os.path import join\n'), ((5175, 5193), 'os.path.join', 'join', (['parentDir', 'd'], {}), '(parentDir, d)\n', (5179, 5193), False, 'from os.path import join\n'), ((6265, 6292), 'os.listdir', 'os.listdir', (['self.sessionDir'], {}), '(self.sessionDir)\n', (6275, 6292), False, 'import os\n'), ((5344, 5357), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (5351, 5357), False, 'import os\n'), ((6310, 6334), 'os.path.join', 
'join', (['self.sessionDir', 'd'], {}), '(self.sessionDir, d)\n', (6314, 6334), False, 'from os.path import join\n'), ((24578, 24588), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24586, 24588), False, 'import sys\n'), ((28525, 28543), 'os.stat', 'os.stat', (['dcm_fname'], {}), '(dcm_fname)\n', (28532, 28543), False, 'import os\n'), ((3621, 3634), 'os.path.join', 'join', (['s[0]', 'f'], {}), '(s[0], f)\n', (3625, 3634), False, 'from os.path import join\n'), ((3645, 3661), 'os.listdir', 'os.listdir', (['s[0]'], {}), '(s[0])\n', (3655, 3661), False, 'import os\n'), ((24542, 24556), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (24554, 24556), False, 'import sys\n')] |
import time
from numpy import exp, linspace, pi, sin
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (Circle, ColumnDataSource, DatetimeAxis,
PanTool, Plot, WheelZoomTool,)
from bokeh.resources import INLINE
from bokeh.util.browser import view
# Number of sample points along the curve
NUM_POINTS = 200

# Damped sine wave on [-2*pi, 2*pi]
xs = linspace(-2 * pi, 2 * pi, NUM_POINTS)
ys = sin(xs) * exp(-xs)

# Create an array of synthetic times, starting at the current time, and extending 24hrs
# (values are milliseconds since the epoch, as Bokeh's DatetimeAxis expects)
t_ms = (linspace(0, 24 * 3600, NUM_POINTS) + time.time()) * 1000

source = ColumnDataSource(data=dict(x=xs, y=ys, times=t_ms))

plot = Plot(min_border=80, plot_width=800, plot_height=350,
            background_fill_color="#efefef")

circle = Circle(x="times", y="y", fill_color="red", size=3,
                line_color=None, fill_alpha=0.5)
plot.add_glyph(source, circle)

# Datetime axes below and to the left of the plot
plot.add_layout(DatetimeAxis(), 'below')
plot.add_layout(DatetimeAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool(zoom_on_axis=False, speed=1 / 5000.))

doc = Document()
doc.add_root(plot)

if __name__ == "__main__":
    doc.validate()
    filename = "dateaxis.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Date Axis Example"))
    print("Wrote %s" % filename)
    view(filename)
| [
"bokeh.models.Circle",
"bokeh.models.DatetimeAxis",
"numpy.exp",
"numpy.linspace",
"time.time",
"numpy.sin",
"bokeh.models.Plot",
"bokeh.document.Document",
"bokeh.util.browser.view",
"bokeh.models.PanTool",
"bokeh.embed.file_html",
"bokeh.models.WheelZoomTool"
] | [((332, 360), 'numpy.linspace', 'linspace', (['(-2 * pi)', '(2 * pi)', 'N'], {}), '(-2 * pi, 2 * pi, N)\n', (340, 360), False, 'from numpy import exp, linspace, pi, sin\n'), ((593, 683), 'bokeh.models.Plot', 'Plot', ([], {'min_border': '(80)', 'plot_width': '(800)', 'plot_height': '(350)', 'background_fill_color': '"""#efefef"""'}), "(min_border=80, plot_width=800, plot_height=350, background_fill_color=\n '#efefef')\n", (597, 683), False, 'from bokeh.models import Circle, ColumnDataSource, DatetimeAxis, PanTool, Plot, WheelZoomTool\n'), ((689, 776), 'bokeh.models.Circle', 'Circle', ([], {'x': '"""times"""', 'y': '"""y"""', 'fill_color': '"""red"""', 'size': '(3)', 'line_color': 'None', 'fill_alpha': '(0.5)'}), "(x='times', y='y', fill_color='red', size=3, line_color=None,\n fill_alpha=0.5)\n", (695, 776), False, 'from bokeh.models import Circle, ColumnDataSource, DatetimeAxis, PanTool, Plot, WheelZoomTool\n'), ((970, 980), 'bokeh.document.Document', 'Document', ([], {}), '()\n', (978, 980), False, 'from bokeh.document import Document\n'), ((365, 371), 'numpy.sin', 'sin', (['x'], {}), '(x)\n', (368, 371), False, 'from numpy import exp, linspace, pi, sin\n'), ((372, 379), 'numpy.exp', 'exp', (['(-x)'], {}), '(-x)\n', (375, 379), False, 'from numpy import exp, linspace, pi, sin\n'), ((821, 835), 'bokeh.models.DatetimeAxis', 'DatetimeAxis', ([], {}), '()\n', (833, 835), False, 'from bokeh.models import Circle, ColumnDataSource, DatetimeAxis, PanTool, Plot, WheelZoomTool\n'), ((862, 876), 'bokeh.models.DatetimeAxis', 'DatetimeAxis', ([], {}), '()\n', (874, 876), False, 'from bokeh.models import Circle, ColumnDataSource, DatetimeAxis, PanTool, Plot, WheelZoomTool\n'), ((902, 911), 'bokeh.models.PanTool', 'PanTool', ([], {}), '()\n', (909, 911), False, 'from bokeh.models import Circle, ColumnDataSource, DatetimeAxis, PanTool, Plot, WheelZoomTool\n'), ((913, 964), 'bokeh.models.WheelZoomTool', 'WheelZoomTool', ([], {'zoom_on_axis': '(False)', 'speed': '(1 / 
5000.0)'}), '(zoom_on_axis=False, speed=1 / 5000.0)\n', (926, 964), False, 'from bokeh.models import Circle, ColumnDataSource, DatetimeAxis, PanTool, Plot, WheelZoomTool\n'), ((1211, 1225), 'bokeh.util.browser.view', 'view', (['filename'], {}), '(filename)\n', (1215, 1225), False, 'from bokeh.util.browser import view\n'), ((478, 503), 'numpy.linspace', 'linspace', (['(0)', '(24 * 3600)', 'N'], {}), '(0, 24 * 3600, N)\n', (486, 503), False, 'from numpy import exp, linspace, pi, sin\n'), ((504, 515), 'time.time', 'time.time', ([], {}), '()\n', (513, 515), False, 'import time\n'), ((1129, 1172), 'bokeh.embed.file_html', 'file_html', (['doc', 'INLINE', '"""Date Axis Example"""'], {}), "(doc, INLINE, 'Date Axis Example')\n", (1138, 1172), False, 'from bokeh.embed import file_html\n')] |
import sys
import cftime
import numpy as np
import pandas as pd
import pytest
import xarray as xr
# Import from directory structure if coverage test, or from installed
# packages otherwise
if "--cov" in str(sys.argv):
from src.geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average
else:
from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average
# Shared example datasets from the xarray tutorial cache (downloaded on
# first use, cached locally afterwards).
dset_a = xr.tutorial.open_dataset("rasm")
dset_b = xr.tutorial.open_dataset("air_temperature")
# Same data as dset_a but with the time coordinate renamed, used to
# exercise custom time-coordinate handling.
dset_c = dset_a.copy().rename({"time": "Times"})
# CF decoding disabled so the time axis stays numeric; used to check that
# non-datetime time coordinates are rejected.
# NOTE(review): this name is re-bound near the end of the module (to the
# encoded "air_temperature" dataset), so tests that reference it see the
# later binding at run time.
dset_encoded = xr.tutorial.open_dataset("rasm", decode_cf=False)
def get_fake_dataset(start_month, nmonths, nlats, nlons):
    """Returns a very simple xarray dataset for testing.

    Data values are equal to "month of year" for monthly time steps.
    """
    # Monthly time axis beginning at `start_month`
    time_index = pd.date_range(start=pd.to_datetime(start_month),
                               periods=nmonths,
                               freq="MS")
    lat_vals = np.linspace(start=-90, stop=90, num=nlats, dtype="float32")
    lon_vals = np.linspace(start=-180, stop=180, num=nlons, dtype="float32")

    # Each time step holds its 1-based month index, broadcast over the
    # full lat/lon grid (time is the leading dimension).
    month_numbers = np.arange(start=1, stop=nmonths + 1).reshape(nmonths, 1, 1)
    var_values = np.tile(month_numbers, (1, nlats, nlons))

    return xr.Dataset(
        data_vars={
            "my_var": (("time", "lat", "lon"), var_values.astype("float32")),
        },
        coords={
            "time": time_index,
            "lat": lat_vals,
            "lon": lon_vals
        },
    )
def _get_dummy_data(start_date,
                    end_date,
                    freq,
                    nlats,
                    nlons,
                    calendar='standard'):
    """Returns a simple xarray dataset to test with.

    Data can be hourly, daily, or monthly.
    """
    # Time axis on the requested cftime calendar
    time_axis = xr.cftime_range(start=start_date,
                                end=end_date,
                                freq=freq,
                                calendar=calendar)
    lat_axis = np.linspace(start=-90, stop=90, num=nlats, dtype='float32')
    lon_axis = np.linspace(start=-180, stop=180, num=nlons, dtype='float32')

    # Data variable: a simple ramp (0, 1, 2, ...) along time, replicated
    # across the spatial grid
    ramp = np.arange(len(time_axis)).reshape(len(time_axis), 1, 1)
    values = np.tile(ramp, (1, nlats, nlons))

    return xr.Dataset(data_vars={'data': (('time', 'lat', 'lon'), values)},
                      coords={
                          'time': time_axis,
                          'lat': lat_axis,
                          'lon': lon_axis
                      })
def test_climatology_invalid_freq():
    """An unsupported frequency string raises ValueError."""
    with pytest.raises(ValueError):
        climatology(dset_a, "hourly")
def test_climatology_encoded_time():
    """A dataset whose time axis was not CF-decoded raises ValueError."""
    # NOTE(review): `dset_encoded` is re-bound near the end of this module;
    # at run time this test sees the encoded "air_temperature" dataset, not
    # the encoded "rasm" dataset defined at the top of the file.
    with pytest.raises(ValueError):
        climatology(dset_encoded, "monthly")
@pytest.mark.parametrize("dataset", [dset_a, dset_b, dset_c["Tair"]])
@pytest.mark.parametrize("freq", ["day", "month", "year", "season"])
def test_climatology_setup(dataset, freq):
    """climatology() runs for every supported frequency and returns the
    same container type as its input (Dataset in -> Dataset out,
    DataArray in -> DataArray out)."""
    computed_dset = climatology(dataset, freq)
    assert type(dataset) == type(computed_dset)
@pytest.mark.parametrize("dataset", [dset_a, dset_b, dset_c["Tair"]])
@pytest.mark.parametrize("freq", ["day", "month", "year", "season"])
def test_anomaly_setup(dataset, freq):
    """anomaly() runs for every supported frequency and returns the same
    container type as its input."""
    computed_dset = anomaly(dataset, freq)
    assert type(dataset) == type(computed_dset)
# One year (12 months) of single-point data for the year 2000; values are
# the month-of-year (1..12).
ds1 = get_fake_dataset(start_month="2000-01", nmonths=12, nlats=1, nlons=1)
# Create another dataset for the year 2001.
ds2 = get_fake_dataset(start_month="2001-01", nmonths=12, nlats=1, nlons=1)
# Create a dataset that combines the two previous datasets, for two
# years of data.
ds3 = xr.concat([ds1, ds2], dim="time")
# Create a dataset with the wrong number of months.
partial_year_dataset = get_fake_dataset(start_month="2000-01",
                                        nmonths=13,
                                        nlats=1,
                                        nlons=1)
# Create a dataset with a custom time coordinate.
custom_time_dataset = get_fake_dataset(start_month="2000-01",
                                       nmonths=12,
                                       nlats=1,
                                       nlons=1)
custom_time_dataset = custom_time_dataset.rename({"time": "my_time"})
# Create a more complex dataset just to verify that get_fake_dataset()
# is generally working.
complex_dataset = get_fake_dataset(start_month="2001-01",
                                  nmonths=12,
                                  nlats=10,
                                  nlons=10)
@pytest.mark.parametrize("dataset, season, expected", [(ds1, "JFM", 2.0),
                                                       (ds1, "JJA", 7.0)])
def test_month_to_season_returns_middle_month_value(dataset, season, expected):
    """A full 3-month season of the ramp data averages to the value of its
    middle month (JFM -> Feb = 2.0, JJA -> Jul = 7.0)."""
    season_ds = month_to_season(dataset, season)
    np.testing.assert_equal(season_ds["my_var"].data, expected)
def test_month_to_season_bad_season_exception():
    """An unrecognized season string raises KeyError."""
    with pytest.raises(KeyError):
        month_to_season(ds1, "TEST")
def test_month_to_season_partial_years_exception():
    """Input that does not span whole years (13 months) raises ValueError."""
    with pytest.raises(ValueError):
        month_to_season(partial_year_dataset, "JFM")
@pytest.mark.parametrize("dataset, season, expected", [(ds1, "NDJ", 11.5)])
def test_month_to_season_final_season_returns_2month_average(
        dataset, season, expected):
    """For the year-crossing NDJ season at the end of a single-year record
    only Nov and Dec exist, so the result is their 2-month mean (11.5)."""
    season_ds = month_to_season(dataset, season)
    np.testing.assert_equal(season_ds["my_var"].data, expected)
# Every 3-month season label accepted by month_to_season().
@pytest.mark.parametrize(
    "season",
    [
        "DJF",
        "JFM",
        "FMA",
        "MAM",
        "AMJ",
        "MJJ",
        "JJA",
        "JAS",
        "ASO",
        "SON",
        "OND",
        "NDJ",
    ],
)
def test_month_to_season_returns_one_point_per_year(season):
    """Seasonal aggregation collapses 12 monthly points to one per year."""
    nyears_of_data = ds3.sizes["time"] / 12
    season_ds = month_to_season(ds3, season)
    assert season_ds["my_var"].size == nyears_of_data
@pytest.mark.parametrize(
    "dataset, time_coordinate, var_name, expected",
    [
        (custom_time_dataset, "my_time", "my_var", 2.0),
        (dset_c.isel(x=110, y=200), None, "Tair", [-10.56, -8.129, -7.125]),
    ],
)
def test_month_to_season_custom_time_coordinate(dataset, time_coordinate,
                                                var_name, expected):
    """month_to_season() honors an explicit time_coord_name; passing None
    presumably triggers automatic detection of the time coordinate (dset_c
    has its time axis renamed to "Times") -- verify against the
    implementation."""
    season_ds = month_to_season(dataset, "JFM", time_coord_name=time_coordinate)
    np.testing.assert_almost_equal(season_ds[var_name].data,
                                   expected,
                                   decimal=1)
# Test Datasets For calendar_average() and climatology_average()
# Ramp data (0, 1, 2, ...) at four temporal resolutions over 2020-2021.
minute = _get_dummy_data('2020-01-01', '2021-12-31 23:30:00', '30min', 1, 1)
hourly = _get_dummy_data('2020-01-01', '2021-12-31 23:00:00', 'H', 1, 1)
daily = _get_dummy_data('2020-01-01', '2021-12-31', 'D', 1, 1)
monthly = _get_dummy_data('2020-01-01', '2021-12-01', 'MS', 1, 1)
# Computational Tests for calendar_average()
# Expected hourly means of the 30-min ramp: each hour averages two
# consecutive samples (0.5, 2.5, ...), stamped at the half-hour.
hour_avg = np.arange(0.5, 35088.5, 2).reshape((365 + 366) * 24, 1, 1)
hour_avg_time = xr.cftime_range('2020-01-01 00:30:00',
                                '2021-12-31 23:30:00',
                                freq='H')
min_2_hour_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), hour_avg)},
    coords={
        'time': hour_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(minute, min_2_hour_avg)])
def test_30min_to_hourly_calendar_average(dset, expected):
    """30-minute data averaged to hourly matches the hand-computed means."""
    result = calendar_average(dset, freq='hour')
    xr.testing.assert_equal(result, expected)
# Expected daily means of the hourly ramp (24 samples per day), stamped
# at local noon.
day_avg = np.arange(11.5, 17555.5, 24).reshape(366 + 365, 1, 1)
day_avg_time = xr.cftime_range('2020-01-01 12:00:00',
                               '2021-12-31 12:00:00',
                               freq='D')
hour_2_day_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), day_avg)},
    coords={
        'time': day_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(hourly, hour_2_day_avg)])
def test_hourly_to_daily_calendar_average(dset, expected):
    """Hourly data averaged to daily matches the hand-computed means."""
    result = calendar_average(dset, freq='day')
    xr.testing.assert_equal(result, expected)
# Expected monthly means of the daily ramp; time stamps are the midpoints
# between consecutive month starts.
month_avg = np.array([
    15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 410.5,
    440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715
]).reshape(24, 1, 1)
month_avg_time = xr.cftime_range('2020-01-01', '2022-01-01', freq='MS')
month_avg_time = xr.DataArray(np.vstack((month_avg_time[:-1], month_avg_time[1:])).T,
                              dims=['time', 'nbd']) \
    .mean(dim='nbd')
day_2_month_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), month_avg)},
    coords={
        'time': month_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(daily, day_2_month_avg)])
def test_daily_to_monthly_calendar_average(dset, expected):
    """Daily data averaged to monthly matches the hand-computed means."""
    result = calendar_average(dset, freq='month')
    xr.testing.assert_equal(result, expected)
# Expected seasonal (DJF/MAM/JJA/SON) means of the daily ramp; time
# stamps are midpoints of the quarterly (QS-DEC) intervals.
season_avg = np.array([29.5, 105.5, 197.5, 289, 379.5, 470.5, 562.5, 654,
                       715]).reshape(9, 1, 1)
season_avg_time = xr.cftime_range('2019-12-01', '2022-03-01', freq='QS-DEC')
season_avg_time = xr.DataArray(np.vstack((season_avg_time[:-1], season_avg_time[1:])).T,
                               dims=['time', 'nbd']) \
    .mean(dim='nbd')
day_2_season_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), season_avg)},
    coords={
        'time': season_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
# Expected seasonal means of the monthly ramp (name intentionally reused;
# the previous array is already captured by day_2_season_avg).
season_avg = np.array(
    [0.483333333, 3, 6.010869565, 9, 11.96666667, 15, 18.01086957, 21,
     23]).reshape(9, 1, 1)
month_2_season_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), season_avg)},
    coords={
        'time': season_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(daily, day_2_season_avg),
                                            (monthly, month_2_season_avg)])
def test_daily_monthly_to_seasonal_calendar_average(dset, expected):
    """Daily and monthly data averaged to seasonal match the expected
    values (allclose: seasonal means involve float division)."""
    result = calendar_average(dset, freq='season')
    xr.testing.assert_allclose(result, expected)
# Expected yearly means; time stamps are mid-year (2020 is a leap year,
# hence the 12-hour offset for 2021).
year_avg_time = [
    cftime.datetime(2020, 7, 2),
    cftime.datetime(2021, 7, 2, hour=12)
]
day_2_year_avg = [[[182.5]], [[548]]]
day_2_year_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), day_2_year_avg)},
    coords={
        'time': year_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
month_2_year_avg = [[[5.513661202]], [[17.5260274]]]
month_2_year_avg = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), month_2_year_avg)},
    coords={
        'time': year_avg_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(daily, day_2_year_avg),
                                            (monthly, month_2_year_avg)])
def test_daily_monthly_to_yearly_calendar_average(dset, expected):
    """Daily and monthly data averaged to yearly match the expected values."""
    result = calendar_average(dset, freq='year')
    xr.testing.assert_allclose(result, expected)
# Computational Tests for climatology_average()
# Expected hourly climatology of the 30-min ramp over one (leap) year of
# hour-of-year slots; pieces are concatenated to account for Feb 29.
hour_clim = np.concatenate([np.arange(8784.5, 11616.5, 2),
                             np.arange(2832.5, 2880.5, 2),
                             np.arange(11640.5, 26328.5, 2)])\
    .reshape(8784, 1, 1)
hour_clim_time = xr.cftime_range('2020-01-01 00:30:00',
                                 '2020-12-31 23:30:00',
                                 freq='H')
min_2_hourly_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), hour_clim)},
    coords={
        'time': hour_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(minute, min_2_hourly_clim)])
def test_30min_to_hourly_climatology_average(dset, expected):
    """30-minute data reduced to an hourly climatology matches the
    hand-computed values."""
    result = climatology_average(dset, freq='hour')
    xr.testing.assert_allclose(result, expected)
# Expected daily climatology of the hourly ramp; the lone 1427.5 entry is
# the Feb 29 slot, which only occurs in the leap year.
day_clim = np.concatenate([np.arange(4403.5, 5819.5, 24),
                            [1427.5],
                            np.arange(5831.5, 13175.5, 24)]) \
    .reshape(366, 1, 1)
day_clim_time = xr.cftime_range('2020-01-01 12:00:00',
                                '2020-12-31 12:00:00',
                                freq='24H')
hour_2_day_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), day_clim)},
    coords={
        'time': day_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(hourly, hour_2_day_clim)])
def test_hourly_to_daily_climatology_average(dset, expected):
    """Hourly data reduced to a daily climatology matches the
    hand-computed values."""
    result = climatology_average(dset, freq='day')
    xr.testing.assert_equal(result, expected)
# Expected monthly climatology of the daily ramp; time stamps are month
# midpoints (mean of consecutive month starts).
month_clim = np.array([
    198, 224.5438596, 257.5, 288, 318.5, 349, 379.5, 410.5, 441, 471.5, 502,
    532.5
]).reshape(12, 1, 1)
month_clim_time = xr.cftime_range('2020-01-01', '2021-01-01', freq='MS')
month_clim_time = xr.DataArray(np.vstack(
    (month_clim_time[:-1], month_clim_time[1:])).T,
                              dims=['time', 'nbd']).mean(dim='nbd')
day_2_month_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), month_clim)},
    coords={
        'time': month_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(daily, day_2_month_clim)])
def test_daily_to_monthly_climatology_average(dset, expected):
    """Daily data reduced to a monthly climatology matches the
    hand-computed values."""
    result = climatology_average(dset, freq='month')
    xr.testing.assert_allclose(result, expected)
# Expected seasonal climatology of the daily ramp, keyed by season label
# rather than a time coordinate.
season_clim = np.array([320.9392265, 380, 288, 471.5]).reshape(4, 1, 1)
season_clim_time = ['DJF', 'JJA', 'MAM', 'SON']
day_2_season_clim = xr.Dataset(
    data_vars={'data': (('season', 'lat', 'lon'), season_clim)},
    coords={
        'season': season_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
# Expected seasonal climatology of the monthly ramp (name intentionally
# reused; the previous array is already captured by day_2_season_clim).
season_clim = np.array([10.04972376, 12.01086957, 9, 15]).reshape(4, 1, 1)
month_2_season_clim = xr.Dataset(
    data_vars={'data': (('season', 'lat', 'lon'), season_clim)},
    coords={
        'season': season_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected', [(daily, day_2_season_clim),
                                            (monthly, month_2_season_clim)])
def test_daily_monthly_to_seasonal_climatology_average(dset, expected):
    """Daily and monthly data reduced to a seasonal climatology match the
    hand-computed values."""
    result = climatology_average(dset, freq='season')
    xr.testing.assert_allclose(result, expected)
# Argument Tests for climatology_average() and calendar_average()
@pytest.mark.parametrize('freq', ['TEST', None])
def test_invalid_freq_climatology_average(freq):
    """An unknown (or missing) frequency raises KeyError."""
    with pytest.raises(KeyError):
        climatology_average(monthly, freq=freq)
@pytest.mark.parametrize('freq', ['TEST', None])
def test_invalid_freq_calendar_average(freq):
    """An unknown (or missing) frequency raises KeyError."""
    with pytest.raises(KeyError):
        calendar_average(monthly, freq=freq)
# Custom time-dimension handling: the same daily data with its time axis
# renamed, and the expected results renamed to match.
time_dim = 'my_time'
custom_time = daily.rename({'time': time_dim})
custom_time_expected = day_2_month_clim.rename({'time': time_dim})
@pytest.mark.parametrize('dset, expected, time_dim',
                         [(custom_time, custom_time_expected, time_dim)])
def test_custom_time_coord_climatology_average(dset, expected, time_dim):
    """climatology_average() honors a non-default time dimension name."""
    result = climatology_average(dset, freq='month', time_dim=time_dim)
    xr.testing.assert_allclose(result, expected)
# Re-bind the expected result for the calendar_average variant; the
# parametrize decorator above already captured the previous value.
custom_time_expected = day_2_month_avg.rename({'time': time_dim})
@pytest.mark.parametrize('dset, expected, time_dim',
                         [(custom_time, custom_time_expected, time_dim)])
def test_custom_time_coord_calendar_average(dset, expected, time_dim):
    """calendar_average() honors a non-default time dimension name."""
    result = calendar_average(dset, freq='month', time_dim=time_dim)
    xr.testing.assert_allclose(result, expected)
# DataArray (rather than Dataset) inputs should also be accepted.
array = daily['data']
array_expected = day_2_month_clim['data']
@pytest.mark.parametrize('da, expected', [(array, array_expected)])
def test_xr_DataArray_support_climatology_average(da, expected):
    """climatology_average() accepts a bare DataArray."""
    result = climatology_average(da, freq='month')
    xr.testing.assert_allclose(result, expected)
# Re-bind the expected result; the decorator above already captured the
# previous value.
array_expected = day_2_month_avg['data']
@pytest.mark.parametrize('da, expected', [(array, array_expected)])
def test_xr_DataArray_support_calendar_average(da, expected):
    """calendar_average() accepts a bare DataArray."""
    result = calendar_average(da, freq='month')
    xr.testing.assert_equal(result, expected)
# Use a distinct name here: the module already defines `dset_encoded` near
# the top (the "rasm" tutorial dataset with decode_cf=False), and rebinding
# it at import time silently swapped the fixture that
# test_climatology_encoded_time() receives at run time.
dset_encoded_air = xr.tutorial.open_dataset("air_temperature", decode_cf=False)


def test_non_datetime_like_objects_climatology_average():
    """climatology_average() rejects a dataset whose time values were not
    CF-decoded to datetime-like objects."""
    with pytest.raises(ValueError):
        climatology_average(dset_encoded_air, 'month')


def test_non_datetime_like_objects_calendar_average():
    """calendar_average() rejects a dataset whose time values were not
    CF-decoded to datetime-like objects."""
    with pytest.raises(ValueError):
        calendar_average(dset_encoded_air, 'month')
# A 3-point time axis with a gap (Jan 3 missing) to exercise the
# uniform-spacing check.
time = pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-04'])
non_uniform = xr.Dataset(data_vars={'data': (('time'), np.arange(3))},
                         coords={'time': time})
def test_non_uniformly_spaced_data_climatology_average():
    """Non-uniform time spacing raises ValueError."""
    with pytest.raises(ValueError):
        climatology_average(non_uniform, freq='day')
def test_non_uniformly_spaced_data_calendar_average():
    """Non-uniform time spacing raises ValueError."""
    with pytest.raises(ValueError):
        calendar_average(non_uniform, freq='day')
# Daily ramp data on each non-standard cftime calendar (note the 360-day
# calendar ends on 12-30, its last valid date).
julian_daily = _get_dummy_data('2020-01-01',
                               '2021-12-31',
                               'D',
                               1,
                               1,
                               calendar='julian')
noleap_daily = _get_dummy_data('2020-01-01',
                               '2021-12-31',
                               'D',
                               1,
                               1,
                               calendar='noleap')
all_leap_daily = _get_dummy_data('2020-01-01',
                                 '2021-12-31',
                                 'D',
                                 1,
                                 1,
                                 calendar='all_leap')
day_360_daily = _get_dummy_data('2020-01-01',
                                '2021-12-30',
                                'D',
                                1,
                                1,
                                calendar='360_day')
# Daily -> Monthly Climatologies for Julian Calendar
# Time stamps are month midpoints (mean of consecutive month starts).
julian_month_clim = np.array([198, 224.54385965, 257.5, 288, 318.5, 349,
                              379.5, 410.5, 441, 471.5, 502, 532.5])\
    .reshape(12, 1, 1)
julian_month_clim_time = xr.cftime_range('2020-01-01',
                                         '2021-01-01',
                                         freq='MS',
                                         calendar='julian')
julian_month_clim_time = xr.DataArray(np.vstack((julian_month_clim_time[:-1], julian_month_clim_time[1:])).T,
                                      dims=['time', 'nbd']) \
    .mean(dim='nbd')
julian_day_2_month_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), julian_month_clim)},
    coords={
        'time': julian_month_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
# Daily -> Monthly Climatologies for NoLeap Calendar
noleap_month_clim = np.array([197.5, 227, 256.5, 287, 317.5, 348,
                              378.5, 409.5, 440, 470.5, 501, 531.5])\
    .reshape(12, 1, 1)
noleap_month_clim_time = xr.cftime_range('2020-01-01',
                                         '2021-01-01',
                                         freq='MS',
                                         calendar='noleap')
noleap_month_clim_time = xr.DataArray(np.vstack((noleap_month_clim_time[:-1], noleap_month_clim_time[1:])).T,
                                      dims=['time', 'nbd']) \
    .mean(dim='nbd')
noleap_day_2_month_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), noleap_month_clim)},
    coords={
        'time': noleap_month_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
# Daily -> Monthly Climatologies for AllLeap Calendar
# Time stamps are month midpoints (mean of consecutive month starts).
all_leap_month_clim = np.array([198, 228, 258, 288.5, 319, 349.5,
                                380, 411, 441.5, 472, 502.5, 533])\
    .reshape(12, 1, 1)
all_leap_month_clim_time = xr.cftime_range('2020-01-01',
                                           '2021-01-01',
                                           freq='MS',
                                           calendar='all_leap')
all_leap_month_clim_time = xr.DataArray(np.vstack((all_leap_month_clim_time[:-1], all_leap_month_clim_time[1:])).T,
                                        dims=['time', 'nbd']) \
    .mean(dim='nbd')
all_leap_day_2_month_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), all_leap_month_clim)},
    coords={
        'time': all_leap_month_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
# Daily -> Monthly Climatologies for 360 Day Calendar
# Every month has 30 days, so the expected means form an even ramp.
day_360_leap_month_clim = np.arange(194.5, 554.5, 30).reshape(12, 1, 1)
day_360_leap_month_clim_time = xr.cftime_range('2020-01-01',
                                               '2021-01-01',
                                               freq='MS',
                                               calendar='360_day')
day_360_leap_month_clim_time = xr.DataArray(np.vstack((day_360_leap_month_clim_time[:-1], day_360_leap_month_clim_time[1:])).T,
                                            dims=['time', 'nbd']) \
    .mean(dim='nbd')
day_360_leap_day_2_month_clim = xr.Dataset(
    data_vars={'data': (('time', 'lat', 'lon'), day_360_leap_month_clim)},
    coords={
        'time': day_360_leap_month_clim_time,
        'lat': [-90.0],
        'lon': [-180.0]
    })
@pytest.mark.parametrize('dset, expected',
                         [(julian_daily, julian_day_2_month_clim),
                          (noleap_daily, noleap_day_2_month_clim),
                          (all_leap_daily, all_leap_day_2_month_clim),
                          (day_360_daily, day_360_leap_day_2_month_clim)])
def test_non_standard_calendars_climatology_average(dset, expected):
    """Daily -> monthly climatology is correct for non-standard calendars."""
    xr.testing.assert_allclose(climatology_average(dset, freq='month'),
                               expected)
def _monthly_avg_fixture(month_vals, calendar):
    """Build the expected monthly-mean fixture for one calendar.

    Parameters
    ----------
    month_vals : numpy.ndarray
        Expected mid-month means for 2020-2021 with shape (24, 1, 1).
    calendar : str
        cftime calendar name (e.g. 'julian', 'noleap', 'all_leap',
        '360_day').

    Returns
    -------
    tuple
        ``(time, dset)`` where ``time`` is the mid-month time coordinate
        over the 24 months from 2020-01 through 2021-12 in ``calendar``
        (midpoint of each month's bounds, matching the labels
        ``calendar_average`` produces) and ``dset`` is the expected
        result Dataset on a single-point lat/lon grid.
    """
    # Month-start bounds for 2020-2021 plus the closing bound at 2022-01-01.
    bounds = xr.cftime_range('2020-01-01',
                             '2022-01-01',
                             freq='MS',
                             calendar=calendar)
    # Pair consecutive bounds and average them to get mid-month labels.
    mid = xr.DataArray(np.vstack((bounds[:-1], bounds[1:])).T,
                       dims=['time', 'nbd']) \
            .mean(dim='nbd')
    dset = xr.Dataset(
        data_vars={'data': (('time', 'lat', 'lon'), month_vals)},
        coords={
            'time': mid,
            'lat': [-90.0],
            'lon': [-180.0]
        })
    return mid, dset


# Daily -> Monthly Means for Julian Calendar
julian_month_avg = np.array([
    15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 410.5,
    440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715
]).reshape(24, 1, 1)
julian_month_avg_time, julian_day_2_month_avg = _monthly_avg_fixture(
    julian_month_avg, 'julian')
# Daily -> Monthly Means for NoLeap Calendar
noleap_month_avg = np.array([
    15, 44.5, 74, 104.5, 135, 165.5, 196, 227, 257.5, 288, 318.5, 349, 380,
    409.5, 439, 469.5, 500, 530.5, 561, 592, 622.5, 653, 683.5, 714
]).reshape(24, 1, 1)
noleap_month_avg_time, noleap_day_2_month_avg = _monthly_avg_fixture(
    noleap_month_avg, 'noleap')
# Daily -> Monthly Means for AllLeap Calendar
all_leap_month_avg = np.array([
    15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 411,
    441, 471.5, 502, 532.5, 563, 594, 624.5, 655, 685.5, 716
]).reshape(24, 1, 1)
all_leap_month_avg_time, all_leap_day_2_month_avg = _monthly_avg_fixture(
    all_leap_month_avg, 'all_leap')
# Daily -> Monthly Means for 360 Day Calendar
day_360_leap_month_avg = np.arange(14.5, 734.5, 30).reshape(24, 1, 1)
day_360_leap_month_avg_time, day_360_leap_day_2_month_avg = \
    _monthly_avg_fixture(day_360_leap_month_avg, '360_day')
@pytest.mark.parametrize('dset, expected',
                         [(julian_daily, julian_day_2_month_avg),
                          (noleap_daily, noleap_day_2_month_avg),
                          (all_leap_daily, all_leap_day_2_month_avg),
                          (day_360_daily, day_360_leap_day_2_month_avg)])
def test_non_standard_calendars_calendar_average(dset, expected):
    """Daily -> monthly means are exact for non-standard calendars."""
    xr.testing.assert_equal(calendar_average(dset, freq='month'),
                            expected)
| [
"geocat.comp.anomaly",
"numpy.testing.assert_equal",
"xarray.concat",
"numpy.array",
"xarray.tutorial.open_dataset",
"geocat.comp.month_to_season",
"numpy.arange",
"pandas.to_datetime",
"xarray.testing.assert_allclose",
"geocat.comp.climatology",
"numpy.testing.assert_almost_equal",
"numpy.lin... | [((450, 482), 'xarray.tutorial.open_dataset', 'xr.tutorial.open_dataset', (['"""rasm"""'], {}), "('rasm')\n", (474, 482), True, 'import xarray as xr\n'), ((492, 535), 'xarray.tutorial.open_dataset', 'xr.tutorial.open_dataset', (['"""air_temperature"""'], {}), "('air_temperature')\n", (516, 535), True, 'import xarray as xr\n'), ((600, 649), 'xarray.tutorial.open_dataset', 'xr.tutorial.open_dataset', (['"""rasm"""'], {'decode_cf': '(False)'}), "('rasm', decode_cf=False)\n", (624, 649), True, 'import xarray as xr\n'), ((2926, 2994), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset"""', "[dset_a, dset_b, dset_c['Tair']]"], {}), "('dataset', [dset_a, dset_b, dset_c['Tair']])\n", (2949, 2994), False, 'import pytest\n'), ((2996, 3063), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['day', 'month', 'year', 'season']"], {}), "('freq', ['day', 'month', 'year', 'season'])\n", (3019, 3063), False, 'import pytest\n'), ((3205, 3273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset"""', "[dset_a, dset_b, dset_c['Tair']]"], {}), "('dataset', [dset_a, dset_b, dset_c['Tair']])\n", (3228, 3273), False, 'import pytest\n'), ((3275, 3342), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['day', 'month', 'year', 'season']"], {}), "('freq', ['day', 'month', 'year', 'season'])\n", (3298, 3342), False, 'import pytest\n'), ((3764, 3797), 'xarray.concat', 'xr.concat', (['[ds1, ds2]'], {'dim': '"""time"""'}), "([ds1, ds2], dim='time')\n", (3773, 3797), True, 'import xarray as xr\n'), ((4688, 4785), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset, season, expected"""', "[(ds1, 'JFM', 2.0), (ds1, 'JJA', 7.0)]"], {}), "('dataset, season, expected', [(ds1, 'JFM', 2.0), (\n ds1, 'JJA', 7.0)])\n", (4711, 4785), False, 'import pytest\n'), ((5297, 5371), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset, season, expected"""', "[(ds1, 'NDJ', 11.5)]"], {}), 
"('dataset, season, expected', [(ds1, 'NDJ', 11.5)])\n", (5320, 5371), False, 'import pytest\n'), ((5586, 5709), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""season"""', "['DJF', 'JFM', 'FMA', 'MAM', 'AMJ', 'MJJ', 'JJA', 'JAS', 'ASO', 'SON',\n 'OND', 'NDJ']"], {}), "('season', ['DJF', 'JFM', 'FMA', 'MAM', 'AMJ', 'MJJ',\n 'JJA', 'JAS', 'ASO', 'SON', 'OND', 'NDJ'])\n", (5609, 5709), False, 'import pytest\n'), ((7110, 7181), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01 00:30:00"""', '"""2021-12-31 23:30:00"""'], {'freq': '"""H"""'}), "('2020-01-01 00:30:00', '2021-12-31 23:30:00', freq='H')\n", (7125, 7181), True, 'import xarray as xr\n'), ((7263, 7399), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), hour_avg)}", 'coords': "{'time': hour_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), hour_avg)}, coords={\n 'time': hour_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (7273, 7399), True, 'import xarray as xr\n'), ((7437, 7506), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(minute, min_2_hour_avg)]'], {}), "('dset, expected', [(minute, min_2_hour_avg)])\n", (7460, 7506), False, 'import pytest\n'), ((7742, 7813), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01 12:00:00"""', '"""2021-12-31 12:00:00"""'], {'freq': '"""D"""'}), "('2020-01-01 12:00:00', '2021-12-31 12:00:00', freq='D')\n", (7757, 7813), True, 'import xarray as xr\n'), ((7893, 8027), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), day_avg)}", 'coords': "{'time': day_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), day_avg)}, coords={\n 'time': day_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (7903, 8027), True, 'import xarray as xr\n'), ((8065, 8134), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(hourly, hour_2_day_avg)]'], {}), 
"('dset, expected', [(hourly, hour_2_day_avg)])\n", (8088, 8134), False, 'import pytest\n'), ((8493, 8547), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2022-01-01"""'], {'freq': '"""MS"""'}), "('2020-01-01', '2022-01-01', freq='MS')\n", (8508, 8547), True, 'import xarray as xr\n'), ((8743, 8881), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), month_avg)}", 'coords': "{'time': month_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), month_avg)}, coords=\n {'time': month_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (8753, 8881), True, 'import xarray as xr\n'), ((8919, 8988), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(daily, day_2_month_avg)]'], {}), "('dset, expected', [(daily, day_2_month_avg)])\n", (8942, 8988), False, 'import pytest\n'), ((9285, 9343), 'xarray.cftime_range', 'xr.cftime_range', (['"""2019-12-01"""', '"""2022-03-01"""'], {'freq': '"""QS-DEC"""'}), "('2019-12-01', '2022-03-01', freq='QS-DEC')\n", (9300, 9343), True, 'import xarray as xr\n'), ((9544, 9684), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), season_avg)}", 'coords': "{'time': season_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), season_avg)}, coords\n ={'time': season_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (9554, 9684), True, 'import xarray as xr\n'), ((9862, 10002), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), season_avg)}", 'coords': "{'time': season_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), season_avg)}, coords\n ={'time': season_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (9872, 10002), True, 'import xarray as xr\n'), ((10040, 10146), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(daily, day_2_season_avg), (monthly, 
month_2_season_avg)]'], {}), "('dset, expected', [(daily, day_2_season_avg), (\n monthly, month_2_season_avg)])\n", (10063, 10146), False, 'import pytest\n'), ((10506, 10647), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), day_2_year_avg)}", 'coords': "{'time': year_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), day_2_year_avg)},\n coords={'time': year_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (10516, 10647), True, 'import xarray as xr\n'), ((10755, 10898), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), month_2_year_avg)}", 'coords': "{'time': year_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), month_2_year_avg)},\n coords={'time': year_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (10765, 10898), True, 'import xarray as xr\n'), ((10937, 11039), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(daily, day_2_year_avg), (monthly, month_2_year_avg)]'], {}), "('dset, expected', [(daily, day_2_year_avg), (\n monthly, month_2_year_avg)])\n", (10960, 11039), False, 'import pytest\n'), ((11525, 11596), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01 00:30:00"""', '"""2020-12-31 23:30:00"""'], {'freq': '"""H"""'}), "('2020-01-01 00:30:00', '2020-12-31 23:30:00', freq='H')\n", (11540, 11596), True, 'import xarray as xr\n'), ((11683, 11821), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), hour_clim)}", 'coords': "{'time': hour_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), hour_clim)}, coords=\n {'time': hour_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (11693, 11821), True, 'import xarray as xr\n'), ((11859, 11931), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(minute, min_2_hourly_clim)]'], {}), "('dset, expected', [(minute, 
min_2_hourly_clim)])\n", (11882, 11931), False, 'import pytest\n'), ((12303, 12376), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01 12:00:00"""', '"""2020-12-31 12:00:00"""'], {'freq': '"""24H"""'}), "('2020-01-01 12:00:00', '2020-12-31 12:00:00', freq='24H')\n", (12318, 12376), True, 'import xarray as xr\n'), ((12460, 12596), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), day_clim)}", 'coords': "{'time': day_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), day_clim)}, coords={\n 'time': day_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (12470, 12596), True, 'import xarray as xr\n'), ((12634, 12704), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(hourly, hour_2_day_clim)]'], {}), "('dset, expected', [(hourly, hour_2_day_clim)])\n", (12657, 12704), False, 'import pytest\n'), ((13016, 13070), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2021-01-01"""'], {'freq': '"""MS"""'}), "('2020-01-01', '2021-01-01', freq='MS')\n", (13031, 13070), True, 'import xarray as xr\n'), ((13253, 13393), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), month_clim)}", 'coords': "{'time': month_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), month_clim)}, coords\n ={'time': month_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (13263, 13393), True, 'import xarray as xr\n'), ((13431, 13501), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(daily, day_2_month_clim)]'], {}), "('dset, expected', [(daily, day_2_month_clim)])\n", (13454, 13501), False, 'import pytest\n'), ((13809, 13954), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('season', 'lat', 'lon'), season_clim)}", 'coords': "{'season': season_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('season', 'lat', 'lon'), 
season_clim)},\n coords={'season': season_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (13819, 13954), True, 'import xarray as xr\n'), ((14088, 14233), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('season', 'lat', 'lon'), season_clim)}", 'coords': "{'season': season_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('season', 'lat', 'lon'), season_clim)},\n coords={'season': season_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (14098, 14233), True, 'import xarray as xr\n'), ((14272, 14380), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(daily, day_2_season_clim), (monthly, month_2_season_clim)]'], {}), "('dset, expected', [(daily, day_2_season_clim), (\n monthly, month_2_season_clim)])\n", (14295, 14380), False, 'import pytest\n'), ((14664, 14711), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['TEST', None]"], {}), "('freq', ['TEST', None])\n", (14687, 14711), False, 'import pytest\n'), ((14846, 14893), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['TEST', None]"], {}), "('freq', ['TEST', None])\n", (14869, 14893), False, 'import pytest\n'), ((15159, 15263), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected, time_dim"""', '[(custom_time, custom_time_expected, time_dim)]'], {}), "('dset, expected, time_dim', [(custom_time,\n custom_time_expected, time_dim)])\n", (15182, 15263), False, 'import pytest\n'), ((15551, 15655), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected, time_dim"""', '[(custom_time, custom_time_expected, time_dim)]'], {}), "('dset, expected, time_dim', [(custom_time,\n custom_time_expected, time_dim)])\n", (15574, 15655), False, 'import pytest\n'), ((15935, 16001), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""da, expected"""', '[(array, array_expected)]'], {}), "('da, expected', [(array, array_expected)])\n", (15958, 16001), False, 'import pytest\n'), 
((16213, 16279), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""da, expected"""', '[(array, array_expected)]'], {}), "('da, expected', [(array, array_expected)])\n", (16236, 16279), False, 'import pytest\n'), ((16453, 16513), 'xarray.tutorial.open_dataset', 'xr.tutorial.open_dataset', (['"""air_temperature"""'], {'decode_cf': '(False)'}), "('air_temperature', decode_cf=False)\n", (16477, 16513), True, 'import xarray as xr\n'), ((16811, 16869), 'pandas.to_datetime', 'pd.to_datetime', (["['2020-01-01', '2020-01-02', '2020-01-04']"], {}), "(['2020-01-01', '2020-01-02', '2020-01-04'])\n", (16825, 16869), True, 'import pandas as pd\n'), ((18543, 18616), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2021-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""julian"""'}), "('2020-01-01', '2021-01-01', freq='MS', calendar='julian')\n", (18558, 18616), True, 'import xarray as xr\n'), ((18958, 19111), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), julian_month_clim)}", 'coords': "{'time': julian_month_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), julian_month_clim)},\n coords={'time': julian_month_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (18968, 19111), True, 'import xarray as xr\n'), ((19402, 19475), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2021-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""noleap"""'}), "('2020-01-01', '2021-01-01', freq='MS', calendar='noleap')\n", (19417, 19475), True, 'import xarray as xr\n'), ((19817, 19970), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), noleap_month_clim)}", 'coords': "{'time': noleap_month_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), noleap_month_clim)},\n coords={'time': noleap_month_clim_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (19827, 19970), True, 'import xarray as xr\n'), ((20264, 
20339), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2021-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""all_leap"""'}), "('2020-01-01', '2021-01-01', freq='MS', calendar='all_leap')\n", (20279, 20339), True, 'import xarray as xr\n'), ((20697, 20860), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), all_leap_month_clim)}", 'coords': "{'time': all_leap_month_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), all_leap_month_clim)\n }, coords={'time': all_leap_month_clim_time, 'lat': [-90.0], 'lon': [-\n 180.0]})\n", (20707, 20860), True, 'import xarray as xr\n'), ((21047, 21121), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2021-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""360_day"""'}), "('2020-01-01', '2021-01-01', freq='MS', calendar='360_day')\n", (21062, 21121), True, 'import xarray as xr\n'), ((21511, 21680), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), day_360_leap_month_clim)}", 'coords': "{'time': day_360_leap_month_clim_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'),\n day_360_leap_month_clim)}, coords={'time': day_360_leap_month_clim_time,\n 'lat': [-90.0], 'lon': [-180.0]})\n", (21521, 21680), True, 'import xarray as xr\n'), ((21715, 21946), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(julian_daily, julian_day_2_month_clim), (noleap_daily,\n noleap_day_2_month_clim), (all_leap_daily, all_leap_day_2_month_clim),\n (day_360_daily, day_360_leap_day_2_month_clim)]'], {}), "('dset, expected', [(julian_daily,\n julian_day_2_month_clim), (noleap_daily, noleap_day_2_month_clim), (\n all_leap_daily, all_leap_day_2_month_clim), (day_360_daily,\n day_360_leap_day_2_month_clim)])\n", (21738, 21946), False, 'import pytest\n'), ((22472, 22545), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', 
'"""2022-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""julian"""'}), "('2020-01-01', '2022-01-01', freq='MS', calendar='julian')\n", (22487, 22545), True, 'import xarray as xr\n'), ((22902, 23053), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), julian_month_avg)}", 'coords': "{'time': julian_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), julian_month_avg)},\n coords={'time': julian_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (22912, 23053), True, 'import xarray as xr\n'), ((23353, 23426), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2022-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""noleap"""'}), "('2020-01-01', '2022-01-01', freq='MS', calendar='noleap')\n", (23368, 23426), True, 'import xarray as xr\n'), ((23783, 23934), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), noleap_month_avg)}", 'coords': "{'time': noleap_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), noleap_month_avg)},\n coords={'time': noleap_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (23793, 23934), True, 'import xarray as xr\n'), ((24235, 24310), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', '"""2022-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""all_leap"""'}), "('2020-01-01', '2022-01-01', freq='MS', calendar='all_leap')\n", (24250, 24310), True, 'import xarray as xr\n'), ((24659, 24814), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), all_leap_month_avg)}", 'coords': "{'time': all_leap_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'), all_leap_month_avg)},\n coords={'time': all_leap_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]})\n", (24669, 24814), True, 'import xarray as xr\n'), ((24996, 25070), 'xarray.cftime_range', 'xr.cftime_range', (['"""2020-01-01"""', 
'"""2022-01-01"""'], {'freq': '"""MS"""', 'calendar': '"""360_day"""'}), "('2020-01-01', '2022-01-01', freq='MS', calendar='360_day')\n", (25011, 25070), True, 'import xarray as xr\n'), ((25449, 25616), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), day_360_leap_month_avg)}", 'coords': "{'time': day_360_leap_month_avg_time, 'lat': [-90.0], 'lon': [-180.0]}"}), "(data_vars={'data': (('time', 'lat', 'lon'),\n day_360_leap_month_avg)}, coords={'time': day_360_leap_month_avg_time,\n 'lat': [-90.0], 'lon': [-180.0]})\n", (25459, 25616), True, 'import xarray as xr\n'), ((25651, 25878), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dset, expected"""', '[(julian_daily, julian_day_2_month_avg), (noleap_daily,\n noleap_day_2_month_avg), (all_leap_daily, all_leap_day_2_month_avg), (\n day_360_daily, day_360_leap_day_2_month_avg)]'], {}), "('dset, expected', [(julian_daily,\n julian_day_2_month_avg), (noleap_daily, noleap_day_2_month_avg), (\n all_leap_daily, all_leap_day_2_month_avg), (day_360_daily,\n day_360_leap_day_2_month_avg)])\n", (25674, 25878), False, 'import pytest\n'), ((1025, 1084), 'numpy.linspace', 'np.linspace', ([], {'start': '(-90)', 'stop': '(90)', 'num': 'nlats', 'dtype': '"""float32"""'}), "(start=-90, stop=90, num=nlats, dtype='float32')\n", (1036, 1084), True, 'import numpy as np\n'), ((1096, 1157), 'numpy.linspace', 'np.linspace', ([], {'start': '(-180)', 'stop': '(180)', 'num': 'nlons', 'dtype': '"""float32"""'}), "(start=-180, stop=180, num=nlons, dtype='float32')\n", (1107, 1157), True, 'import numpy as np\n'), ((1384, 1424), 'numpy.tile', 'np.tile', (['month_values', '(1, nlats, nlons)'], {}), '(month_values, (1, nlats, nlons))\n', (1391, 1424), True, 'import numpy as np\n'), ((2001, 2078), 'xarray.cftime_range', 'xr.cftime_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': 'freq', 'calendar': 'calendar'}), '(start=start_date, end=end_date, freq=freq, calendar=calendar)\n', (2016, 
2078), True, 'import xarray as xr\n'), ((2171, 2230), 'numpy.linspace', 'np.linspace', ([], {'start': '(-90)', 'stop': '(90)', 'num': 'nlats', 'dtype': '"""float32"""'}), "(start=-90, stop=90, num=nlats, dtype='float32')\n", (2182, 2230), True, 'import numpy as np\n'), ((2242, 2303), 'numpy.linspace', 'np.linspace', ([], {'start': '(-180)', 'stop': '(180)', 'num': 'nlons', 'dtype': '"""float32"""'}), "(start=-180, stop=180, num=nlons, dtype='float32')\n", (2253, 2303), True, 'import numpy as np\n'), ((2406, 2440), 'numpy.tile', 'np.tile', (['values', '(1, nlats, nlons)'], {}), '(values, (1, nlats, nlons))\n', (2413, 2440), True, 'import numpy as np\n'), ((2450, 2566), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'data': (('time', 'lat', 'lon'), data)}", 'coords': "{'time': time, 'lat': lats, 'lon': lons}"}), "(data_vars={'data': (('time', 'lat', 'lon'), data)}, coords={\n 'time': time, 'lat': lats, 'lon': lons})\n", (2460, 2566), True, 'import xarray as xr\n'), ((3127, 3153), 'geocat.comp.climatology', 'climatology', (['dataset', 'freq'], {}), '(dataset, freq)\n', (3138, 3153), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((3402, 3424), 'geocat.comp.anomaly', 'anomaly', (['dataset', 'freq'], {}), '(dataset, freq)\n', (3409, 3424), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((4932, 4964), 'geocat.comp.month_to_season', 'month_to_season', (['dataset', 'season'], {}), '(dataset, season)\n', (4947, 4964), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((4969, 5028), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["season_ds['my_var'].data", 'expected'], {}), "(season_ds['my_var'].data, expected)\n", (4992, 5028), True, 'import numpy as np\n'), ((5486, 5518), 'geocat.comp.month_to_season', 'month_to_season', (['dataset', 'season'], {}), '(dataset, 
season)\n', (5501, 5518), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((5523, 5582), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["season_ds['my_var'].data", 'expected'], {}), "(season_ds['my_var'].data, expected)\n", (5546, 5582), True, 'import numpy as np\n'), ((5941, 5969), 'geocat.comp.month_to_season', 'month_to_season', (['ds3', 'season'], {}), '(ds3, season)\n', (5956, 5969), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((6412, 6476), 'geocat.comp.month_to_season', 'month_to_season', (['dataset', '"""JFM"""'], {'time_coord_name': 'time_coordinate'}), "(dataset, 'JFM', time_coord_name=time_coordinate)\n", (6427, 6476), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((6481, 6558), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['season_ds[var_name].data', 'expected'], {'decimal': '(1)'}), '(season_ds[var_name].data, expected, decimal=1)\n', (6511, 6558), True, 'import numpy as np\n'), ((7579, 7614), 'geocat.comp.calendar_average', 'calendar_average', (['dset'], {'freq': '"""hour"""'}), "(dset, freq='hour')\n", (7595, 7614), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((7619, 7660), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (7642, 7660), True, 'import xarray as xr\n'), ((8207, 8241), 'geocat.comp.calendar_average', 'calendar_average', (['dset'], {'freq': '"""day"""'}), "(dset, freq='day')\n", (8223, 8241), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((8246, 8287), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (8269, 8287), True, 'import xarray as 
xr\n'), ((9062, 9098), 'geocat.comp.calendar_average', 'calendar_average', (['dset'], {'freq': '"""month"""'}), "(dset, freq='month')\n", (9078, 9098), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((9103, 9144), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (9126, 9144), True, 'import xarray as xr\n'), ((10268, 10305), 'geocat.comp.calendar_average', 'calendar_average', (['dset'], {'freq': '"""season"""'}), "(dset, freq='season')\n", (10284, 10305), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((10310, 10354), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (10336, 10354), True, 'import xarray as xr\n'), ((10379, 10406), 'cftime.datetime', 'cftime.datetime', (['(2020)', '(7)', '(2)'], {}), '(2020, 7, 2)\n', (10394, 10406), False, 'import cftime\n'), ((10412, 10448), 'cftime.datetime', 'cftime.datetime', (['(2021)', '(7)', '(2)'], {'hour': '(12)'}), '(2021, 7, 2, hour=12)\n', (10427, 10448), False, 'import cftime\n'), ((11159, 11194), 'geocat.comp.calendar_average', 'calendar_average', (['dset'], {'freq': '"""year"""'}), "(dset, freq='year')\n", (11175, 11194), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((11199, 11243), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (11225, 11243), True, 'import xarray as xr\n'), ((12007, 12045), 'geocat.comp.climatology_average', 'climatology_average', (['dset'], {'freq': '"""hour"""'}), "(dset, freq='hour')\n", (12026, 12045), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((12050, 12094), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', 
(['result', 'expected'], {}), '(result, expected)\n', (12076, 12094), True, 'import xarray as xr\n'), ((12780, 12817), 'geocat.comp.climatology_average', 'climatology_average', (['dset'], {'freq': '"""day"""'}), "(dset, freq='day')\n", (12799, 12817), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((12822, 12863), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (12845, 12863), True, 'import xarray as xr\n'), ((13578, 13617), 'geocat.comp.climatology_average', 'climatology_average', (['dset'], {'freq': '"""month"""'}), "(dset, freq='month')\n", (13597, 13617), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((13622, 13666), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (13648, 13666), True, 'import xarray as xr\n'), ((14505, 14545), 'geocat.comp.climatology_average', 'climatology_average', (['dset'], {'freq': '"""season"""'}), "(dset, freq='season')\n", (14524, 14545), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((14550, 14594), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (14576, 14594), True, 'import xarray as xr\n'), ((15372, 15430), 'geocat.comp.climatology_average', 'climatology_average', (['dset'], {'freq': '"""month"""', 'time_dim': 'time_dim'}), "(dset, freq='month', time_dim=time_dim)\n", (15391, 15430), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((15435, 15479), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (15461, 15479), True, 'import xarray as xr\n'), ((15761, 15816), 'geocat.comp.calendar_average', 
'calendar_average', (['dset'], {'freq': '"""month"""', 'time_dim': 'time_dim'}), "(dset, freq='month', time_dim=time_dim)\n", (15777, 15816), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((15821, 15865), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (15847, 15865), True, 'import xarray as xr\n'), ((16080, 16117), 'geocat.comp.climatology_average', 'climatology_average', (['da'], {'freq': '"""month"""'}), "(da, freq='month')\n", (16099, 16117), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((16122, 16166), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (16148, 16166), True, 'import xarray as xr\n'), ((16355, 16389), 'geocat.comp.calendar_average', 'calendar_average', (['da'], {'freq': '"""month"""'}), "(da, freq='month')\n", (16371, 16389), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((16394, 16435), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (16417, 16435), True, 'import xarray as xr\n'), ((22119, 22158), 'geocat.comp.climatology_average', 'climatology_average', (['dset'], {'freq': '"""month"""'}), "(dset, freq='month')\n", (22138, 22158), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((22163, 22207), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (22189, 22207), True, 'import xarray as xr\n'), ((26048, 26084), 'geocat.comp.calendar_average', 'calendar_average', (['dset'], {'freq': '"""month"""'}), "(dset, freq='month')\n", (26064, 26084), False, 'from geocat.comp import anomaly, climatology, month_to_season, 
calendar_average, climatology_average\n'), ((26089, 26130), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (26112, 26130), True, 'import xarray as xr\n'), ((1282, 1318), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': '(nmonths + 1)'}), '(start=1, stop=nmonths + 1)\n', (1291, 1318), True, 'import numpy as np\n'), ((2738, 2763), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2751, 2763), False, 'import pytest\n'), ((2773, 2802), 'geocat.comp.climatology', 'climatology', (['dset_a', '"""hourly"""'], {}), "(dset_a, 'hourly')\n", (2784, 2802), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((2851, 2876), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2864, 2876), False, 'import pytest\n'), ((2886, 2922), 'geocat.comp.climatology', 'climatology', (['dset_encoded', '"""monthly"""'], {}), "(dset_encoded, 'monthly')\n", (2897, 2922), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((5089, 5112), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (5102, 5112), False, 'import pytest\n'), ((5122, 5150), 'geocat.comp.month_to_season', 'month_to_season', (['ds1', '"""TEST"""'], {}), "(ds1, 'TEST')\n", (5137, 5150), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((5214, 5239), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5227, 5239), False, 'import pytest\n'), ((5249, 5293), 'geocat.comp.month_to_season', 'month_to_season', (['partial_year_dataset', '"""JFM"""'], {}), "(partial_year_dataset, 'JFM')\n", (5264, 5293), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((7035, 7061), 'numpy.arange', 'np.arange', (['(0.5)', '(35088.5)', '(2)'], 
{}), '(0.5, 35088.5, 2)\n', (7044, 7061), True, 'import numpy as np\n'), ((7673, 7701), 'numpy.arange', 'np.arange', (['(11.5)', '(17555.5)', '(24)'], {}), '(11.5, 17555.5, 24)\n', (7682, 7701), True, 'import numpy as np\n'), ((8302, 8452), 'numpy.array', 'np.array', (['[15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, \n 410.5, 440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715]'], {}), '([15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, \n 381, 410.5, 440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715])\n', (8310, 8452), True, 'import numpy as np\n'), ((9160, 9226), 'numpy.array', 'np.array', (['[29.5, 105.5, 197.5, 289, 379.5, 470.5, 562.5, 654, 715]'], {}), '([29.5, 105.5, 197.5, 289, 379.5, 470.5, 562.5, 654, 715])\n', (9168, 9226), True, 'import numpy as np\n'), ((9733, 9818), 'numpy.array', 'np.array', (['[0.483333333, 3, 6.010869565, 9, 11.96666667, 15, 18.01086957, 21, 23]'], {}), '([0.483333333, 3, 6.010869565, 9, 11.96666667, 15, 18.01086957, 21, 23]\n )\n', (9741, 9818), True, 'import numpy as np\n'), ((12879, 12974), 'numpy.array', 'np.array', (['[198, 224.5438596, 257.5, 288, 318.5, 349, 379.5, 410.5, 441, 471.5, 502, 532.5\n ]'], {}), '([198, 224.5438596, 257.5, 288, 318.5, 349, 379.5, 410.5, 441, \n 471.5, 502, 532.5])\n', (12887, 12974), True, 'import numpy as np\n'), ((13683, 13723), 'numpy.array', 'np.array', (['[320.9392265, 380, 288, 471.5]'], {}), '([320.9392265, 380, 288, 471.5])\n', (13691, 13723), True, 'import numpy as np\n'), ((14005, 14048), 'numpy.array', 'np.array', (['[10.04972376, 12.01086957, 9, 15]'], {}), '([10.04972376, 12.01086957, 9, 15])\n', (14013, 14048), True, 'import numpy as np\n'), ((14770, 14793), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (14783, 14793), False, 'import pytest\n'), ((14803, 14842), 'geocat.comp.climatology_average', 'climatology_average', (['monthly'], {'freq': 'freq'}), '(monthly, freq=freq)\n', (14822, 14842), False, 'from 
geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((14949, 14972), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (14962, 14972), False, 'import pytest\n'), ((14982, 15018), 'geocat.comp.calendar_average', 'calendar_average', (['monthly'], {'freq': 'freq'}), '(monthly, freq=freq)\n', (14998, 15018), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((16583, 16608), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16596, 16608), False, 'import pytest\n'), ((16618, 16660), 'geocat.comp.climatology_average', 'climatology_average', (['dset_encoded', '"""month"""'], {}), "(dset_encoded, 'month')\n", (16637, 16660), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((16727, 16752), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16740, 16752), False, 'import pytest\n'), ((16762, 16801), 'geocat.comp.calendar_average', 'calendar_average', (['dset_encoded', '"""month"""'], {}), "(dset_encoded, 'month')\n", (16778, 16801), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((17058, 17083), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17071, 17083), False, 'import pytest\n'), ((17093, 17137), 'geocat.comp.climatology_average', 'climatology_average', (['non_uniform'], {'freq': '"""day"""'}), "(non_uniform, freq='day')\n", (17112, 17137), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((17204, 17229), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17217, 17229), False, 'import pytest\n'), ((17239, 17280), 'geocat.comp.calendar_average', 'calendar_average', (['non_uniform'], {'freq': '"""day"""'}), "(non_uniform, freq='day')\n", (17255, 
17280), False, 'from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average\n'), ((18354, 18450), 'numpy.array', 'np.array', (['[198, 224.54385965, 257.5, 288, 318.5, 349, 379.5, 410.5, 441, 471.5, 502, \n 532.5]'], {}), '([198, 224.54385965, 257.5, 288, 318.5, 349, 379.5, 410.5, 441, \n 471.5, 502, 532.5])\n', (18362, 18450), True, 'import numpy as np\n'), ((19220, 19308), 'numpy.array', 'np.array', (['[197.5, 227, 256.5, 287, 317.5, 348, 378.5, 409.5, 440, 470.5, 501, 531.5]'], {}), '([197.5, 227, 256.5, 287, 317.5, 348, 378.5, 409.5, 440, 470.5, 501,\n 531.5])\n', (19228, 19308), True, 'import numpy as np\n'), ((20082, 20160), 'numpy.array', 'np.array', (['[198, 228, 258, 288.5, 319, 349.5, 380, 411, 441.5, 472, 502.5, 533]'], {}), '([198, 228, 258, 288.5, 319, 349.5, 380, 411, 441.5, 472, 502.5, 533])\n', (20090, 20160), True, 'import numpy as np\n'), ((20970, 20997), 'numpy.arange', 'np.arange', (['(194.5)', '(554.5)', '(30)'], {}), '(194.5, 554.5, 30)\n', (20979, 20997), True, 'import numpy as np\n'), ((22274, 22424), 'numpy.array', 'np.array', (['[15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, \n 410.5, 440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715]'], {}), '([15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, \n 381, 410.5, 440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715])\n', (22282, 22424), True, 'import numpy as np\n'), ((23153, 23304), 'numpy.array', 'np.array', (['[15, 44.5, 74, 104.5, 135, 165.5, 196, 227, 257.5, 288, 318.5, 349, 380, \n 409.5, 439, 469.5, 500, 530.5, 561, 592, 622.5, 653, 683.5, 714]'], {}), '([15, 44.5, 74, 104.5, 135, 165.5, 196, 227, 257.5, 288, 318.5, 349,\n 380, 409.5, 439, 469.5, 500, 530.5, 561, 592, 622.5, 653, 683.5, 714])\n', (23161, 23304), True, 'import numpy as np\n'), ((24037, 24185), 'numpy.array', 'np.array', (['[15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 411,\n 441, 471.5, 502, 532.5, 563, 594, 
624.5, 655, 685.5, 716]'], {}), '([15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, \n 381, 411, 441, 471.5, 502, 532.5, 563, 594, 624.5, 655, 685.5, 716])\n', (24045, 24185), True, 'import numpy as np\n'), ((24921, 24947), 'numpy.arange', 'np.arange', (['(14.5)', '(734.5)', '(30)'], {}), '(14.5, 734.5, 30)\n', (24930, 24947), True, 'import numpy as np\n'), ((903, 930), 'pandas.to_datetime', 'pd.to_datetime', (['start_month'], {}), '(start_month)\n', (917, 930), True, 'import pandas as pd\n'), ((8578, 8630), 'numpy.vstack', 'np.vstack', (['(month_avg_time[:-1], month_avg_time[1:])'], {}), '((month_avg_time[:-1], month_avg_time[1:]))\n', (8587, 8630), True, 'import numpy as np\n'), ((9375, 9429), 'numpy.vstack', 'np.vstack', (['(season_avg_time[:-1], season_avg_time[1:])'], {}), '((season_avg_time[:-1], season_avg_time[1:]))\n', (9384, 9429), True, 'import numpy as np\n'), ((11322, 11351), 'numpy.arange', 'np.arange', (['(8784.5)', '(11616.5)', '(2)'], {}), '(8784.5, 11616.5, 2)\n', (11331, 11351), True, 'import numpy as np\n'), ((11381, 11409), 'numpy.arange', 'np.arange', (['(2832.5)', '(2880.5)', '(2)'], {}), '(2832.5, 2880.5, 2)\n', (11390, 11409), True, 'import numpy as np\n'), ((11439, 11469), 'numpy.arange', 'np.arange', (['(11640.5)', '(26328.5)', '(2)'], {}), '(11640.5, 26328.5, 2)\n', (11448, 11469), True, 'import numpy as np\n'), ((12124, 12153), 'numpy.arange', 'np.arange', (['(4403.5)', '(5819.5)', '(24)'], {}), '(4403.5, 5819.5, 24)\n', (12133, 12153), True, 'import numpy as np\n'), ((12219, 12249), 'numpy.arange', 'np.arange', (['(5831.5)', '(13175.5)', '(24)'], {}), '(5831.5, 13175.5, 24)\n', (12228, 12249), True, 'import numpy as np\n'), ((13102, 13156), 'numpy.vstack', 'np.vstack', (['(month_clim_time[:-1], month_clim_time[1:])'], {}), '((month_clim_time[:-1], month_clim_time[1:]))\n', (13111, 13156), True, 'import numpy as np\n'), ((16925, 16937), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (16934, 16937), True, 'import 
numpy as np\n'), ((18778, 18846), 'numpy.vstack', 'np.vstack', (['(julian_month_clim_time[:-1], julian_month_clim_time[1:])'], {}), '((julian_month_clim_time[:-1], julian_month_clim_time[1:]))\n', (18787, 18846), True, 'import numpy as np\n'), ((19637, 19705), 'numpy.vstack', 'np.vstack', (['(noleap_month_clim_time[:-1], noleap_month_clim_time[1:])'], {}), '((noleap_month_clim_time[:-1], noleap_month_clim_time[1:]))\n', (19646, 19705), True, 'import numpy as np\n'), ((20509, 20581), 'numpy.vstack', 'np.vstack', (['(all_leap_month_clim_time[:-1], all_leap_month_clim_time[1:])'], {}), '((all_leap_month_clim_time[:-1], all_leap_month_clim_time[1:]))\n', (20518, 20581), True, 'import numpy as np\n'), ((21307, 21392), 'numpy.vstack', 'np.vstack', (['(day_360_leap_month_clim_time[:-1], day_360_leap_month_clim_time[1:])'], {}), '((day_360_leap_month_clim_time[:-1], day_360_leap_month_clim_time[1:])\n )\n', (21316, 21392), True, 'import numpy as np\n'), ((22703, 22769), 'numpy.vstack', 'np.vstack', (['(julian_month_avg_time[:-1], julian_month_avg_time[1:])'], {}), '((julian_month_avg_time[:-1], julian_month_avg_time[1:]))\n', (22712, 22769), True, 'import numpy as np\n'), ((23584, 23650), 'numpy.vstack', 'np.vstack', (['(noleap_month_avg_time[:-1], noleap_month_avg_time[1:])'], {}), '((noleap_month_avg_time[:-1], noleap_month_avg_time[1:]))\n', (23593, 23650), True, 'import numpy as np\n'), ((24476, 24546), 'numpy.vstack', 'np.vstack', (['(all_leap_month_avg_time[:-1], all_leap_month_avg_time[1:])'], {}), '((all_leap_month_avg_time[:-1], all_leap_month_avg_time[1:]))\n', (24485, 24546), True, 'import numpy as np\n'), ((25252, 25330), 'numpy.vstack', 'np.vstack', (['(day_360_leap_month_avg_time[:-1], day_360_leap_month_avg_time[1:])'], {}), '((day_360_leap_month_avg_time[:-1], day_360_leap_month_avg_time[1:]))\n', (25261, 25330), True, 'import numpy as np\n')] |
from gna.ui import basecmd
from gna.env import env
import ROOT
import numpy as np
import scipy.misc
from scipy.stats import poisson
import gna.constructors as C
class cmd(basecmd):
    """Build a toy observable spectrum: a Poisson-weighted sum of Gaussian
    peaks with background, integrated over energy bins with Gauss-Legendre
    quadrature, and register it as the 'spectrum' observable.
    """
    @classmethod
    def initparser(cls, parser, env):
        """Register command-line options controlling the binning and model."""
        parser.add_argument('--name', required=True)
        parser.add_argument('--Emin', default=0, type=float)
        parser.add_argument('--Emax', default=5, type=float)
        parser.add_argument('--nbins', default=70, type=int)
        # NOTE(review): no type= here; the value stays an int only because the default is int.
        parser.add_argument('--order', default=8)
        parser.add_argument('--PoissonMean', default=0.2, type=float)
        parser.add_argument('--PoissonOrder', default=4, type=int)
    def init(self):
        """Construct the computation graph and expose the summed spectrum."""
        ns = env.ns(self.opts.name)
        # Model parameters (central value and sigma) required in this namespace;
        # the ROOT-side models below read them by name.
        ns.reqparameter('BackgroundRate', central=0, sigma=0.1)
        ns.reqparameter('Mu', central=1, sigma=1)
        ns.reqparameter('E0', central=2, sigma=0.05)
        ns.reqparameter('Width', central=0.2, sigma=0.005)
        # Bin edges and a per-bin Gauss-Legendre integration order.
        edges = np.linspace(self.opts.Emin, self.opts.Emax, self.opts.nbins+1)
        orders = np.array([self.opts.order]*(len(edges)-1), dtype=int)
        integrator = ROOT.GaussLegendre(edges, orders, len(orders))
        hist = ROOT.GaussLegendreHist(integrator)
        signal = ROOT.Sum()
        n = self.opts.PoissonOrder
        model = {}
        #ff = np.arange(1,n+1)
        #ff = 1/scipy.misc.factorial(ff)*np.exp(-self.opts.PoissionMean)
        #ff_points = C.Points(ff)
        #print(ff, ff_points)
        with ns:
            # One GaussianPeakWithBackground term per Poisson order i = 1..n,
            # each scaled by the Poisson probability pmf(i; PoissonMean).
            for i in range(1, n+1):
                print(i, n)
                model[i] = ROOT.GaussianPeakWithBackground(i)
                # Evaluate the rate on the integrator's sample points.
                model[i].rate.E(integrator.points.x)
                # print(model[i].rate,model[i].rate.rate,ff_points[i])
                prod = ROOT.Product()
                prod.multiply(model[i].rate.rate)
                poisson_factor = poisson.pmf(i, self.opts.PoissonMean)
                poisson_factor_prod = C.Points([poisson_factor])
                print(type(model[i].rate), poisson_factor, poisson_factor_prod)
                prod.multiply(poisson_factor_prod)
                signal.add(prod)
        # Histogram the summed signal and publish it.
        hist.hist.f(signal)
        ns.addobservable('spectrum', hist.hist)
| [
"gna.env.env.ns",
"scipy.stats.poisson.pmf",
"ROOT.GaussianPeakWithBackground",
"ROOT.Product",
"ROOT.GaussLegendreHist",
"numpy.linspace",
"ROOT.Sum",
"gna.constructors.Points"
] | [((694, 716), 'gna.env.env.ns', 'env.ns', (['self.opts.name'], {}), '(self.opts.name)\n', (700, 716), False, 'from gna.env import env\n'), ((960, 1024), 'numpy.linspace', 'np.linspace', (['self.opts.Emin', 'self.opts.Emax', '(self.opts.nbins + 1)'], {}), '(self.opts.Emin, self.opts.Emax, self.opts.nbins + 1)\n', (971, 1024), True, 'import numpy as np\n'), ((1177, 1211), 'ROOT.GaussLegendreHist', 'ROOT.GaussLegendreHist', (['integrator'], {}), '(integrator)\n', (1199, 1211), False, 'import ROOT\n'), ((1229, 1239), 'ROOT.Sum', 'ROOT.Sum', ([], {}), '()\n', (1237, 1239), False, 'import ROOT\n'), ((1572, 1606), 'ROOT.GaussianPeakWithBackground', 'ROOT.GaussianPeakWithBackground', (['i'], {}), '(i)\n', (1603, 1606), False, 'import ROOT\n'), ((1753, 1767), 'ROOT.Product', 'ROOT.Product', ([], {}), '()\n', (1765, 1767), False, 'import ROOT\n'), ((1851, 1888), 'scipy.stats.poisson.pmf', 'poisson.pmf', (['i', 'self.opts.PoissonMean'], {}), '(i, self.opts.PoissonMean)\n', (1862, 1888), False, 'from scipy.stats import poisson\n'), ((1927, 1953), 'gna.constructors.Points', 'C.Points', (['[poisson_factor]'], {}), '([poisson_factor])\n', (1935, 1953), True, 'import gna.constructors as C\n')] |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Loss functions."""
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
#----------------------------------------------------------------------------
# Logistic loss from the paper
# "Generative Adversarial Nets", Goodfellow et al. 2014
def G_logistic(G, D, opt, training_set, minibatch_size):
    """Saturating logistic generator loss: log(1 - sigmoid(D(G(z)))).

    Returns (loss, reg); the regularizer slot is always None.
    """
    _ = opt  # optimizer is not used by this loss
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    cond = training_set.get_random_labels_tf(minibatch_size)
    generated = G.get_output_for(z, cond, is_training=True)
    d_fake = D.get_output_for(generated, cond, is_training=True)
    # -softplus(x) == log(1 - sigmoid(x))
    loss = -tf.nn.softplus(d_fake)  # pylint: disable=invalid-unary-operand-type
    return loss, None
def G_logistic_ns(G, D, opt, training_set, minibatch_size):
    """Non-saturating logistic generator loss: -log(sigmoid(D(G(z)))).

    Returns (loss, reg); the regularizer slot is always None.
    """
    _ = opt  # optimizer is not used by this loss
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    cond = training_set.get_random_labels_tf(minibatch_size)
    generated = G.get_output_for(z, cond, is_training=True)
    d_fake = D.get_output_for(generated, cond, is_training=True)
    # softplus(-x) == -log(sigmoid(x))
    return tf.nn.softplus(-d_fake), None
def G_logistic_ns_dsp(G, D, opt, training_set, minibatch_size, latent_type='uniform', D_global_size=0):
    """Non-saturating logistic G loss for a generator with optional discrete latents.

    The first `D_global_size` latent entries are a one-hot categorical sample;
    the remaining continuous entries are drawn according to `latent_type`.
    """
    _ = opt
    cont_dim = G.input_shapes[0][1] - D_global_size
    one_hot = None
    if D_global_size > 0:
        cat = tf.random.uniform([minibatch_size], minval=0, maxval=D_global_size, dtype=tf.int32)
        one_hot = tf.one_hot(cat, D_global_size)
    if latent_type == 'uniform':
        cont = tf.random.uniform([minibatch_size, cont_dim], minval=-2, maxval=2)
    elif latent_type == 'normal':
        cont = tf.random_normal([minibatch_size, cont_dim])
    elif latent_type == 'trunc_normal':
        cont = tf.random.truncated_normal([minibatch_size, cont_dim])
    else:
        raise ValueError('Latent type not supported: ' + latent_type)
    latents = cont if D_global_size <= 0 else tf.concat([one_hot, cont], axis=1)
    labels = training_set.get_random_labels_tf(minibatch_size)
    fakes, _ = G.get_output_for(latents, labels, is_training=True)
    d_fake = D.get_output_for(fakes, labels, is_training=True)
    return tf.nn.softplus(-d_fake), None  # -log(sigmoid(d_fake))
def calc_info_gan_loss(latents, regress_out, D_global_size, C_global_size, D_lambda, C_lambda):
    """InfoGAN mutual-information loss for discrete + continuous latents.

    `regress_out` packs [discrete logits | continuous means | continuous
    log-variances] along axis 1. Returns the negative weighted
    log-likelihood of the sampled latents, per sample.
    """
    assert regress_out.shape.as_list()[1] == (D_global_size + 2 * C_global_size)
    # Categorical log-likelihood of the one-hot discrete latents.
    discrete_ll = 0
    if D_global_size > 0:
        softmaxed = tf.nn.softmax(regress_out[:, :D_global_size], axis=1)
        discrete_ll = tf.reduce_sum(latents[:, :D_global_size] * tf.log(softmaxed + 1e-12), axis=1)
    # Gaussian log-likelihood of the continuous latents.
    mu = regress_out[:, D_global_size:D_global_size + C_global_size]
    sigma = tf.sqrt(tf.exp(regress_out[:, D_global_size + C_global_size: D_global_size + C_global_size * 2]))
    z_score = (latents[:, D_global_size:] - mu) / (sigma + 1e-12)
    cont_ll = tf.reduce_sum(-0.5 * np.log(2 * np.pi) - tf.log(sigma + 1e-12) - 0.5 * tf.square(z_score), axis=1)
    return -D_lambda * discrete_ll - C_lambda * cont_ll
def G_logistic_ns_info_gan(G, D, I, opt, training_set, minibatch_size, latent_type='uniform', D_global_size=0, D_lambda=1, C_lambda=1):
    """Non-saturating logistic G loss plus an InfoGAN mutual-information term.

    Returns (G_loss, None, I_loss, None).
    """
    _ = opt
    cont_dim = G.input_shapes[0][1] - D_global_size
    one_hot = None
    if D_global_size > 0:
        cat = tf.random.uniform([minibatch_size], minval=0, maxval=D_global_size, dtype=tf.int32)
        one_hot = tf.one_hot(cat, D_global_size)
    if latent_type == 'uniform':
        cont = tf.random.uniform([minibatch_size, cont_dim], minval=-2, maxval=2)
    elif latent_type == 'normal':
        cont = tf.random_normal([minibatch_size, cont_dim])
    elif latent_type == 'trunc_normal':
        cont = tf.random.truncated_normal([minibatch_size, cont_dim])
    else:
        raise ValueError('Latent type not supported: ' + latent_type)
    latents = cont if D_global_size <= 0 else tf.concat([one_hot, cont], axis=1)
    labels = training_set.get_random_labels_tf(minibatch_size)
    fakes, _ = G.get_output_for(latents, labels, is_training=True)
    d_fake, hidden = D.get_output_for(fakes, labels, is_training=True)
    G_loss = tf.nn.softplus(-d_fake)  # -log(sigmoid(d_fake))
    # Regress the latents back from the generated images themselves
    # (not from D's hidden features).
    regress_out = I.get_output_for(fakes, is_training=True)
    I_loss = calc_info_gan_loss(latents, regress_out, D_global_size, cont_dim, D_lambda, C_lambda)
    I_loss = autosummary('Loss/I_loss', I_loss)
    return G_loss, None, I_loss, None
def calc_vc_loss(C_delta_latents, regress_out, D_global_size, C_global_size, D_lambda, C_lambda, delta_type):
    """Variation-consistency loss on the continuous latents.

    Args:
        C_delta_latents: target delta encoding; one-hot over continuous dims
            for 'onedim', values in [0, 1] per dim for 'fulldim'.
        regress_out: regressor output of width D_global_size + C_global_size.
        D_global_size: number of discrete latent dims (skipped by this loss).
        C_global_size: number of continuous latent dims.
        D_lambda: unused here; kept for signature parity with related losses.
        C_lambda: weight on the continuous-latent term.
        delta_type: 'onedim' (cross-entropy over which dim was perturbed) or
            'fulldim' (squared error against sigmoid outputs).

    Returns:
        Per-sample loss tensor of shape [minibatch].

    Raises:
        ValueError: if `delta_type` is not recognized. (Previously an unknown
            delta_type fell through both branches and raised an opaque
            NameError on `I_loss`.)
    """
    assert regress_out.shape.as_list()[1] == (D_global_size + C_global_size)
    # Continuous latents loss
    if delta_type == 'onedim':
        # Cross-entropy: the regressor should identify which latent dim changed.
        prob_C = tf.nn.softmax(regress_out[:, D_global_size:], axis=1)
        I_loss_C = C_delta_latents * tf.log(prob_C + 1e-12)
        I_loss_C = C_lambda * I_loss_C
        I_loss_C = tf.reduce_sum(I_loss_C, axis=1)
        I_loss = - I_loss_C
    elif delta_type == 'fulldim':
        # Squared error between predicted and target (normalized) deltas.
        I_loss_C = tf.reduce_sum((tf.nn.sigmoid(regress_out[:, D_global_size:]) - C_delta_latents) ** 2, axis=1)
        I_loss = C_lambda * I_loss_C
    else:
        # Mirror the file's existing 'Latent type not supported' convention.
        raise ValueError('Delta type not supported: ' + delta_type)
    return I_loss
# def calc_vc_loss(delta_target, regress_out, D_global_size, C_global_size, D_lambda, C_lambda):
# assert regress_out.shape.as_list()[1] == (D_global_size + C_global_size)
# # Continuous latents loss
# I_loss_C = tf.reduce_mean((regress_out[:, D_global_size:] - delta_target) ** 2, axis=1)
# I_loss = C_lambda * I_loss_C
# return I_loss
def calc_cls_loss(discrete_latents, cls_out, D_global_size, C_global_size, cls_alpha):
    """Weighted categorical cross-entropy between the sampled one-hot discrete
    latents and the classifier logits `cls_out`."""
    assert cls_out.shape.as_list()[1] == D_global_size
    log_prob = tf.log(tf.nn.softmax(cls_out, axis=1) + 1e-12)
    return -cls_alpha * tf.reduce_sum(discrete_latents * log_prob, axis=1)
def G_logistic_ns_vc(G, D, I, opt, training_set, minibatch_size, I_info=None, latent_type='uniform',
                     D_global_size=0, D_lambda=0, C_lambda=1, F_beta=0, cls_alpha=0, epsilon=0.4,
                     random_eps=False, delta_type='onedim', cascading=False):
    """Non-saturating logistic G loss with a variation-consistency term.

    Generates two images from a latent and a perturbed copy of it, and asks
    regressor I to recover the perturbation (calc_vc_loss). Also adds a
    feature-map magnitude penalty (F_beta) and, when I_info is given, a
    discrete-latent classification loss (calc_cls_loss).

    Returns (G_loss, None, I_loss, I_info_loss); I_info_loss is None when
    I_info is None.
    """
    _ = opt
    discrete_latents = None
    C_global_size = G.input_shapes[0][1]-D_global_size
    if D_global_size > 0:
        # One-hot categorical sample for the discrete part of the latent.
        discrete_latents = tf.random.uniform([minibatch_size], minval=0, maxval=D_global_size, dtype=tf.int32)
        discrete_latents = tf.one_hot(discrete_latents, D_global_size)
        # NOTE(review): a second independent sample is drawn but never used below
        # (the perturbed latent gets zeros in its discrete slots instead).
        discrete_latents_2 = tf.random.uniform([minibatch_size], minval=0, maxval=D_global_size, dtype=tf.int32)
        discrete_latents_2 = tf.one_hot(discrete_latents_2, D_global_size)
    if latent_type == 'uniform':
        latents = tf.random.uniform([minibatch_size] + [G.input_shapes[0][1]-D_global_size], minval=-2, maxval=2)
    elif latent_type == 'normal':
        # latents = tf.random_normal([minibatch_size] + [G.input_shapes[0][1]-D_global_size])
        latents = tf.random.normal([minibatch_size] + [G.input_shapes[0][1]-D_global_size])
    elif latent_type == 'trunc_normal':
        latents = tf.random.truncated_normal([minibatch_size] + [G.input_shapes[0][1]-D_global_size])
    else:
        raise ValueError('Latent type not supported: ' + latent_type)
    if not cascading:
        # Sample delta latents
        if delta_type == 'onedim':
            # Pick one continuous dim per sample to perturb (one-hot mask).
            C_delta_latents = tf.random.uniform([minibatch_size], minval=0, maxval=C_global_size, dtype=tf.int32)
            C_delta_latents = tf.cast(tf.one_hot(C_delta_latents, C_global_size), latents.dtype)
        elif delta_type == 'fulldim':
            C_delta_latents = tf.random.uniform([minibatch_size, C_global_size], minval=0, maxval=1.0, dtype=latents.dtype)
    else:
        # apply cascading: gradually widen the set of perturbable dims as
        # training progresses (2 dims at step 0, +1 every cascade_step steps,
        # capped at C_global_size).
        cascade_max = 1e5
        cascade_step = cascade_max // int(C_global_size)
        global_step = tf.compat.v1.train.get_global_step()
        n_emph_free = tf.math.floormod(global_step // int(cascade_step), C_global_size) + 2
        n_emph = tf.math.minimum(n_emph_free, C_global_size)
        if delta_type == 'onedim':
            C_delta_latents = tf.random.uniform([minibatch_size], minval=0, maxval=n_emph, dtype=tf.int32)
            C_delta_latents = tf.cast(tf.one_hot(C_delta_latents, n_emph), latents.dtype)
        elif delta_type == 'fulldim':
            C_delta_latents = tf.random.uniform([minibatch_size, n_emph], minval=0, maxval=1.0, dtype=latents.dtype)
        # Pad the not-yet-active dims with zeros to full continuous width.
        C_delta_latents = tf.concat([C_delta_latents, tf.zeros([minibatch_size, C_global_size - n_emph])], axis=1)
    if delta_type == 'onedim':
        if not random_eps:
            delta_target = C_delta_latents * epsilon
            # delta_latents = tf.concat([tf.zeros([minibatch_size, D_global_size]), delta_target], axis=1)
        else:
            # Randomize the perturbation magnitude per sample.
            epsilon = epsilon * tf.random.normal([minibatch_size, 1], mean=0.0, stddev=2.0)
            # delta_target = tf.math.abs(C_delta_latents * epsilon)
            delta_target = C_delta_latents * epsilon
            # delta_latents = tf.concat([tf.zeros([minibatch_size, D_global_size]), delta_target], axis=1)
    else:
        # fulldim: center the [0,1] sample so perturbations are signed.
        delta_target = (C_delta_latents - 0.5) * epsilon
    delta_latents = delta_target + latents
    if D_global_size > 0:
        latents = tf.concat([discrete_latents, latents], axis=1)
        # delta_latents = tf.concat([discrete_latents_2, delta_latents], axis=1)
        delta_latents = tf.concat([tf.zeros([minibatch_size, D_global_size]), delta_latents], axis=1)
    labels = training_set.get_random_labels_tf(minibatch_size)
    # Two forward passes: original latent and its perturbed copy.
    fake1_out, feat_map1 = G.get_output_for(latents, labels, is_training=True)
    fake2_out, feat_map2 = G.get_output_for(delta_latents, labels, is_training=True)
    if I_info is not None:
        # D also exposes hidden features, consumed by I_info below.
        fake_scores_out, hidden = D.get_output_for(fake1_out, labels, is_training=True)
    else:
        fake_scores_out = D.get_output_for(fake1_out, labels, is_training=True)
    G_loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
    # I regresses the latent perturbation from the image pair.
    regress_out = I.get_output_for(fake1_out, fake2_out, is_training=True)
    I_loss = calc_vc_loss(C_delta_latents, regress_out, D_global_size, C_global_size, D_lambda, C_lambda, delta_type)
    # I_loss = calc_vc_loss(delta_target, regress_out, D_global_size, C_global_size, D_lambda, C_lambda)
    I_loss = autosummary('Loss/I_loss', I_loss)
    # Penalize the mean squared magnitude of the first pass's feature maps.
    F_loss = tf.reduce_mean(feat_map1 * feat_map1, axis=[1, 2, 3])
    F_loss = autosummary('Loss/F_loss', F_loss)
    I_loss += (F_loss * F_beta)
    if I_info is not None:
        cls_out = I_info.get_output_for(hidden, is_training=True)
        I_info_loss = calc_cls_loss(discrete_latents, cls_out, D_global_size, G.input_shapes[0][1]-D_global_size, cls_alpha)
        I_info_loss = autosummary('Loss/I_info_loss', I_info_loss)
    else:
        I_info_loss = None
    return G_loss, None, I_loss, I_info_loss
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels):
    """Plain logistic discriminator loss, no regularizer.

    Returns (loss, reg) with reg always None.
    """
    _ = opt, training_set
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    d_real = D.get_output_for(reals, labels, is_training=True)
    d_fake = D.get_output_for(fakes, labels, is_training=True)
    d_real = autosummary('Loss/scores/real', d_real)
    d_fake = autosummary('Loss/scores/fake', d_fake)
    # -log(1 - sigmoid(d_fake)) - log(sigmoid(d_real))
    return tf.nn.softplus(d_fake) + tf.nn.softplus(-d_real), None
#----------------------------------------------------------------------------
# R1 and R2 regularizers from the paper
# "Which Training Methods for GANs do actually Converge?", Mescheder et al. 2018
def D_logistic_r1(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0):
    """Logistic D loss with the R1 gradient penalty on real images.

    Returns (loss, reg) where reg is gamma/2 * ||grad_reals D(reals)||^2.
    """
    _ = opt, training_set
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    d_real = autosummary('Loss/scores/real', D.get_output_for(reals, labels, is_training=True))
    d_fake = autosummary('Loss/scores/fake', D.get_output_for(fakes, labels, is_training=True))
    loss = tf.nn.softplus(d_fake) + tf.nn.softplus(-d_real)
    with tf.name_scope('GradientPenalty'):
        grads = tf.gradients(tf.reduce_sum(d_real), [reals])[0]
        penalty = autosummary('Loss/gradient_penalty', tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return loss, penalty * (gamma * 0.5)
def D_logistic_r1_dsp(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0, latent_type='uniform', D_global_size=0):
    """R1-regularized logistic D loss for a generator with optional discrete latents."""
    _ = opt, training_set
    cont_dim = G.input_shapes[0][1] - D_global_size
    one_hot = None
    if D_global_size > 0:
        cat = tf.random.uniform([minibatch_size], minval=0, maxval=D_global_size, dtype=tf.int32)
        one_hot = tf.one_hot(cat, D_global_size)
    if latent_type == 'uniform':
        cont = tf.random.uniform([minibatch_size, cont_dim], minval=-2, maxval=2)
    elif latent_type == 'normal':
        cont = tf.random_normal([minibatch_size, cont_dim])
    elif latent_type == 'trunc_normal':
        cont = tf.random.truncated_normal([minibatch_size, cont_dim])
    else:
        raise ValueError('Latent type not supported: ' + latent_type)
    latents = cont if D_global_size <= 0 else tf.concat([one_hot, cont], axis=1)
    fakes, _ = G.get_output_for(latents, labels, is_training=True)
    d_real = autosummary('Loss/scores/real', D.get_output_for(reals, labels, is_training=True))
    d_fake = autosummary('Loss/scores/fake', D.get_output_for(fakes, labels, is_training=True))
    loss = tf.nn.softplus(d_fake) + tf.nn.softplus(-d_real)
    with tf.name_scope('GradientPenalty'):
        grads = tf.gradients(tf.reduce_sum(d_real), [reals])[0]
        penalty = autosummary('Loss/gradient_penalty', tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return loss, penalty * (gamma * 0.5)
def D_logistic_r1_info_gan(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0, latent_type='uniform', D_global_size=0):
    """R1-regularized logistic D loss for the InfoGAN variant, where D returns
    (scores, hidden); the hidden features are ignored here."""
    _ = opt, training_set
    cont_dim = G.input_shapes[0][1] - D_global_size
    one_hot = None
    if D_global_size > 0:
        cat = tf.random.uniform([minibatch_size], minval=0, maxval=D_global_size, dtype=tf.int32)
        one_hot = tf.one_hot(cat, D_global_size)
    if latent_type == 'uniform':
        cont = tf.random.uniform([minibatch_size, cont_dim], minval=-2, maxval=2)
    elif latent_type == 'normal':
        cont = tf.random_normal([minibatch_size, cont_dim])
    elif latent_type == 'trunc_normal':
        cont = tf.random.truncated_normal([minibatch_size, cont_dim])
    else:
        raise ValueError('Latent type not supported: ' + latent_type)
    latents = cont if D_global_size <= 0 else tf.concat([one_hot, cont], axis=1)
    fakes, _ = G.get_output_for(latents, labels, is_training=True)
    d_real, _ = D.get_output_for(reals, labels, is_training=True)
    d_fake, _ = D.get_output_for(fakes, labels, is_training=True)
    d_real = autosummary('Loss/scores/real', d_real)
    d_fake = autosummary('Loss/scores/fake', d_fake)
    loss = tf.nn.softplus(d_fake) + tf.nn.softplus(-d_real)
    with tf.name_scope('GradientPenalty'):
        grads = tf.gradients(tf.reduce_sum(d_real), [reals])[0]
        penalty = autosummary('Loss/gradient_penalty', tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return loss, penalty * (gamma * 0.5)
def D_logistic_r2(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0):
    """Logistic D loss with the R2 gradient penalty (on fake images).

    Returns (loss, reg) where reg is gamma/2 * ||grad_fakes D(fakes)||^2.
    """
    _ = opt, training_set
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    d_real = autosummary('Loss/scores/real', D.get_output_for(reals, labels, is_training=True))
    d_fake = autosummary('Loss/scores/fake', D.get_output_for(fakes, labels, is_training=True))
    loss = tf.nn.softplus(d_fake) + tf.nn.softplus(-d_real)
    with tf.name_scope('GradientPenalty'):
        grads = tf.gradients(tf.reduce_sum(d_fake), [fakes])[0]
        penalty = autosummary('Loss/gradient_penalty', tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return loss, penalty * (gamma * 0.5)
#----------------------------------------------------------------------------
# WGAN loss from the paper
# "Wasserstein Generative Adversarial Networks", Arjovsky et al. 2017
def G_wgan(G, D, opt, training_set, minibatch_size):
    """WGAN generator loss ("Wasserstein GAN", Arjovsky et al. 2017).

    The generator maximizes the critic's score on its samples, so the
    per-sample loss is simply the negated critic output. Returns
    (loss, None); there is no generator-side regularizer.
    """
    _ = opt  # unused; kept for a uniform loss-function signature
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_labels = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(z, fake_labels, is_training=True)
    scores = D.get_output_for(fakes, fake_labels, is_training=True)
    return -scores, None
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, wgan_epsilon=0.001):
    """WGAN critic loss with a small epsilon drift penalty.

    loss = D(fake) - D(real) + wgan_epsilon * D(real)^2. The drift term
    keeps the (otherwise unconstrained) real scores from wandering far
    from zero. Returns (loss, None); no lazy regularizer.
    """
    _ = opt, training_set  # unused; kept for a uniform loss-function signature
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    real_scores = autosummary('Loss/scores/real', D.get_output_for(reals, labels, is_training=True))
    fake_scores = autosummary('Loss/scores/fake', D.get_output_for(fakes, labels, is_training=True))
    loss = fake_scores - real_scores
    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores))
    loss += epsilon_penalty * wgan_epsilon
    return loss, None
#----------------------------------------------------------------------------
# WGAN-GP loss from the paper
# "Improved Training of Wasserstein GANs", Gulrajani et al. 2017
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, wgan_lambda=10.0, wgan_epsilon=0.001, wgan_target=1.0):
    """WGAN-GP critic loss ("Improved Training of Wasserstein GANs").

    Wasserstein estimate plus epsilon drift, with a gradient penalty that
    pushes the critic's gradient norm toward wgan_target on random
    interpolations between reals and fakes. Returns (loss, reg) where reg
    is the lazily applied gradient penalty.
    """
    _ = opt, training_set  # unused; kept for a uniform loss-function signature
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    real_scores = autosummary('Loss/scores/real', D.get_output_for(reals, labels, is_training=True))
    fake_scores = autosummary('Loss/scores/fake', D.get_output_for(fakes, labels, is_training=True))
    loss = fake_scores - real_scores
    with tf.name_scope('EpsilonPenalty'):
        # Drift term keeping real scores near zero.
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores))
    loss += epsilon_penalty * wgan_epsilon
    with tf.name_scope('GradientPenalty'):
        # Score the critic on per-sample random interpolations.
        t = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fakes.dtype)
        mixed = tflib.lerp(tf.cast(reals, fakes.dtype), fakes, t)
        mixed_scores = autosummary('Loss/scores/mixed', D.get_output_for(mixed, labels, is_training=True))
        grads = tf.gradients(tf.reduce_sum(mixed_scores), [mixed])[0]
        norms = autosummary('Loss/mixed_norms', tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3])))
        gradient_penalty = tf.square(norms - wgan_target)
    reg = gradient_penalty * (wgan_lambda / (wgan_target**2))
    return loss, reg
#----------------------------------------------------------------------------
# Non-saturating logistic loss with path length regularizer from the paper
# "Analyzing and Improving the Image Quality of StyleGAN", Karras et al. 2019
def G_logistic_ns_pathreg(G, D, opt, training_set, minibatch_size, pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2.0):
    """Non-saturating logistic generator loss with path length regularization.

    From "Analyzing and Improving the Image Quality of StyleGAN" (Karras et
    al. 2019). Returns (loss, reg): the per-sample softplus loss, and the
    lazily applied penalty pl_weight * (|J*y| - running_mean)^2, where |J*y|
    is a stochastic estimate of the generator Jacobian norm w.r.t. the
    dlatents and running_mean is its exponential moving average (pl_decay).
    """
    _ = opt  # unused; kept for a uniform loss-function signature
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out, fake_dlatents_out = G.get_output_for(latents, labels, is_training=True, return_dlatents=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
    # Path length regularization.
    with tf.name_scope('PathReg'):
        # Evaluate the regularization term using a smaller minibatch to conserve memory.
        if pl_minibatch_shrink > 1:
            pl_minibatch = minibatch_size // pl_minibatch_shrink
            pl_latents = tf.random_normal([pl_minibatch] + G.input_shapes[0][1:])
            pl_labels = training_set.get_random_labels_tf(pl_minibatch)
            fake_images_out, fake_dlatents_out = G.get_output_for(pl_latents, pl_labels, is_training=True, return_dlatents=True)
        # Compute |J*y|.
        # Image-space noise scaled by 1/sqrt(num_pixels) so the projection below
        # estimates the Jacobian norm independently of image resolution.
        pl_noise = tf.random_normal(tf.shape(fake_images_out)) / np.sqrt(np.prod(G.output_shape[2:]))
        pl_grads = tf.gradients(tf.reduce_sum(fake_images_out * pl_noise), [fake_dlatents_out])[0]
        # Sum of squares over the feature axis (axis=2), mean over the per-layer
        # broadcast axis (axis=1) — assumes fake_dlatents_out is
        # [minibatch, num_layers, dlatent_dim]; TODO confirm against G.
        pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
        pl_lengths = autosummary('Loss/pl_lengths', pl_lengths)
        # Track exponential moving average of |J*y|.
        with tf.control_dependencies(None):
            pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.float32)
        pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
        pl_update = tf.assign(pl_mean_var, pl_mean)
        # Calculate (|J*y|-a)^2.
        # The control dependency guarantees the moving average is updated
        # whenever the penalty is evaluated.
        with tf.control_dependencies([pl_update]):
            pl_penalty = tf.square(pl_lengths - pl_mean)
            pl_penalty = autosummary('Loss/pl_penalty', pl_penalty)
        # Apply weight.
        #
        # Note: The division in pl_noise decreases the weight by num_pixels, and the reduce_mean
        # in pl_lengths decreases it by num_affine_layers. The effective weight then becomes:
        #
        # gamma_pl = pl_weight / num_pixels / num_affine_layers
        # = 2 / (r^2) / (log2(r) * 2 - 2)
        # = 1 / (r^2 * (log2(r) - 1))
        # = ln(2) / (r^2 * (ln(r) - ln(2))
        #
        reg = pl_penalty * pl_weight
    return loss, reg
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Non-saturating logistic loss with path length regularizer from the paper
# "Analyzing and Improving the Image Quality of StyleGAN", Karras et al. 2019
def G_logistic_ns_pathreg_dsp(G, D, opt, training_set, minibatch_size, pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2.0):
    """Variant of G_logistic_ns_pathreg with a rank-2 dlatent reduction.

    Identical to G_logistic_ns_pathreg except that pl_lengths reduces only
    over axis=1 (no per-layer mean) — presumably because this "dsp" variant's
    generator returns 2-D dlatents [minibatch, dlatent_dim]; verify against
    the corresponding G. Returns (loss, reg) as in the base version.
    """
    _ = opt  # unused; kept for a uniform loss-function signature
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out, fake_dlatents_out = G.get_output_for(latents, labels, is_training=True, return_dlatents=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
    # Path length regularization.
    with tf.name_scope('PathReg'):
        # Evaluate the regularization term using a smaller minibatch to conserve memory.
        if pl_minibatch_shrink > 1:
            pl_minibatch = minibatch_size // pl_minibatch_shrink
            pl_latents = tf.random_normal([pl_minibatch] + G.input_shapes[0][1:])
            pl_labels = training_set.get_random_labels_tf(pl_minibatch)
            fake_images_out, fake_dlatents_out = G.get_output_for(pl_latents, pl_labels, is_training=True, return_dlatents=True)
        # Compute |J*y|.
        # Image-space noise scaled by 1/sqrt(num_pixels), as in the base version.
        pl_noise = tf.random_normal(tf.shape(fake_images_out)) / np.sqrt(np.prod(G.output_shape[2:]))
        pl_grads = tf.gradients(tf.reduce_sum(fake_images_out * pl_noise), [fake_dlatents_out])[0]
        # Single reduction over axis=1 (no layer mean) — differs from the base version.
        pl_lengths = tf.sqrt(tf.reduce_sum(tf.square(pl_grads), axis=1))
        pl_lengths = autosummary('Loss/pl_lengths', pl_lengths)
        # Track exponential moving average of |J*y|.
        with tf.control_dependencies(None):
            pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.float32)
        pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
        pl_update = tf.assign(pl_mean_var, pl_mean)
        # Calculate (|J*y|-a)^2.
        # The control dependency guarantees the moving average is updated
        # whenever the penalty is evaluated.
        with tf.control_dependencies([pl_update]):
            pl_penalty = tf.square(pl_lengths - pl_mean)
            pl_penalty = autosummary('Loss/pl_penalty', pl_penalty)
        # Apply weight.
        #
        # Note: The division in pl_noise decreases the weight by num_pixels, and the reduce_mean
        # in pl_lengths decreases it by num_affine_layers. The effective weight then becomes:
        #
        # gamma_pl = pl_weight / num_pixels / num_affine_layers
        # = 2 / (r^2) / (log2(r) * 2 - 2)
        # = 1 / (r^2 * (log2(r) - 1))
        # = ln(2) / (r^2 * (ln(r) - ln(2))
        #
        reg = pl_penalty * pl_weight
    return loss, reg
#----------------------------------------------------------------------------
| [
"numpy.prod",
"tensorflow.shape",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.random.truncated_normal",
"tensorflow.nn.softplus",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.math.minimum",
"tenso... | [((621, 679), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (637, 679), True, 'import tensorflow as tf\n'), ((1131, 1189), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (1147, 1189), True, 'import tensorflow as tf\n'), ((1420, 1452), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-fake_scores_out)'], {}), '(-fake_scores_out)\n', (1434, 1452), True, 'import tensorflow as tf\n'), ((2682, 2714), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-fake_scores_out)'], {}), '(-fake_scores_out)\n', (2696, 2714), True, 'import tensorflow as tf\n'), ((4873, 4905), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-fake_scores_out)'], {}), '(-fake_scores_out)\n', (4887, 4905), True, 'import tensorflow as tf\n'), ((5215, 5249), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/I_loss"""', 'I_loss'], {}), "('Loss/I_loss', I_loss)\n", (5226, 5249), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((6503, 6533), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['cls_out'], {'axis': '(1)'}), '(cls_out, axis=1)\n', (6516, 6533), True, 'import tensorflow as tf\n'), ((10729, 10761), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-fake_scores_out)'], {}), '(-fake_scores_out)\n', (10743, 10761), True, 'import tensorflow as tf\n'), ((11111, 11145), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/I_loss"""', 'I_loss'], {}), "('Loss/I_loss', I_loss)\n", (11122, 11145), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((11160, 11213), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(feat_map1 * feat_map1)'], {'axis': '[1, 2, 3]'}), '(feat_map1 * feat_map1, axis=[1, 2, 3])\n', (11174, 11213), True, 'import tensorflow as tf\n'), ((11227, 11261), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/F_loss"""', 
'F_loss'], {}), "('Loss/F_loss', F_loss)\n", (11238, 11261), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((11776, 11834), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (11792, 11834), True, 'import tensorflow as tf\n'), ((12085, 12133), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (12096, 12133), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((12156, 12204), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (12167, 12204), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((12216, 12247), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['fake_scores_out'], {}), '(fake_scores_out)\n', (12230, 12247), True, 'import tensorflow as tf\n'), ((12295, 12327), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-real_scores_out)'], {}), '(-real_scores_out)\n', (12309, 12327), True, 'import tensorflow as tf\n'), ((12756, 12814), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (12772, 12814), True, 'import tensorflow as tf\n'), ((13065, 13113), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (13076, 13113), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((13136, 13184), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (13147, 13184), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((13196, 13227), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['fake_scores_out'], {}), '(fake_scores_out)\n', (13210, 
13227), True, 'import tensorflow as tf\n'), ((13275, 13307), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-real_scores_out)'], {}), '(-real_scores_out)\n', (13289, 13307), True, 'import tensorflow as tf\n'), ((14971, 15019), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (14982, 15019), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((15042, 15090), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (15053, 15090), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((15102, 15133), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['fake_scores_out'], {}), '(fake_scores_out)\n', (15116, 15133), True, 'import tensorflow as tf\n'), ((15181, 15213), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-real_scores_out)'], {}), '(-real_scores_out)\n', (15195, 15213), True, 'import tensorflow as tf\n'), ((16888, 16936), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (16899, 16936), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((16959, 17007), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (16970, 17007), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((17019, 17050), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['fake_scores_out'], {}), '(fake_scores_out)\n', (17033, 17050), True, 'import tensorflow as tf\n'), ((17098, 17130), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-real_scores_out)'], {}), '(-real_scores_out)\n', (17112, 17130), True, 'import tensorflow as tf\n'), ((17687, 17745), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + 
G.input_shapes[0][1:])\n', (17703, 17745), True, 'import tensorflow as tf\n'), ((17996, 18044), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (18007, 18044), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((18067, 18115), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (18078, 18115), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((18127, 18158), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['fake_scores_out'], {}), '(fake_scores_out)\n', (18141, 18158), True, 'import tensorflow as tf\n'), ((18206, 18238), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-real_scores_out)'], {}), '(-real_scores_out)\n', (18220, 18238), True, 'import tensorflow as tf\n'), ((18933, 18991), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (18949, 18991), True, 'import tensorflow as tf\n'), ((19390, 19448), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (19406, 19448), True, 'import tensorflow as tf\n'), ((19699, 19747), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (19710, 19747), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((19770, 19818), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (19781, 19818), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((20406, 20464), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', 
(20422, 20464), True, 'import tensorflow as tf\n'), ((20715, 20763), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/real"""', 'real_scores_out'], {}), "('Loss/scores/real', real_scores_out)\n", (20726, 20763), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((20786, 20834), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/fake"""', 'fake_scores_out'], {}), "('Loss/scores/fake', fake_scores_out)\n", (20797, 20834), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((22256, 22314), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (22272, 22314), True, 'import tensorflow as tf\n'), ((22586, 22618), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-fake_scores_out)'], {}), '(-fake_scores_out)\n', (22600, 22618), True, 'import tensorflow as tf\n'), ((25093, 25151), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + G.input_shapes[0][1:])'], {}), '([minibatch_size] + G.input_shapes[0][1:])\n', (25109, 25151), True, 'import tensorflow as tf\n'), ((25423, 25455), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(-fake_scores_out)'], {}), '(-fake_scores_out)\n', (25437, 25455), True, 'import tensorflow as tf\n'), ((911, 942), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['fake_scores_out'], {}), '(fake_scores_out)\n', (925, 942), True, 'import tensorflow as tf\n'), ((1706, 1794), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'D_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=D_global_size, dtype=\n tf.int32)\n', (1723, 1794), True, 'import tensorflow as tf\n'), ((1817, 1860), 'tensorflow.one_hot', 'tf.one_hot', (['discrete_latents', 'D_global_size'], {}), '(discrete_latents, D_global_size)\n', (1827, 1860), True, 'import tensorflow as tf\n'), ((1913, 2014), 'tensorflow.random.uniform', 
'tf.random.uniform', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {'minval': '(-2)', 'maxval': '(2)'}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size],\n minval=-2, maxval=2)\n', (1930, 2014), True, 'import tensorflow as tf\n'), ((2402, 2448), 'tensorflow.concat', 'tf.concat', (['[discrete_latents, latents]'], {'axis': '(1)'}), '([discrete_latents, latents], axis=1)\n', (2411, 2448), True, 'import tensorflow as tf\n'), ((3036, 3089), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['regress_out[:, :D_global_size]'], {'axis': '(1)'}), '(regress_out[:, :D_global_size], axis=1)\n', (3049, 3089), True, 'import tensorflow as tf\n'), ((3307, 3399), 'tensorflow.exp', 'tf.exp', (['regress_out[:, D_global_size + C_global_size:D_global_size + C_global_size * 2]'], {}), '(regress_out[:, D_global_size + C_global_size:D_global_size + \n C_global_size * 2])\n', (3313, 3399), True, 'import tensorflow as tf\n'), ((3887, 3975), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'D_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=D_global_size, dtype=\n tf.int32)\n', (3904, 3975), True, 'import tensorflow as tf\n'), ((3998, 4041), 'tensorflow.one_hot', 'tf.one_hot', (['discrete_latents', 'D_global_size'], {}), '(discrete_latents, D_global_size)\n', (4008, 4041), True, 'import tensorflow as tf\n'), ((4094, 4195), 'tensorflow.random.uniform', 'tf.random.uniform', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {'minval': '(-2)', 'maxval': '(2)'}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size],\n minval=-2, maxval=2)\n', (4111, 4195), True, 'import tensorflow as tf\n'), ((4583, 4629), 'tensorflow.concat', 'tf.concat', (['[discrete_latents, latents]'], {'axis': '(1)'}), '([discrete_latents, latents], axis=1)\n', (4592, 4629), True, 'import tensorflow as tf\n'), ((5554, 5607), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['regress_out[:, D_global_size:]'], 
{'axis': '(1)'}), '(regress_out[:, D_global_size:], axis=1)\n', (5567, 5607), True, 'import tensorflow as tf\n'), ((5727, 5758), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['I_loss_C'], {'axis': '(1)'}), '(I_loss_C, axis=1)\n', (5740, 5758), True, 'import tensorflow as tf\n'), ((7114, 7202), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'D_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=D_global_size, dtype=\n tf.int32)\n', (7131, 7202), True, 'import tensorflow as tf\n'), ((7225, 7268), 'tensorflow.one_hot', 'tf.one_hot', (['discrete_latents', 'D_global_size'], {}), '(discrete_latents, D_global_size)\n', (7235, 7268), True, 'import tensorflow as tf\n'), ((7298, 7386), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'D_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=D_global_size, dtype=\n tf.int32)\n', (7315, 7386), True, 'import tensorflow as tf\n'), ((7411, 7456), 'tensorflow.one_hot', 'tf.one_hot', (['discrete_latents_2', 'D_global_size'], {}), '(discrete_latents_2, D_global_size)\n', (7421, 7456), True, 'import tensorflow as tf\n'), ((7509, 7610), 'tensorflow.random.uniform', 'tf.random.uniform', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {'minval': '(-2)', 'maxval': '(2)'}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size],\n minval=-2, maxval=2)\n', (7526, 7610), True, 'import tensorflow as tf\n'), ((8650, 8686), 'tensorflow.compat.v1.train.get_global_step', 'tf.compat.v1.train.get_global_step', ([], {}), '()\n', (8684, 8686), True, 'import tensorflow as tf\n'), ((8796, 8839), 'tensorflow.math.minimum', 'tf.math.minimum', (['n_emph_free', 'C_global_size'], {}), '(n_emph_free, C_global_size)\n', (8811, 8839), True, 'import tensorflow as tf\n'), ((10053, 10099), 'tensorflow.concat', 'tf.concat', (['[discrete_latents, latents]'], {'axis': '(1)'}), '([discrete_latents, 
latents], axis=1)\n', (10062, 10099), True, 'import tensorflow as tf\n'), ((11536, 11580), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/I_info_loss"""', 'I_info_loss'], {}), "('Loss/I_info_loss', I_info_loss)\n", (11547, 11580), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((13396, 13428), 'tensorflow.name_scope', 'tf.name_scope', (['"""GradientPenalty"""'], {}), "('GradientPenalty')\n", (13409, 13428), True, 'import tensorflow as tf\n'), ((13613, 13667), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/gradient_penalty"""', 'gradient_penalty'], {}), "('Loss/gradient_penalty', gradient_penalty)\n", (13624, 13667), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((13975, 14063), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'D_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=D_global_size, dtype=\n tf.int32)\n', (13992, 14063), True, 'import tensorflow as tf\n'), ((14086, 14129), 'tensorflow.one_hot', 'tf.one_hot', (['discrete_latents', 'D_global_size'], {}), '(discrete_latents, D_global_size)\n', (14096, 14129), True, 'import tensorflow as tf\n'), ((14182, 14283), 'tensorflow.random.uniform', 'tf.random.uniform', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {'minval': '(-2)', 'maxval': '(2)'}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size],\n minval=-2, maxval=2)\n', (14199, 14283), True, 'import tensorflow as tf\n'), ((14670, 14716), 'tensorflow.concat', 'tf.concat', (['[discrete_latents, latents]'], {'axis': '(1)'}), '([discrete_latents, latents], axis=1)\n', (14679, 14716), True, 'import tensorflow as tf\n'), ((15302, 15334), 'tensorflow.name_scope', 'tf.name_scope', (['"""GradientPenalty"""'], {}), "('GradientPenalty')\n", (15315, 15334), True, 'import tensorflow as tf\n'), ((15519, 15573), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/gradient_penalty"""', 
'gradient_penalty'], {}), "('Loss/gradient_penalty', gradient_penalty)\n", (15530, 15573), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((15886, 15974), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'D_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=D_global_size, dtype=\n tf.int32)\n', (15903, 15974), True, 'import tensorflow as tf\n'), ((15997, 16040), 'tensorflow.one_hot', 'tf.one_hot', (['discrete_latents', 'D_global_size'], {}), '(discrete_latents, D_global_size)\n', (16007, 16040), True, 'import tensorflow as tf\n'), ((16093, 16194), 'tensorflow.random.uniform', 'tf.random.uniform', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {'minval': '(-2)', 'maxval': '(2)'}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size],\n minval=-2, maxval=2)\n', (16110, 16194), True, 'import tensorflow as tf\n'), ((16581, 16627), 'tensorflow.concat', 'tf.concat', (['[discrete_latents, latents]'], {'axis': '(1)'}), '([discrete_latents, latents], axis=1)\n', (16590, 16627), True, 'import tensorflow as tf\n'), ((17219, 17251), 'tensorflow.name_scope', 'tf.name_scope', (['"""GradientPenalty"""'], {}), "('GradientPenalty')\n", (17232, 17251), True, 'import tensorflow as tf\n'), ((17436, 17490), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/gradient_penalty"""', 'gradient_penalty'], {}), "('Loss/gradient_penalty', gradient_penalty)\n", (17447, 17490), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((18327, 18359), 'tensorflow.name_scope', 'tf.name_scope', (['"""GradientPenalty"""'], {}), "('GradientPenalty')\n", (18340, 18359), True, 'import tensorflow as tf\n'), ((18554, 18608), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/gradient_penalty"""', 'gradient_penalty'], {}), "('Loss/gradient_penalty', gradient_penalty)\n", (18565, 18608), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((19873, 
19904), 'tensorflow.name_scope', 'tf.name_scope', (['"""EpsilonPenalty"""'], {}), "('EpsilonPenalty')\n", (19886, 19904), True, 'import tensorflow as tf\n'), ((20889, 20920), 'tensorflow.name_scope', 'tf.name_scope', (['"""EpsilonPenalty"""'], {}), "('EpsilonPenalty')\n", (20902, 20920), True, 'import tensorflow as tf\n'), ((21065, 21097), 'tensorflow.name_scope', 'tf.name_scope', (['"""GradientPenalty"""'], {}), "('GradientPenalty')\n", (21078, 21097), True, 'import tensorflow as tf\n'), ((21124, 21212), 'tensorflow.random_uniform', 'tf.random_uniform', (['[minibatch_size, 1, 1, 1]', '(0.0)', '(1.0)'], {'dtype': 'fake_images_out.dtype'}), '([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=\n fake_images_out.dtype)\n', (21141, 21212), True, 'import tensorflow as tf\n'), ((21433, 21483), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/scores/mixed"""', 'mixed_scores_out'], {}), "('Loss/scores/mixed', mixed_scores_out)\n", (21444, 21483), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((21680, 21724), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/mixed_norms"""', 'mixed_norms'], {}), "('Loss/mixed_norms', mixed_norms)\n", (21691, 21724), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((21752, 21788), 'tensorflow.square', 'tf.square', (['(mixed_norms - wgan_target)'], {}), '(mixed_norms - wgan_target)\n', (21761, 21788), True, 'import tensorflow as tf\n'), ((22696, 22720), 'tensorflow.name_scope', 'tf.name_scope', (['"""PathReg"""'], {}), "('PathReg')\n", (22709, 22720), True, 'import tensorflow as tf\n'), ((23541, 23583), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/pl_lengths"""', 'pl_lengths'], {}), "('Loss/pl_lengths', pl_lengths)\n", (23552, 23583), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((23896, 23927), 'tensorflow.assign', 'tf.assign', (['pl_mean_var', 'pl_mean'], {}), '(pl_mean_var, pl_mean)\n', (23905, 23927), True, 'import tensorflow as tf\n'), ((25533, 
25557), 'tensorflow.name_scope', 'tf.name_scope', (['"""PathReg"""'], {}), "('PathReg')\n", (25546, 25557), True, 'import tensorflow as tf\n'), ((26354, 26396), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/pl_lengths"""', 'pl_lengths'], {}), "('Loss/pl_lengths', pl_lengths)\n", (26365, 26396), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((26709, 26740), 'tensorflow.assign', 'tf.assign', (['pl_mean_var', 'pl_mean'], {}), '(pl_mean_var, pl_mean)\n', (26718, 26740), True, 'import tensorflow as tf\n'), ((2061, 2136), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size])\n', (2077, 2136), True, 'import tensorflow as tf\n'), ((4242, 4317), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size])\n', (4258, 4317), True, 'import tensorflow as tf\n'), ((5645, 5667), 'tensorflow.log', 'tf.log', (['(prob_C + 1e-12)'], {}), '(prob_C + 1e-12)\n', (5651, 5667), True, 'import tensorflow as tf\n'), ((6587, 6609), 'tensorflow.log', 'tf.log', (['(prob_D + 1e-12)'], {}), '(prob_D + 1e-12)\n', (6593, 6609), True, 'import tensorflow as tf\n'), ((7751, 7826), 'tensorflow.random.normal', 'tf.random.normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size])\n', (7767, 7826), True, 'import tensorflow as tf\n'), ((8166, 8254), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'C_global_size', 'dtype': 'tf.int32'}), '([minibatch_size], minval=0, maxval=C_global_size, dtype=\n tf.int32)\n', (8183, 8254), True, 'import tensorflow as tf\n'), ((8906, 8982), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size]'], {'minval': '(0)', 'maxval': 'n_emph', 'dtype': 
'tf.int32'}), '([minibatch_size], minval=0, maxval=n_emph, dtype=tf.int32)\n', (8923, 8982), True, 'import tensorflow as tf\n'), ((13549, 13570), 'tensorflow.square', 'tf.square', (['real_grads'], {}), '(real_grads)\n', (13558, 13570), True, 'import tensorflow as tf\n'), ((14330, 14405), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size])\n', (14346, 14405), True, 'import tensorflow as tf\n'), ((15455, 15476), 'tensorflow.square', 'tf.square', (['real_grads'], {}), '(real_grads)\n', (15464, 15476), True, 'import tensorflow as tf\n'), ((16241, 16316), 'tensorflow.random_normal', 'tf.random_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] - D_global_size])\n', (16257, 16316), True, 'import tensorflow as tf\n'), ((17372, 17393), 'tensorflow.square', 'tf.square', (['real_grads'], {}), '(real_grads)\n', (17381, 17393), True, 'import tensorflow as tf\n'), ((18490, 18511), 'tensorflow.square', 'tf.square', (['fake_grads'], {}), '(fake_grads)\n', (18499, 18511), True, 'import tensorflow as tf\n'), ((19968, 19994), 'tensorflow.square', 'tf.square', (['real_scores_out'], {}), '(real_scores_out)\n', (19977, 19994), True, 'import tensorflow as tf\n'), ((20984, 21010), 'tensorflow.square', 'tf.square', (['real_scores_out'], {}), '(real_scores_out)\n', (20993, 21010), True, 'import tensorflow as tf\n'), ((21246, 21283), 'tensorflow.cast', 'tf.cast', (['reals', 'fake_images_out.dtype'], {}), '(reals, fake_images_out.dtype)\n', (21253, 21283), True, 'import tensorflow as tf\n'), ((22938, 22994), 'tensorflow.random_normal', 'tf.random_normal', (['([pl_minibatch] + G.input_shapes[0][1:])'], {}), '([pl_minibatch] + G.input_shapes[0][1:])\n', (22954, 22994), True, 'import tensorflow as tf\n'), ((23651, 23680), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['None'], {}), 
'(None)\n', (23674, 23680), True, 'import tensorflow as tf\n'), ((23708, 23794), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""pl_mean"""', 'trainable': '(False)', 'initial_value': '(0.0)', 'dtype': 'tf.float32'}), "(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.\n float32)\n", (23719, 23794), True, 'import tensorflow as tf\n'), ((23975, 24011), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[pl_update]'], {}), '([pl_update])\n', (23998, 24011), True, 'import tensorflow as tf\n'), ((24038, 24069), 'tensorflow.square', 'tf.square', (['(pl_lengths - pl_mean)'], {}), '(pl_lengths - pl_mean)\n', (24047, 24069), True, 'import tensorflow as tf\n'), ((24095, 24137), 'dnnlib.tflib.autosummary.autosummary', 'autosummary', (['"""Loss/pl_penalty"""', 'pl_penalty'], {}), "('Loss/pl_penalty', pl_penalty)\n", (24106, 24137), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((25775, 25831), 'tensorflow.random_normal', 'tf.random_normal', (['([pl_minibatch] + G.input_shapes[0][1:])'], {}), '([pl_minibatch] + G.input_shapes[0][1:])\n', (25791, 25831), True, 'import tensorflow as tf\n'), ((26464, 26493), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['None'], {}), '(None)\n', (26487, 26493), True, 'import tensorflow as tf\n'), ((26521, 26607), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""pl_mean"""', 'trainable': '(False)', 'initial_value': '(0.0)', 'dtype': 'tf.float32'}), "(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.\n float32)\n", (26532, 26607), True, 'import tensorflow as tf\n'), ((26788, 26824), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[pl_update]'], {}), '([pl_update])\n', (26811, 26824), True, 'import tensorflow as tf\n'), ((26851, 26882), 'tensorflow.square', 'tf.square', (['(pl_lengths - pl_mean)'], {}), '(pl_lengths - pl_mean)\n', (26860, 26882), True, 'import tensorflow as tf\n'), ((26908, 26950), 'dnnlib.tflib.autosummary.autosummary', 
'autosummary', (['"""Loss/pl_penalty"""', 'pl_penalty'], {}), "('Loss/pl_penalty', pl_penalty)\n", (26919, 26950), False, 'from dnnlib.tflib.autosummary import autosummary\n'), ((2193, 2282), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] -\n D_global_size])\n', (2219, 2282), True, 'import tensorflow as tf\n'), ((3152, 3174), 'tensorflow.log', 'tf.log', (['(prob_D + 1e-12)'], {}), '(prob_D + 1e-12)\n', (3158, 3174), True, 'import tensorflow as tf\n'), ((3524, 3545), 'tensorflow.log', 'tf.log', (['(std_C + 1e-12)'], {}), '(std_C + 1e-12)\n', (3530, 3545), True, 'import tensorflow as tf\n'), ((3554, 3572), 'tensorflow.square', 'tf.square', (['epsilon'], {}), '(epsilon)\n', (3563, 3572), True, 'import tensorflow as tf\n'), ((4374, 4463), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] -\n D_global_size])\n', (4400, 4463), True, 'import tensorflow as tf\n'), ((7883, 7972), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] -\n D_global_size])\n', (7909, 7972), True, 'import tensorflow as tf\n'), ((8288, 8330), 'tensorflow.one_hot', 'tf.one_hot', (['C_delta_latents', 'C_global_size'], {}), '(C_delta_latents, C_global_size)\n', (8298, 8330), True, 'import tensorflow as tf\n'), ((8415, 8512), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size, C_global_size]'], {'minval': '(0)', 'maxval': '(1.0)', 'dtype': 'latents.dtype'}), '([minibatch_size, C_global_size], minval=0, maxval=1.0,\n dtype=latents.dtype)\n', (8432, 8512), True, 'import tensorflow as tf\n'), ((9021, 9056), 'tensorflow.one_hot', 'tf.one_hot', (['C_delta_latents', 'n_emph'], {}), '(C_delta_latents, 
n_emph)\n', (9031, 9056), True, 'import tensorflow as tf\n'), ((9141, 9232), 'tensorflow.random.uniform', 'tf.random.uniform', (['[minibatch_size, n_emph]'], {'minval': '(0)', 'maxval': '(1.0)', 'dtype': 'latents.dtype'}), '([minibatch_size, n_emph], minval=0, maxval=1.0, dtype=\n latents.dtype)\n', (9158, 9232), True, 'import tensorflow as tf\n'), ((9283, 9333), 'tensorflow.zeros', 'tf.zeros', (['[minibatch_size, C_global_size - n_emph]'], {}), '([minibatch_size, C_global_size - n_emph])\n', (9291, 9333), True, 'import tensorflow as tf\n'), ((9609, 9668), 'tensorflow.random.normal', 'tf.random.normal', (['[minibatch_size, 1]'], {'mean': '(0.0)', 'stddev': '(2.0)'}), '([minibatch_size, 1], mean=0.0, stddev=2.0)\n', (9625, 9668), True, 'import tensorflow as tf\n'), ((10216, 10257), 'tensorflow.zeros', 'tf.zeros', (['[minibatch_size, D_global_size]'], {}), '([minibatch_size, D_global_size])\n', (10224, 10257), True, 'import tensorflow as tf\n'), ((13464, 13494), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['real_scores_out'], {}), '(real_scores_out)\n', (13477, 13494), True, 'import tensorflow as tf\n'), ((14462, 14551), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] -\n D_global_size])\n', (14488, 14551), True, 'import tensorflow as tf\n'), ((15370, 15400), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['real_scores_out'], {}), '(real_scores_out)\n', (15383, 15400), True, 'import tensorflow as tf\n'), ((16373, 16462), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['([minibatch_size] + [G.input_shapes[0][1] - D_global_size])'], {}), '([minibatch_size] + [G.input_shapes[0][1] -\n D_global_size])\n', (16399, 16462), True, 'import tensorflow as tf\n'), ((17287, 17317), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['real_scores_out'], {}), '(real_scores_out)\n', (17300, 17317), True, 'import tensorflow as tf\n'), ((18395, 
18425), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['fake_scores_out'], {}), '(fake_scores_out)\n', (18408, 18425), True, 'import tensorflow as tf\n'), ((21519, 21550), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mixed_scores_out'], {}), '(mixed_scores_out)\n', (21532, 21550), True, 'import tensorflow as tf\n'), ((21619, 21641), 'tensorflow.square', 'tf.square', (['mixed_grads'], {}), '(mixed_grads)\n', (21628, 21641), True, 'import tensorflow as tf\n'), ((23258, 23283), 'tensorflow.shape', 'tf.shape', (['fake_images_out'], {}), '(fake_images_out)\n', (23266, 23283), True, 'import tensorflow as tf\n'), ((23295, 23322), 'numpy.prod', 'np.prod', (['G.output_shape[2:]'], {}), '(G.output_shape[2:])\n', (23302, 23322), True, 'import numpy as np\n'), ((23356, 23397), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(fake_images_out * pl_noise)'], {}), '(fake_images_out * pl_noise)\n', (23369, 23397), True, 'import tensorflow as tf\n'), ((26095, 26120), 'tensorflow.shape', 'tf.shape', (['fake_images_out'], {}), '(fake_images_out)\n', (26103, 26120), True, 'import tensorflow as tf\n'), ((26132, 26159), 'numpy.prod', 'np.prod', (['G.output_shape[2:]'], {}), '(G.output_shape[2:])\n', (26139, 26159), True, 'import numpy as np\n'), ((26193, 26234), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(fake_images_out * pl_noise)'], {}), '(fake_images_out * pl_noise)\n', (26206, 26234), True, 'import tensorflow as tf\n'), ((26303, 26322), 'tensorflow.square', 'tf.square', (['pl_grads'], {}), '(pl_grads)\n', (26312, 26322), True, 'import tensorflow as tf\n'), ((3504, 3521), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3510, 3521), True, 'import numpy as np\n'), ((23481, 23500), 'tensorflow.square', 'tf.square', (['pl_grads'], {}), '(pl_grads)\n', (23490, 23500), True, 'import tensorflow as tf\n'), ((23834, 23860), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pl_lengths'], {}), '(pl_lengths)\n', (23848, 23860), True, 'import tensorflow as tf\n'), ((26647, 26673), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['pl_lengths'], {}), '(pl_lengths)\n', (26661, 26673), True, 'import tensorflow as tf\n'), ((5855, 5900), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['regress_out[:, D_global_size:]'], {}), '(regress_out[:, D_global_size:])\n', (5868, 5900), True, 'import tensorflow as tf\n')] |
from skimage import metrics
from torchvision.transforms import InterpolationMode
from torchvision import transforms
import cfg
import os
import numpy as np
import torch
import cv2
from PIL import Image
class Point(object):
    """A mutable 2D point with ``x`` and ``y`` coordinates."""

    def __init__(self, x=0, y=0):
        # Defaults to the origin when no coordinates are supplied.
        self.x = x
        self.y = y
class Line(object):
    """A line segment defined by its two endpoint objects ``p1`` and ``p2``."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
def GetLinePara(line):
    """Attach the coefficients (a, b, c) of ax + by + c = 0 to ``line`` in place."""
    start, end = line.p1, line.p2
    line.a = start.y - end.y
    line.b = end.x - start.x
    line.c = start.x * end.y - end.x * start.y
def GetCrossPoint(l1, l2):
    """Return the intersection Point of two (infinite) lines ``l1`` and ``l2``.

    Both lines get their (a, b, c) coefficients populated as a side effect.
    A zero determinant (parallel lines) raises ZeroDivisionError.
    """
    GetLinePara(l1)
    GetLinePara(l2)
    # Cramer's rule denominator of the 2x2 coefficient system.
    det = l1.a * l2.b - l2.a * l1.b
    crossing = Point()
    crossing.x = (l1.b * l2.c - l2.b * l1.c) * 1.0 / det
    crossing.y = (l1.c * l2.a - l2.c * l1.a) * 1.0 / det
    return crossing
def cross_point(quad):
    """Return (x, y) where the two diagonals of the 4-corner ``quad`` intersect.

    ``quad`` is an iterable of four (x, y) pairs; the diagonals connect
    corners 1-3 and 2-4 respectively.
    """
    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = quad
    diag_13 = Line(Point(x1, y1), Point(x3, y3))
    diag_24 = Line(Point(x2, y2), Point(x4, y4))
    crossing = GetCrossPoint(diag_13, diag_24)
    return (crossing.x, crossing.y)
def tensor2jpg(tensor, W, H):
    """Denormalize a [-1, 1] image tensor and return a W x H BGR ndarray."""
    cpu_tensor = tensor.detach().squeeze(0).cpu()
    # Undo Normalize((0.5,)*3, (0.5,)*3): map [-1, 1] back to [0, 1].
    cpu_tensor = cpu_tensor * 0.5 + 0.5
    pil_img = transforms.ToPILImage()(cpu_tensor)
    pil_img = pil_img.resize((W, H), Image.BICUBIC)
    # PIL yields RGB channel order; OpenCV consumers expect BGR.
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
def tensor2jpg_mask(tensor, W, H):
    """Convert an already-[0, 1] mask tensor into a W x H BGR ndarray."""
    cpu_tensor = tensor.detach().squeeze(0).cpu()
    pil_img = transforms.ToPILImage()(cpu_tensor)
    pil_img = pil_img.resize((W, H), Image.BICUBIC)
    # PIL yields RGB channel order; OpenCV consumers expect BGR.
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
def cal_mse(src, tar):
    """Return the mean squared error after scaling 8-bit pixels to [0, 1]."""
    return metrics.mean_squared_error(src / 255, tar / 255)
def cal_psnr(src, tar):
    """Return the peak signal-to-noise ratio between two 8-bit images."""
    return metrics.peak_signal_noise_ratio(src, tar, data_range=255)
def cal_ssim(src, tar):
    """Return the structural similarity (in percent) between two 8-bit images."""
    score = metrics.structural_similarity(
        src, tar, data_range=255, multichannel=True)
    return score * 100
def is_image_file(filename):
    """Return True if ``filename`` ends with a supported image extension (case-sensitive)."""
    return filename.endswith((".png", ".jpg", ".jpeg"))
def is_txt_file(filename):
    """Return True if ``filename`` ends with the .txt extension (case-sensitive)."""
    return filename.endswith(".txt")
def expand_roi(Poly, img_width=512, img_height=512, ratio=0.2):
    """Expand a 4-corner polygon outward from its diagonal crossing point.

    Each corner is pushed away from the diagonals' intersection by
    ``ratio`` times its distance to that intersection, then the result is
    clamped to the image bounds.

    Args:
        Poly: four (x, y) corners ordered (tl, tr, br, bl).
        img_width: image width used for clamping.
        img_height: image height used for clamping.
        ratio: relative expansion factor.

    Returns:
        np.ndarray of shape (4, 2), dtype float32, holding the expanded,
        in-bounds corners.
    """
    c_pts = cross_point(Poly)
    (tl, tr, br, bl) = Poly
    expand_poly = np.zeros((4, 2), dtype="float32")
    for i, coor in enumerate(Poly):
        cha_x, cha_y = (coor[0] - c_pts[0]), (coor[1] - c_pts[1])
        dis = np.sqrt((cha_x ** 2) + (cha_y ** 2)) * ratio
        # Direction angle of the corner relative to the crossing point;
        # a vertical displacement (cha_x == 0) maps to pi/2.
        ang = abs(np.arctan(cha_y / cha_x)) if cha_x != 0 else np.pi/2
        # Pick displacement signs per quadrant so the corner always moves
        # *away* from the crossing point.
        exp_coor = coor + (dis * np.cos(ang), dis * np.sin(ang))
        exp_coor = coor - (abs(dis*np.cos(ang)), abs(dis*np.sin(ang))
                           ) if cha_y < 0 and cha_x < 0 else exp_coor
        exp_coor = coor + (abs(dis*np.cos(ang)), -abs(dis*np.sin(ang))
                           ) if cha_y < 0 and cha_x > 0 else exp_coor
        exp_coor = coor + (-abs(dis*np.cos(ang)), abs(dis*np.sin(ang))
                           ) if cha_y >= 0 and cha_x <= 0 else exp_coor
        expand_poly[i] = exp_coor
    # BUG FIX: the original loop ``for x, y in expand_poly`` only rebound its
    # loop variables, so the array was never actually clamped. Clamp in place.
    expand_poly[:, 0] = np.clip(expand_poly[:, 0], 0, img_width - 1)
    expand_poly[:, 1] = np.clip(expand_poly[:, 1], 0, img_height - 1)
    return expand_poly
def cal_Width_Height(Poly):
    """Return (max_width, max_height, mean_width, mean_height) of a quad.

    ``Poly`` is ordered (tl, tr, br, bl); widths are measured along the
    top/bottom edges, heights along the left/right edges. Max values are
    ints (truncated), mean values are floats of the truncated lengths.
    """
    (tl, tr, br, bl) = Poly

    def _edge_len(p, q):
        # Euclidean distance between two corner points.
        return np.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)

    width_bottom = _edge_len(br, bl)
    width_top = _edge_len(tr, tl)
    maxWidth = max(int(width_bottom), int(width_top))
    mean_Width = (int(width_bottom) + int(width_top)) / 2
    height_right = _edge_len(tr, br)
    height_left = _edge_len(tl, bl)
    maxHeight = max(int(height_right), int(height_left))
    mean_height = (int(height_right) + int(height_left)) / 2
    return maxWidth, maxHeight, mean_Width, mean_height
def four_point_transform(image, pts):
    """Crop an (expanded) quadrilateral text region out of ``image``.

    The quad is rotated so its longer side becomes the width, expanded by a
    height-dependent ratio, and warped to an axis-aligned rectangle.

    Args:
        image: source image (ndarray, H x W x C).
        pts: four (x, y) corners ordered (tl, tr, br, bl).

    Returns:
        (warped, expand_quad, rect): the rectified crop, the expanded quad in
        source coordinates, and the destination rectangle corners (float32).
    """
    quad = (pts)
    (tl, tr, br, bl) = quad
    maxWidth, maxHeight, mean_Width, mean_height = cal_Width_Height(quad)
    if mean_Width >= mean_height:
        Height = maxHeight
    else:
        # Taller than wide: rotate the corner order so the long side is
        # treated as the width of the crop.
        quad = (tr, br, bl, tl)
        Height = maxWidth
    # Expansion ratio shrinks as the text gets taller — small regions need
    # proportionally more context around them.
    if Height < 10:
        R = 1.0
    elif Height < 15:
        R = 0.8
    elif Height < 20:
        R = 0.3
    else:
        R = 0.2
    expand_quad = expand_roi(quad, ratio=R)
    maxWidth, maxHeight, _, _ = cal_Width_Height(expand_quad)
    rect = np.array([[0, 0], [maxWidth - 1, 0],
                    [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(expand_quad, rect)
    warped = cv2.warpPerspective(
        image, M, (maxWidth, maxHeight), flags=cv2.INTER_CUBIC)
    return warped, expand_quad, rect
def model_prediction(generator, device, input):
    """Run the text-erasing generator on one BGR image crop.

    The crop is resized/padded to ``cfg.data_shape``, normalized to [-1, 1],
    passed through ``generator``, and the outputs are cropped back and
    converted to BGR images at the original crop size.

    Args:
        generator: the trained network; called as generator(tensor) and
            expected to return (erased_image, mask) tensors.
        device: torch device to run inference on.
        input: BGR image crop (ndarray, H x W x 3).

    Returns:
        (output, o_mask, display): erased crop, predicted mask, and a
        vertical stack of input/mask/output — all BGR ndarrays.
    """
    H_, W_ = input.shape[0:2]
    text_region = Image.fromarray(cv2.cvtColor(
        input, cv2.COLOR_BGR2RGB))  # cvmat -> PILimage
    # Only shrink to the network input size, never enlarge small crops.
    resize_H = H_ if H_ <= cfg.data_shape[0] else cfg.data_shape[0]
    resize_W = W_ if W_ <= cfg.data_shape[1] else cfg.data_shape[1]
    transform_list = [transforms.Resize((resize_H, resize_W), InterpolationMode.BICUBIC), transforms.ToTensor(
    ), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    transform = transforms.Compose(transform_list)
    input = transform(text_region)  # to tensor
    # Zero-pad to the fixed network input shape (cfg.data_shape,
    # presumably (H, W) — e.g. 128 x 640).
    input_padding = input.new_full(
        (3, cfg.data_shape[0], cfg.data_shape[1]), 0)  # 128, 640
    input_padding[:input.shape[0], :input.shape[1],
                  :input.shape[2]].copy_(input)
    input_padding = input_padding.unsqueeze(0).to(device)
    with torch.no_grad():
        output_padding, o_mask_padding = generator(input_padding)
    # Crop the padded outputs back to the resized (unpadded) region.
    output = output_padding[:, :, :input.shape[1], :input.shape[2]]
    o_mask = o_mask_padding[:, :, :input.shape[1], :input.shape[2]]
    # Stack denormalized input, mask and output vertically for debugging.
    display = torch.cat(((input.unsqueeze(0).clone(
    )*0.5+0.5).to(device), o_mask, (output.clone()*0.5+0.5)), 2)
    o_mask = tensor2jpg_mask(o_mask, W_, H_)
    display = tensor2jpg_mask(display, W_, 3*H_)
    output = tensor2jpg(output, W_, H_)
    return output, o_mask, display
def comp_back_persp(dst, output, img_height, img_width, rect, expand_poly, poly_pts):
    """Warp the erased patch back into ``dst`` inside polygon ``poly_pts``.

    ``rect -> expand_poly`` inverts the crop transform, projecting ``output``
    back to its original location; the patch is then composited into ``dst``
    only where the (slightly dilated) polygon mask is set.
    """
    inverse_M = cv2.getPerspectiveTransform(rect, expand_poly)
    warped_patch = cv2.warpPerspective(
        output, inverse_M, (img_width, img_height), flags=cv2.INTER_CUBIC)
    mask = np.zeros((img_height, img_width, 1), np.uint8)
    polygon = poly_pts.astype(int).reshape((-1, 1, 2))
    cv2.fillConvexPoly(mask, polygon, (255, 255, 255))
    # Grow the mask a little so the patch fully covers the text border.
    ellipse_kernel = cv2.getStructuringElement(
        cv2.MORPH_ELLIPSE, (3, 3))  # cv2.MORPH_RECT
    mask = cv2.dilate(mask, ellipse_kernel)
    mask = mask.reshape((img_height, img_width, -1))
    return np.where(mask > 0, warped_patch, dst)
def inference(generator, device, src_img_dir, src_txt_dir, save_path):
    """Erase annotated text regions from every image referenced by the GT files.

    For each annotation file in ``src_txt_dir``, the matching image
    (same numeric index, .jpg or .png) is loaded, each 4-point text box is
    cropped, run through the generator, and composited back; the erased
    image is written to ``save_path`` as PNG.

    Args:
        generator: trained erasing network (used via model_prediction).
        device: torch device for inference.
        src_img_dir: directory holding the source images.
        src_txt_dir: directory holding per-image annotation .txt files.
        save_path: output directory for the erased images.
    """
    generator.eval()
    txt_list = [x for x in os.listdir(src_txt_dir) if is_txt_file(x)]
    # Sort by the numeric part of the file name so output order is stable.
    txt_list.sort(key=lambda f: int("".join(list(filter(str.isdigit, f)))))
    for i, gt_txt in enumerate(txt_list[:]):  # 773
        gt_lines_txt = open(os.path.join(src_txt_dir, gt_txt), mode='r')
        gt_lines = gt_lines_txt.readlines()
        # Strip extension and common GT-file prefixes to recover the image id.
        img_idx = gt_txt.replace('.txt', '').replace(
            'gt_', '').replace('res_', '')
        print(f'{i}/{len(txt_list)}', img_idx, end='\r')
        # Try .jpg first; a missing file makes .copy() fail on None,
        # which falls through to the .png variant.
        try:
            img = cv2.imread(os.path.join(src_img_dir, img_idx+'.jpg'))
            img_clone = img.copy()
        except Exception:
            img = cv2.imread(os.path.join(src_img_dir, img_idx+'.png'))
            img_clone = img.copy()
        dst = img.copy()
        mask_img = img.copy()
        img_height, img_width = img.shape[0:2]
        k = 1
        for gt in gt_lines:  # gt is a single annotation line
            # Line format: x1,y1,...,xN,yN,transcription(s)
            line_parts = gt.strip().split(",")
            pts_num = int(len(line_parts)/2)
            poly = list(map(int, list(map(float, line_parts[0:pts_num*2]))))
            text = list(map(str, line_parts[pts_num*2: len(line_parts)]))
            poly_pts = np.array(poly, np.int32)
            if pts_num == 4:
                four_pts = poly_pts.reshape((-1, 2)).astype(float)
                input, expand_poly, rect = four_point_transform(
                    dst, four_pts)
                output, o_mask, display = model_prediction(
                    generator, device, input)
                # Composite the erased patch (and its mask) back in place;
                # dst accumulates erasures box by box.
                dst = comp_back_persp(
                    dst, output, img_height, img_width, rect, expand_poly, poly_pts)
                mask_img = comp_back_persp(
                    mask_img, o_mask, img_height, img_width, rect, expand_poly, poly_pts)
                expand_poly = expand_poly.astype(int).reshape((-1, 1, 2))
            else:
                print('NOT RIGHT BBOX')
        # cv2.namedWindow('img_clone', cv2.WINDOW_NORMAL)
        # cv2.imshow('img_clone', img_clone)
        # cv2.imshow('dst', dst)
        # cv2.imshow('mask_img', mask_img)
        # windows_name = f'part_{k}'
        # cv2.imshow(windows_name, display)
        # k += 1
        # cv2.waitKey()
        # for ii in range(k):
        #     cv2.destroyWindow(f'part_{ii}')
        # save image
        cv2.imwrite(os.path.join(save_path, img_idx+'.png'), dst)
def quality_metric(img_path, label_path):
    """Average PSNR, SSIM and MSE between result images and their labels.

    Images in ``img_path`` are matched against same-named labels in
    ``label_path`` (first trying the .jpg variant, then the original name).

    Args:
        img_path: directory of result images.
        label_path: directory of ground-truth label images.

    Returns:
        (ave_psnr, ave_ssim, ave_mse) over all matched pairs.
    """
    img_list = [x for x in os.listdir(img_path) if is_image_file(x)]
    # Sort by the numeric part of the file name for a stable order.
    img_list.sort(key=lambda f: int("".join(list(filter(str.isdigit, f)))))
    psnr_list = []
    ssim_list = []
    mse_list = []
    for i, img_name in enumerate(img_list[:]):
        img = cv2.imread(os.path.join(img_path, img_name))
        # Labels may be stored as .jpg; a missing file makes .copy() fail on
        # None, falling back to the untouched file name.
        try:
            label = cv2.imread(os.path.join(
                label_path, img_name.replace('.png', '.jpg')))
            label_clone = label.copy()
        except Exception:
            label = cv2.imread(os.path.join(
                label_path, img_name))
        psnr = cal_psnr(img, label)
        ssim = cal_ssim(img, label)
        mse = cal_mse(img, label)
        psnr_list.append(psnr)
        ssim_list.append(ssim)
        mse_list.append(mse)
    ave_psnr = np.mean(psnr_list)
    ave_ssim = np.mean(ssim_list)
    ave_mse = np.mean(mse_list)
    return ave_psnr, ave_ssim, ave_mse
| [
"numpy.sqrt",
"torchvision.transforms.ToPILImage",
"numpy.array",
"cv2.warpPerspective",
"numpy.sin",
"skimage.metrics.peak_signal_noise_ratio",
"numpy.mean",
"os.listdir",
"skimage.metrics.structural_similarity",
"numpy.where",
"numpy.asarray",
"skimage.metrics.mean_squared_error",
"torchvi... | [((1763, 1799), 'skimage.metrics.mean_squared_error', 'metrics.mean_squared_error', (['src', 'tar'], {}), '(src, tar)\n', (1789, 1799), False, 'from skimage import metrics\n'), ((1852, 1909), 'skimage.metrics.peak_signal_noise_ratio', 'metrics.peak_signal_noise_ratio', (['src', 'tar'], {'data_range': '(255)'}), '(src, tar, data_range=255)\n', (1883, 1909), False, 'from skimage import metrics\n'), ((1963, 2037), 'skimage.metrics.structural_similarity', 'metrics.structural_similarity', (['src', 'tar'], {'data_range': '(255)', 'multichannel': '(True)'}), '(src, tar, data_range=255, multichannel=True)\n', (1992, 2037), False, 'from skimage import metrics\n'), ((2428, 2461), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (2436, 2461), True, 'import numpy as np\n'), ((3505, 3557), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (3512, 3557), True, 'import numpy as np\n'), ((3575, 3627), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (3582, 3627), True, 'import numpy as np\n'), ((3736, 3788), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (3743, 3788), True, 'import numpy as np\n'), ((3807, 3859), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (3814, 3859), True, 'import numpy as np\n'), ((4577, 4687), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (4585, 4687), True, 'import numpy as np\n'), ((4712, 4758), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', 
(['expand_quad', 'rect'], {}), '(expand_quad, rect)\n', (4739, 4758), False, 'import cv2\n'), ((4772, 4847), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(maxWidth, maxHeight)'], {'flags': 'cv2.INTER_CUBIC'}), '(image, M, (maxWidth, maxHeight), flags=cv2.INTER_CUBIC)\n', (4791, 4847), False, 'import cv2\n'), ((5404, 5438), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (5422, 5438), False, 'from torchvision import transforms\n'), ((6387, 6433), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['rect', 'expand_poly'], {}), '(rect, expand_poly)\n', (6414, 6433), False, 'import cv2\n'), ((6458, 6537), 'cv2.warpPerspective', 'cv2.warpPerspective', (['output', 'MM', '(img_width, img_height)'], {'flags': 'cv2.INTER_CUBIC'}), '(output, MM, (img_width, img_height), flags=cv2.INTER_CUBIC)\n', (6477, 6537), False, 'import cv2\n'), ((6558, 6604), 'numpy.zeros', 'np.zeros', (['(img_height, img_width, 1)', 'np.uint8'], {}), '((img_height, img_width, 1), np.uint8)\n', (6566, 6604), True, 'import numpy as np\n'), ((6665, 6716), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['mask', 'poly_pts', '(255, 255, 255)'], {}), '(mask, poly_pts, (255, 255, 255))\n', (6683, 6716), False, 'import cv2\n'), ((6731, 6783), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv2.MORPH_ELLIPSE, (3, 3))\n', (6756, 6783), False, 'import cv2\n'), ((6822, 6846), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {}), '(mask, kernel)\n', (6832, 6846), False, 'import cv2\n'), ((6910, 6952), 'numpy.where', 'np.where', (['(mask > 0)', 'text_erased_patch', 'dst'], {}), '(mask > 0, text_erased_patch, dst)\n', (6918, 6952), True, 'import numpy as np\n'), ((10278, 10296), 'numpy.mean', 'np.mean', (['psnr_list'], {}), '(psnr_list)\n', (10285, 10296), True, 'import numpy as np\n'), ((10312, 10330), 'numpy.mean', 'np.mean', (['ssim_list'], {}), '(ssim_list)\n', (10319, 10330), 
True, 'import numpy as np\n'), ((10345, 10362), 'numpy.mean', 'np.mean', (['mse_list'], {}), '(mse_list)\n', (10352, 10362), True, 'import numpy as np\n'), ((1255, 1278), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1276, 1278), False, 'from torchvision import transforms\n'), ((1366, 1385), 'numpy.asarray', 'np.asarray', (['jpg_img'], {}), '(jpg_img)\n', (1376, 1385), True, 'import numpy as np\n'), ((1522, 1545), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1543, 1545), False, 'from torchvision import transforms\n'), ((1633, 1652), 'numpy.asarray', 'np.asarray', (['jpg_img'], {}), '(jpg_img)\n', (1643, 1652), True, 'import numpy as np\n'), ((5008, 5046), 'cv2.cvtColor', 'cv2.cvtColor', (['input', 'cv2.COLOR_BGR2RGB'], {}), '(input, cv2.COLOR_BGR2RGB)\n', (5020, 5046), False, 'import cv2\n'), ((5236, 5302), 'torchvision.transforms.Resize', 'transforms.Resize', (['(resize_H, resize_W)', 'InterpolationMode.BICUBIC'], {}), '((resize_H, resize_W), InterpolationMode.BICUBIC)\n', (5253, 5302), False, 'from torchvision import transforms\n'), ((5304, 5325), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5323, 5325), False, 'from torchvision import transforms\n'), ((5332, 5386), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (5352, 5386), False, 'from torchvision import transforms\n'), ((5756, 5771), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5769, 5771), False, 'import torch\n'), ((2578, 2610), 'numpy.sqrt', 'np.sqrt', (['(cha_x ** 2 + cha_y ** 2)'], {}), '(cha_x ** 2 + cha_y ** 2)\n', (2585, 2610), True, 'import numpy as np\n'), ((7090, 7113), 'os.listdir', 'os.listdir', (['src_txt_dir'], {}), '(src_txt_dir)\n', (7100, 7113), False, 'import os\n'), ((7290, 7323), 'os.path.join', 'os.path.join', (['src_txt_dir', 'gt_txt'], {}), '(src_txt_dir, gt_txt)\n', (7302, 
7323), False, 'import os\n'), ((8203, 8227), 'numpy.array', 'np.array', (['poly', 'np.int32'], {}), '(poly, np.int32)\n', (8211, 8227), True, 'import numpy as np\n'), ((9396, 9437), 'os.path.join', 'os.path.join', (['save_path', "(img_idx + '.png')"], {}), "(save_path, img_idx + '.png')\n", (9408, 9437), False, 'import os\n'), ((9515, 9535), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (9525, 9535), False, 'import os\n'), ((9761, 9793), 'os.path.join', 'os.path.join', (['img_path', 'img_name'], {}), '(img_path, img_name)\n', (9773, 9793), False, 'import os\n'), ((2641, 2665), 'numpy.arctan', 'np.arctan', (['(cha_y / cha_x)'], {}), '(cha_y / cha_x)\n', (2650, 2665), True, 'import numpy as np\n'), ((7575, 7618), 'os.path.join', 'os.path.join', (['src_img_dir', "(img_idx + '.jpg')"], {}), "(src_img_dir, img_idx + '.jpg')\n", (7587, 7618), False, 'import os\n'), ((2727, 2738), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2733, 2738), True, 'import numpy as np\n'), ((2746, 2757), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2752, 2757), True, 'import numpy as np\n'), ((7708, 7751), 'os.path.join', 'os.path.join', (['src_img_dir', "(img_idx + '.png')"], {}), "(src_img_dir, img_idx + '.png')\n", (7720, 7751), False, 'import os\n'), ((10012, 10046), 'os.path.join', 'os.path.join', (['label_path', 'img_name'], {}), '(label_path, img_name)\n', (10024, 10046), False, 'import os\n'), ((2794, 2805), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2800, 2805), True, 'import numpy as np\n'), ((2816, 2827), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2822, 2827), True, 'import numpy as np\n'), ((2934, 2945), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2940, 2945), True, 'import numpy as np\n'), ((3098, 3109), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (3104, 3109), True, 'import numpy as np\n'), ((2957, 2968), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2963, 2968), True, 'import numpy as np\n'), ((3076, 3087), 'numpy.cos', 
'np.cos', (['ang'], {}), '(ang)\n', (3082, 3087), True, 'import numpy as np\n')] |
from numpy import fabs, arange
class Integration:
    """Numerical integration helpers: rectangle, trapezoid and Simpson rules."""

    EPS = 1e-6
    # Class-level (shared) buffers that ``simpson`` fills for later plotting.
    area_domain = list()
    area_codomain = list()

    @staticmethod
    def rectangles(func, a, b, iterations=180):
        """Midpoint-rectangle rule; stops early once the running sum converges."""
        step = (b - a) / iterations
        xs = [a + 0.5 * step]
        ys = [func(xs[0])]
        total = 0.0
        for k in range(iterations):
            previous = total
            total += func(xs[k]) * step
            # The last slice contributed less than EPS: treat as converged.
            if fabs(total - previous) < Integration.EPS:
                break
            xs.append(xs[k] + step)
            ys.append(func(xs[k + 1]))
        # plt.plot(x, y, 'bs', color='red', label='Rectangles')
        return total

    @staticmethod
    def trapezoids(func, a, b, iterations=150):
        """Trapezoid rule with interior samples taken at mid-step offsets."""
        step = (b - a) / iterations
        xs = [a + 0.5 * step]
        ys = [func(xs[0])]
        interior_sum = 0.0
        for k in range(iterations):
            interior_sum += func(xs[k])
            xs.append(xs[k] + step)
            ys.append(func(xs[k + 1]))
        # plt.plot(x, y, 'g^', color='yellow', label='Rectangles')
        return step * ((func(a) + func(b)) / 2 + interior_sum)

    @staticmethod
    def simpson(func, a, b, iterations=50):
        """Simpson-style weighted sum; also records the sampling grid."""
        step = (b - a) / iterations
        Integration.area_domain.extend(arange(a, b, step))
        x = a
        weighted = func(a) + func(b)
        for k in range(iterations):
            x += step
            # Alternate 4/2 weights starting with 4 on the first interior point.
            weighted += 2.0 * func(x) if k % 2 else 4.0 * func(x)
        Integration.area_codomain.append((step / 3) * weighted)
        return (step / 3.0) * weighted
| [
"numpy.fabs",
"numpy.arange"
] | [((1360, 1381), 'numpy.arange', 'arange', (['a', 'b', 'delta_x'], {}), '(a, b, delta_x)\n', (1366, 1381), False, 'from numpy import fabs, arange\n'), ((463, 493), 'numpy.fabs', 'fabs', (['(current_area - prev_area)'], {}), '(current_area - prev_area)\n', (467, 493), False, 'from numpy import fabs, arange\n')] |
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Tuple, List, Optional
import matplotlib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from pydub import AudioSegment
from wav_steganography.wav_file import WAVFile
# Root "audio" directory, resolved relative to this file's grandparent dir.
audio_files = Path(__file__).parent.parent / "audio"
# Map of file stem -> full path for every WAV file found (top level and
# the 1min_files/ subdirectory).
all_audio_files = {}
for glob in ["*.wav", "1min_files/*.wav"]:
    for curr_path in audio_files.glob(glob):
        all_audio_files[curr_path.stem] = curr_path
def find_matching_audio_file(substring_of_filename: str) -> Path:
    """Return the first known audio file whose stem contains the substring.

    Falls back to None when no stem matches.
    """
    matches = (path for stem, path in all_audio_files.items()
               if substring_of_filename in stem)
    return next(matches, None)
def convert_to_file_format_and_back(file_path, bitrate=None, file_format="mp3") -> Tuple[WAVFile, WAVFile]:
    """Round-trip a WAV file through a lossy format and reload both versions.

    The file is encoded to ``file_format`` (at ``bitrate`` if given) in a
    temporary directory, decoded back to WAV, and both the original and the
    round-tripped file are loaded as WAVFile objects.

    Returns:
        (pre_conversion, after_conversion) WAVFile pair.
    """
    with TemporaryDirectory() as tmp_dir:
        audio_file = AudioSegment.from_file(file_path)
        mp3_file_path = Path(tmp_dir) / f"converted.{file_format}"
        audio_file.export(mp3_file_path, format=file_format, bitrate=bitrate)
        mp3_file = AudioSegment.from_file(mp3_file_path)
        mp3_file.export(mp3_file_path.with_suffix(".wav"), format="wav")
        # Both WAVFiles are constructed before the temp dir is deleted.
        pre_conversion = WAVFile(file_path)
        after_conversion = WAVFile(mp3_file_path.with_suffix(".wav"))
    return pre_conversion, after_conversion
def comparison_pre_and_after_mp3_conversion(file_path, bitrate=None, print_=False) -> Optional[List[float]]:
    """Measure per-bit survival of WAV samples through an MP3 round trip.

    For each of the 16 sample bits (LSB first), computes the fraction of
    samples whose bit is identical before and after conversion.

    Args:
        file_path: WAV file to analyze.
        bitrate: MP3 bitrate string (e.g. "128k") or None for the default.
        print_: when True, prints a per-bit breakdown.

    Returns:
        List of 16 fractions (index 0 = least significant bit), or None when
        the converted data length no longer matches the original.
    """
    pre_conversion, after_conversion = convert_to_file_format_and_back(file_path, bitrate)
    pre_data = pre_conversion.data
    after_data = after_conversion.data
    total = len(pre_data)
    percentages = []
    if len(pre_data) != len(after_data):
        print(f"Shape mismatch pre-conversion: {len(pre_data)} with post-conversion: {len(after_data)}, skipping!")
        return None
    print(f"Average difference (bitrate={bitrate}): {np.average(np.abs(pre_data - after_data)):.1f}")
    for bit in range(16):
        # Isolate bit ``bit`` in every sample and count matches.
        power = 1 << bit
        correct = np.sum(pre_data & power == after_data & power)
        percent = correct / total
        percentages.append(percent)
        if print_:
            print(f"Bit {bit + 1} ({power}): {correct:,d} are the same out of {total:,d} ({percent:.1%})")
    return percentages
def plot_bit_percentages_for_file(curr_file_path: Path, show=False):
    """Plot, per MP3 bitrate, the fraction of bits surviving WAV->MP3->WAV.

    Saves a heatmap to ``figures/<name>.png`` next to this file and
    optionally shows it interactively. Returns early (plotting nothing)
    when the conversion produces mismatched data lengths.

    Args:
        curr_file_path: WAV file to analyze.
        show: when True, also display the figure interactively.
    """
    # BUG FIX: figure_path used to be assigned only inside an
    # ``if curr_file_path:`` guard, causing a NameError at savefig time
    # for falsy paths; compute it unconditionally.
    figure_path = Path(__file__).parent / "figures" / curr_file_path.with_suffix(".png").name
    print(f"Saving figure {figure_path}")
    possible_bitrates = ["64k", "92k", "128k", "256k", "312k"]
    data = {}
    for bitrate in possible_bitrates:
        percentages = comparison_pre_and_after_mp3_conversion(curr_file_path, bitrate=bitrate)
        if percentages is None:
            # Length mismatch after conversion — nothing to plot.
            return
        # Reverse so the most significant bit comes first (column 16).
        percentages.reverse()
        data[bitrate] = percentages
    dataframe = pd.DataFrame.from_dict(data, orient="index", columns=range(16, 0, -1))
    matplotlib.rcParams["font.size"] = "5"
    plt.imshow(dataframe, vmin=0.5, vmax=1)
    for (j, i), label in np.ndenumerate(dataframe.round(2)):
        # BUG FIX: this label used to be drawn twice per cell.
        plt.text(i, j, label, ha='center', va='center')
    plt.xticks(*zip(*enumerate(dataframe.columns)))
    plt.xlabel("bit")
    plt.yticks(*zip(*enumerate(dataframe.index.values)))
    plt.ylabel("MP3 bitrate")
    plt.title(f"Comparison of Equal Bits in WAV -> MP3 -> WAV Conversion for File '{curr_file_path.name}'")
    # f"The numbers show the percentage of equal bits pre/post comparison. "
    # f"1.0 means all bits are the same, 0.5 means that it is essentially random."
    print(dataframe)
    plt.tight_layout()
    figure_path.parent.mkdir(exist_ok=True)
    plt.savefig(figure_path, dpi=300, pad_inches=0)
    if show:
        plt.show()
    plt.close()
def main():
    """Generate and save a bit-equality heatmap for every known audio file."""
    for audio_path in all_audio_files.values():
        plot_bit_percentages_for_file(audio_path)


if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.imshow",
"tempfile.TemporaryDirectory",
"matplotlib.pyplot.text",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"wav_steganography.wav_file.WAVFile",
"numpy.sum",
"pydub.AudioSegment.from_file",
"matplotlib.... | [((796, 816), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (814, 816), False, 'from tempfile import TemporaryDirectory\n'), ((850, 883), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['file_path'], {}), '(file_path)\n', (872, 883), False, 'from pydub import AudioSegment\n'), ((1049, 1086), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['mp3_file_path'], {}), '(mp3_file_path)\n', (1071, 1086), False, 'from pydub import AudioSegment\n'), ((1186, 1204), 'wav_steganography.wav_file.WAVFile', 'WAVFile', (['file_path'], {}), '(file_path)\n', (1193, 1204), False, 'from wav_steganography.wav_file import WAVFile\n'), ((1997, 2043), 'numpy.sum', 'np.sum', (['(pre_data & power == after_data & power)'], {}), '(pre_data & power == after_data & power)\n', (2003, 2043), True, 'import numpy as np\n'), ((3006, 3045), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dataframe'], {'vmin': '(0.5)', 'vmax': '(1)'}), '(dataframe, vmin=0.5, vmax=1)\n', (3016, 3045), True, 'from matplotlib import pyplot as plt\n'), ((3295, 3312), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""bit"""'], {}), "('bit')\n", (3305, 3312), True, 'from matplotlib import pyplot as plt\n'), ((3382, 3407), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MP3 bitrate"""'], {}), "('MP3 bitrate')\n", (3392, 3407), True, 'from matplotlib import pyplot as plt\n'), ((3416, 3529), 'matplotlib.pyplot.title', 'plt.title', (['f"""Comparison of Equal Bits in WAV -> MP3 -> WAV Conversion for File \'{curr_file_path.name}\'"""'], {}), '(\n f"Comparison of Equal Bits in WAV -> MP3 -> WAV Conversion for File \'{curr_file_path.name}\'"\n )\n', (3425, 3529), True, 'from matplotlib import pyplot as plt\n'), ((3721, 3739), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3737, 3739), True, 'from matplotlib import pyplot as plt\n'), ((3796, 3843), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {'dpi': '(300)', 'pad_inches': '(0)'}), 
'(figure_path, dpi=300, pad_inches=0)\n', (3807, 3843), True, 'from matplotlib import pyplot as plt\n'), ((3892, 3903), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3901, 3903), True, 'from matplotlib import pyplot as plt\n'), ((295, 309), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (299, 309), False, 'from pathlib import Path\n'), ((908, 921), 'pathlib.Path', 'Path', (['tmp_dir'], {}), '(tmp_dir)\n', (912, 921), False, 'from pathlib import Path\n'), ((3123, 3170), 'matplotlib.pyplot.text', 'plt.text', (['i', 'j', 'label'], {'ha': '"""center"""', 'va': '"""center"""'}), "(i, j, label, ha='center', va='center')\n", (3131, 3170), True, 'from matplotlib import pyplot as plt\n'), ((3183, 3230), 'matplotlib.pyplot.text', 'plt.text', (['i', 'j', 'label'], {'ha': '"""center"""', 'va': '"""center"""'}), "(i, j, label, ha='center', va='center')\n", (3191, 3230), True, 'from matplotlib import pyplot as plt\n'), ((3873, 3883), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3881, 3883), True, 'from matplotlib import pyplot as plt\n'), ((1889, 1918), 'numpy.abs', 'np.abs', (['(pre_data - after_data)'], {}), '(pre_data - after_data)\n', (1895, 1918), True, 'import numpy as np\n'), ((2379, 2393), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2383, 2393), False, 'from pathlib import Path\n')] |
import numpy as np
import scipy.special as ss
import scipy.signal as ss2
import scipy
from numpy import abs, sin, cos, real, exp, pi, sqrt
def psi_s(z, x, beta):
    """
    2D longitudinal potential.

    Eq. (23) from Ref[1] without the constant factor (e*beta**2/2/rho**2).
    Ref[1]: PRAB 23, 014402 (2020).
    Here 'x' corresponds to 'chi = x / rho' in the paper.
    """
    two_alpha = 2 * alpha(z, x, beta)
    numerator = cos(two_alpha) - 1 / (1 + x)
    denominator = kappa(z, x, beta) - beta * (1 + x) * sin(two_alpha)
    # Vanishing denominators produce NaN/Inf; map them to finite values.
    return np.nan_to_num(numerator / denominator)
def psi_x_where_x_equals_zero(z, dx, beta):
    """
    Approximate psi_x at the singular point x = 0.

    Averages psi_x evaluated a half step to either side of zero,
    a rough smoothing of the singularity across x = 0.
    """
    left = psi_x(z, -dx / 2, beta)
    right = psi_x(z, dx / 2, beta)
    return (left + right) / 2
@np.vectorize
def ss_ellipf(phi, m):
    """Element-wise incomplete elliptic integral of the first kind."""
    return ss.ellipkinc(phi, m)
@np.vectorize
def ss_ellipe(phi, m):
    """Element-wise incomplete elliptic integral of the second kind."""
    return ss.ellipeinc(phi, m)
def psi_x(z, x, beta):
    """
    2D transverse potential.

    Eq.(24) from Ref[1] with argument zeta=0 and no constant factor e*beta**2/2/rho**2.
    Note that 'x' here corresponds to 'chi = x/rho',
    and 'z' here corresponds to 'xi = z/2/rho' in the paper.
    Invalid values (NaN/Inf from the singular point x = 0) are mapped to
    finite numbers by np.nan_to_num before returning.
    """
    # z = np.float(z)
    # x = np.float(x)
    kap = kappa(z, x, beta)
    alp = alpha(z, x, beta)
    # Parameter of the incomplete elliptic integrals; diverges as x -> 0.
    arg2 = -4 * (1+x) / x**2
    try:
        # T1..T5: the individual terms of Eq. (24), summed below.
        T1 = (1/abs(x)/(1 + x) * ((2 + 2*x + x**2) * ss.ellipkinc(alp, arg2)- x**2 * ss.ellipeinc(alp, arg2)))
        # Shared denominator of T2-T4.
        D = kap**2 - beta**2 * (1 + x)**2 * sin(2*alp)**2
        T2 = ((kap**2 - 2*beta** 2 * (1+x)**2 + beta**2 * (1+x) * (2 + 2*x + x**2) * cos(2*alp))/ beta/ (1+x)/ D)
        T3 = -kap * sin(2 * alp) / D
        T4 = kap * beta ** 2 * (1 + x) * sin(2 * alp) * cos(2 * alp) / D
        T5 = 1 / abs(x) * ss.ellipkinc(alp, arg2) # psi_phi without e/rho**2 factor
        # Keep only the real part; the elliptic integrals can go complex
        # for the negative parameter arg2.
        out = real((T1 + T2 + T3 + T4) - 2 / beta ** 2 * T5)
    except ZeroDivisionError:
        # Scalar evaluation exactly at a singular point (e.g. x == 0).
        out = 0
        # print(f"Oops! ZeroDivisionError at (z,x)= ({z:5.2f},{x:5.2f}). Returning 0.")
    return np.nan_to_num(out)
def nu(x, beta):
    """
    Eq. (6) from Ref[1]: the coefficient of alpha**2.

    Here 'x' corresponds to 'chi = x/rho' in the paper.
    """
    numerator = 3 * (1 - beta**2 - beta**2 * x)
    return numerator / beta**2 / (1 + x)
def eta(z, x, beta):
    """
    Eq. (6) from Ref[1]: the coefficient of alpha.

    Here 'x' corresponds to 'chi = x/rho' and 'z' to 'xi = z/2/rho'.
    """
    scaled = -6 * z / beta**2
    return scaled / (1 + x)
def zeta(z, x, beta):
    """
    Eq. (6) from Ref[1]: the constant term.

    Here 'x' corresponds to 'chi = x/rho' and 'z' to 'xi = z/2/rho'.
    """
    numerator = 3 * (4 * z**2 - beta**2 * x**2)
    return numerator / 4 / beta**2 / (1 + x)
def Omega(z, x, beta):
    """
    Eq. (A3) from Ref[1].

    Here 'x' corresponds to 'chi = x/rho' and 'z' to 'xi = z/2/rho'.
    """
    e = eta(z, x, beta)
    zt = zeta(z, x, beta)
    n = nu(x, beta)
    base = e**2 / 16 - zt * n / 6 + n**3 / 216
    radicand = base**2 - (zt / 3 + n**2 / 36)**3
    return base + radicand**(1 / 2)
def m(z, x, beta):
    """
    Eq. (A2) from Ref[1].

    Here 'x' corresponds to 'chi = x/rho' and 'z' to 'xi = z/2/rho'.
    """
    n = nu(x, beta)
    om = Omega(z, x, beta)
    middle = (zeta(z, x, beta) / 3 + n**2 / 36) * om**(-1 / 3)
    return -n / 3 + middle + om**(1 / 3)
def alpha_where_z_equals_zero(x, beta):
    """
    Evaluate alpha(z, x) on the x axis (z == 0).

    There Eq. (24) from Ref[1] reduces to a quadratic in alpha**2.
    """
    coeff_b = nu(x, beta)
    coeff_c = -3 * (beta**2 * x**2) / 4 / beta**2 / (1 + x)
    # Positive quadratic root; since coeff_b > 0 the other root is
    # always negative and is discarded.
    alpha_sq = (-coeff_b + sqrt(coeff_b**2 - 4 * coeff_c)) / 2
    return sqrt(alpha_sq)
def alpha_where_z_not_zero(z, x, beta):
    """
    Eq. (A4) from Ref[1], valid off the x axis (z != 0).

    Here 'x' corresponds to 'chi = x/rho' and 'z' to 'xi = z/2/rho'.
    """
    mm = m(z, x, beta)
    first = sqrt(2 * abs(mm))
    second = -2 * (mm + nu(x, beta))
    third = 2 * eta(z, x, beta) / first
    sgn = np.sign(z)
    return np.real(1 / 2 * (sgn * first + sqrt(abs(second - sgn * third))))
def alpha_old(z, x, beta):
    """
    Eq. (A4) from Ref[1] — array-only predecessor of alpha().

    Expects z, x and beta to all be NumPy arrays of the same shape
    (scalars are not handled; see alpha() for the general version).
    """
    out = np.empty(x.shape)
    on_axis = z == 0
    idx_zero = np.where(on_axis)
    idx_nonzero = np.where(~on_axis)
    out[idx_zero] = alpha_where_z_equals_zero(x[idx_zero], beta[idx_zero])
    out[idx_nonzero] = alpha_where_z_not_zero(z[idx_nonzero], x[idx_nonzero], beta[idx_nonzero])
    return out
def alpha(z, x, beta):
    """
    Angle alpha as a function of z, x and beta (Eq. (A4)/(24) from Ref[1]).

    Dispatches between the closed-form solution on the x axis (z == 0)
    and the general expression for z != 0.  ``z`` may be a scalar or a
    NumPy array; ``x`` and ``beta`` may each independently be a scalar
    (broadcast over z) or an array matching z's shape.
    """
    on_x_axis = z == 0
    # Scalar z: call the appropriate closed form directly.
    if not isinstance(z, np.ndarray):
        if on_x_axis:
            return alpha_where_z_equals_zero(x, beta)
        return alpha_where_z_not_zero(z, x, beta)

    # Array z: evaluate each regime on its own index set.
    # (A dead debug branch printing when `len(ix1) == 0` was removed —
    # np.where returns a tuple of length ndim, so it could never fire.)
    out = np.empty(z.shape)
    ix1 = np.where(on_x_axis)
    ix2 = np.where(~on_x_axis)

    # x and beta may each be a scalar (broadcast) or an array (indexed).
    if isinstance(x, np.ndarray):
        x1, x2 = x[ix1], x[ix2]
    else:
        x1 = x2 = x
    if isinstance(beta, np.ndarray):
        beta1, beta2 = beta[ix1], beta[ix2]
    else:
        beta1 = beta2 = beta

    out[ix1] = alpha_where_z_equals_zero(x1, beta1)
    out[ix2] = alpha_where_z_not_zero(z[ix2], x2, beta2)
    return out
@np.vectorize
def alpha_exact(z, x, beta):
    """
    Exact alpha via numerical root finding.  For testing only!

    Solves Eq. (23) from Ref[1] for alpha with a bracketed scalar
    root search on [-1, 1].
    """
    def residual(a):
        return a - beta / 2 * sqrt(x**2 + 4 * (1 + x) * np.sin(a)**2) - z

    return scipy.optimize.root_scalar(residual, bracket=(-1, 1)).root
def kappa(z, x, beta):
    """
    Eq. (13) from Ref[1] with argument zeta = 0.

    Here 'x' corresponds to 'chi = x/rho' and 'z' to 'xi = z/2/rho'.
    """
    sin_alpha = sin(alpha(z, x, beta))
    return (x**2 + 4 * (1 + x) * sin_alpha**2)**(1/2)
### Functions below are obsolete
def lambda_p_Gauss(z, x, sigmaz=10e-6, sigmax=10e-6):
    """
    z-derivative of a normalized 2D Gaussian G(z, x).

    Parameters
    ----------
    z, x : float or ndarray
        Longitudinal and transverse coordinates.
    sigmaz, sigmax : float, optional
        RMS widths of the Gaussian in z and x.  Defaults preserve the
        previously hard-coded value 10e-6.

    Returns
    -------
    float or ndarray
        dG/dz evaluated at (z, x).
    """
    return (
        1/(2*pi*sigmaz*sigmax)
        * exp(-x**2 / 2 / sigmax**2)
        * exp(-z**2 / 2 / sigmaz**2)
        * (-z / sigmaz**2))
def make_2dgrid(func, zmin, zmax, dz, xmin, xmax, dx):
    """
    Evaluate ``func(z, x)`` on a regular 2D grid.

    Rows run over z values (np.arange(zmin, zmax, dz)), columns over
    x values (np.arange(xmin, xmax, dx)).  Returns a float ndarray.
    """
    z_values = np.arange(zmin, zmax, dz)
    x_values = np.arange(xmin, xmax, dx)
    rows = []
    for z in z_values:
        rows.append([func(z, x) for x in x_values])
    return np.array(rows, dtype=float)
def WsOld(gamma, rho, sigmaz, sigmax, dz, dx):
    """
    Longitudinal wake Ws on a grid via 2D convolution (obsolete).

    Returns (zvec, xvec, WsConv), where zvec/xvec define the output
    grid.  The convolution step still needs improvement.
    """
    beta = (1 - 1 / gamma ** 2) ** (1 / 2)
    zvec = np.arange(-5 * sigmaz, 5 * sigmaz, dz)
    xvec = np.arange(-5 * sigmax, 5 * sigmax, dx)
    # Charge-derivative grid on the (z, x) output window.
    lambdap_grid = np.array(
        [[lambda_p_Gauss(z, x) for x in xvec] for z in zvec], dtype=float)
    # Potential kernel sampled on a twice-as-large window.
    zvec2 = np.arange(-10 * sigmaz, 10 * sigmaz, dz)
    xvec2 = np.arange(-10 * sigmax, 10 * sigmax, dx)
    psi_s_grid = np.array(
        [[psi_s(z / 2 / rho, x, beta) for x in xvec2] for z in zvec2], dtype=float)
    conv_s = ss2.convolve2d(
        lambdap_grid, psi_s_grid, mode="same", boundary="fill", fillvalue=0
    )
    WsConv = beta ** 2 / rho * conv_s * (dz) * (dx)
    return zvec, xvec, WsConv
def WxOld(gamma, rho, sigmaz, sigmax, dz, dx):
    """
    Transverse wake Wx on a grid via 2D convolution (obsolete).

    Returns (zvec, xvec, WxConv), where zvec/xvec define the output
    grid.  The convolution step still needs improvement.
    """
    beta = (1 - 1 / gamma ** 2) ** (1 / 2)
    zvec = np.arange(-5 * sigmaz, 5 * sigmaz, dz)
    xvec = np.arange(-5 * sigmax, 5 * sigmax, dx)
    # Charge-derivative grid on the (z, x) output window.
    lambdap_grid = np.array(
        [[lambda_p_Gauss(z, x) for x in xvec] for z in zvec], dtype=float)
    # Potential kernel sampled on a twice-as-large window.
    zvec2 = np.arange(-10 * sigmaz, 10 * sigmaz, dz)
    xvec2 = np.arange(-10 * sigmax, 10 * sigmax, dx)
    psi_x_grid = np.array(
        [[psi_x(z / 2 / rho, x, beta) for x in xvec2] for z in zvec2], dtype=float)
    conv_x = ss2.convolve2d(
        lambdap_grid, psi_x_grid, mode="same", boundary="fill", fillvalue=0
    )
    WxConv = beta ** 2 / rho * conv_x * (dz) * (dx)
    return zvec, xvec, WxConv
| [
"scipy.special.ellipeinc",
"scipy.signal.convolve2d",
"scipy.optimize.root_scalar",
"numpy.abs",
"numpy.sqrt",
"numpy.arange",
"numpy.where",
"numpy.exp",
"numpy.array",
"numpy.real",
"scipy.special.ellipkinc",
"numpy.sign",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numpy.nan_to_num"
] | [((691, 709), 'numpy.nan_to_num', 'np.nan_to_num', (['out'], {}), '(out)\n', (704, 709), True, 'import numpy as np\n'), ((989, 1009), 'scipy.special.ellipkinc', 'ss.ellipkinc', (['phi', 'm'], {}), '(phi, m)\n', (1001, 1009), True, 'import scipy.special as ss\n'), ((1092, 1112), 'scipy.special.ellipeinc', 'ss.ellipeinc', (['phi', 'm'], {}), '(phi, m)\n', (1104, 1112), True, 'import scipy.special as ss\n'), ((2216, 2234), 'numpy.nan_to_num', 'np.nan_to_num', (['out'], {}), '(out)\n', (2229, 2234), True, 'import numpy as np\n'), ((4016, 4027), 'numpy.sqrt', 'sqrt', (['root1'], {}), '(root1)\n', (4020, 4027), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((4367, 4377), 'numpy.sign', 'np.sign', (['z'], {}), '(z)\n', (4374, 4377), True, 'import numpy as np\n'), ((4750, 4767), 'numpy.empty', 'np.empty', (['x.shape'], {}), '(x.shape)\n', (4758, 4767), True, 'import numpy as np\n'), ((4801, 4820), 'numpy.where', 'np.where', (['z_is_zero'], {}), '(z_is_zero)\n', (4809, 4820), True, 'import numpy as np\n'), ((4831, 4851), 'numpy.where', 'np.where', (['(~z_is_zero)'], {}), '(~z_is_zero)\n', (4839, 4851), True, 'import numpy as np\n'), ((5360, 5377), 'numpy.empty', 'np.empty', (['z.shape'], {}), '(z.shape)\n', (5368, 5377), True, 'import numpy as np\n'), ((5388, 5407), 'numpy.where', 'np.where', (['on_x_axis'], {}), '(on_x_axis)\n', (5396, 5407), True, 'import numpy as np\n'), ((5418, 5438), 'numpy.where', 'np.where', (['(~on_x_axis)'], {}), '(~on_x_axis)\n', (5426, 5438), True, 'import numpy as np\n'), ((6176, 6222), 'scipy.optimize.root_scalar', 'scipy.optimize.root_scalar', (['f'], {'bracket': '(-1, 1)'}), '(f, bracket=(-1, 1))\n', (6202, 6222), False, 'import scipy\n'), ((6942, 6967), 'numpy.arange', 'np.arange', (['zmin', 'zmax', 'dz'], {}), '(zmin, zmax, dz)\n', (6951, 6967), True, 'import numpy as np\n'), ((6979, 7004), 'numpy.arange', 'np.arange', (['xmin', 'xmax', 'dx'], {}), '(xmin, xmax, dx)\n', (6988, 7004), True, 'import numpy as np\n'), 
((7072, 7101), 'numpy.array', 'np.array', (['list2d'], {'dtype': 'float'}), '(list2d, dtype=float)\n', (7080, 7101), True, 'import numpy as np\n'), ((7404, 7442), 'numpy.arange', 'np.arange', (['(-5 * sigmaz)', '(5 * sigmaz)', 'dz'], {}), '(-5 * sigmaz, 5 * sigmaz, dz)\n', (7413, 7442), True, 'import numpy as np\n'), ((7454, 7492), 'numpy.arange', 'np.arange', (['(-5 * sigmax)', '(5 * sigmax)', 'dx'], {}), '(-5 * sigmax, 5 * sigmax, dx)\n', (7463, 7492), True, 'import numpy as np\n'), ((7584, 7619), 'numpy.array', 'np.array', (['lambdap_list'], {'dtype': 'float'}), '(lambdap_list, dtype=float)\n', (7592, 7619), True, 'import numpy as np\n'), ((7633, 7673), 'numpy.arange', 'np.arange', (['(-10 * sigmaz)', '(10 * sigmaz)', 'dz'], {}), '(-10 * sigmaz, 10 * sigmaz, dz)\n', (7642, 7673), True, 'import numpy as np\n'), ((7686, 7726), 'numpy.arange', 'np.arange', (['(-10 * sigmax)', '(10 * sigmax)', 'dx'], {}), '(-10 * sigmax, 10 * sigmax, dx)\n', (7695, 7726), True, 'import numpy as np\n'), ((7823, 7856), 'numpy.array', 'np.array', (['psi_s_list'], {'dtype': 'float'}), '(psi_s_list, dtype=float)\n', (7831, 7856), True, 'import numpy as np\n'), ((7871, 7958), 'scipy.signal.convolve2d', 'ss2.convolve2d', (['lambdap_grid', 'psi_s_grid'], {'mode': '"""same"""', 'boundary': '"""fill"""', 'fillvalue': '(0)'}), "(lambdap_grid, psi_s_grid, mode='same', boundary='fill',\n fillvalue=0)\n", (7885, 7958), True, 'import scipy.signal as ss2\n'), ((8351, 8389), 'numpy.arange', 'np.arange', (['(-5 * sigmaz)', '(5 * sigmaz)', 'dz'], {}), '(-5 * sigmaz, 5 * sigmaz, dz)\n', (8360, 8389), True, 'import numpy as np\n'), ((8401, 8439), 'numpy.arange', 'np.arange', (['(-5 * sigmax)', '(5 * sigmax)', 'dx'], {}), '(-5 * sigmax, 5 * sigmax, dx)\n', (8410, 8439), True, 'import numpy as np\n'), ((8531, 8566), 'numpy.array', 'np.array', (['lambdap_list'], {'dtype': 'float'}), '(lambdap_list, dtype=float)\n', (8539, 8566), True, 'import numpy as np\n'), ((8580, 8620), 'numpy.arange', 'np.arange', 
(['(-10 * sigmaz)', '(10 * sigmaz)', 'dz'], {}), '(-10 * sigmaz, 10 * sigmaz, dz)\n', (8589, 8620), True, 'import numpy as np\n'), ((8633, 8673), 'numpy.arange', 'np.arange', (['(-10 * sigmax)', '(10 * sigmax)', 'dx'], {}), '(-10 * sigmax, 10 * sigmax, dx)\n', (8642, 8673), True, 'import numpy as np\n'), ((8770, 8803), 'numpy.array', 'np.array', (['psi_x_list'], {'dtype': 'float'}), '(psi_x_list, dtype=float)\n', (8778, 8803), True, 'import numpy as np\n'), ((8818, 8905), 'scipy.signal.convolve2d', 'ss2.convolve2d', (['lambdap_grid', 'psi_x_grid'], {'mode': '"""same"""', 'boundary': '"""fill"""', 'fillvalue': '(0)'}), "(lambdap_grid, psi_x_grid, mode='same', boundary='fill',\n fillvalue=0)\n", (8832, 8905), True, 'import scipy.signal as ss2\n'), ((2023, 2067), 'numpy.real', 'real', (['(T1 + T2 + T3 + T4 - 2 / beta ** 2 * T5)'], {}), '(T1 + T2 + T3 + T4 - 2 / beta ** 2 * T5)\n', (2027, 2067), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1950, 1973), 'scipy.special.ellipkinc', 'ss.ellipkinc', (['alp', 'arg2'], {}), '(alp, arg2)\n', (1962, 1973), True, 'import scipy.special as ss\n'), ((3881, 3901), 'numpy.sqrt', 'sqrt', (['(b ** 2 - 4 * c)'], {}), '(b ** 2 - 4 * c)\n', (3885, 3901), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((6770, 6800), 'numpy.exp', 'exp', (['(-z ** 2 / 2 / sigmaz ** 2)'], {}), '(-z ** 2 / 2 / sigmaz ** 2)\n', (6773, 6800), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1834, 1846), 'numpy.sin', 'sin', (['(2 * alp)'], {}), '(2 * alp)\n', (1837, 1846), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1907, 1919), 'numpy.cos', 'cos', (['(2 * alp)'], {}), '(2 * alp)\n', (1910, 1919), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1941, 1947), 'numpy.abs', 'abs', (['x'], {}), '(x)\n', (1944, 1947), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((6733, 6763), 'numpy.exp', 'exp', (['(-x ** 2 / 2 / sigmax ** 2)'], {}), '(-x ** 2 
/ 2 / sigmax ** 2)\n', (6736, 6763), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1547, 1553), 'numpy.abs', 'abs', (['x'], {}), '(x)\n', (1550, 1553), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1584, 1607), 'scipy.special.ellipkinc', 'ss.ellipkinc', (['alp', 'arg2'], {}), '(alp, arg2)\n', (1596, 1607), True, 'import scipy.special as ss\n'), ((1616, 1639), 'scipy.special.ellipeinc', 'ss.ellipeinc', (['alp', 'arg2'], {}), '(alp, arg2)\n', (1628, 1639), True, 'import scipy.special as ss\n'), ((1686, 1698), 'numpy.sin', 'sin', (['(2 * alp)'], {}), '(2 * alp)\n', (1689, 1698), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1892, 1904), 'numpy.sin', 'sin', (['(2 * alp)'], {}), '(2 * alp)\n', (1895, 1904), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((4429, 4453), 'numpy.abs', 'abs', (['(arg2 - zsign * arg3)'], {}), '(arg2 - zsign * arg3)\n', (4432, 4453), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((1785, 1797), 'numpy.cos', 'cos', (['(2 * alp)'], {}), '(2 * alp)\n', (1788, 1797), False, 'from numpy import abs, sin, cos, real, exp, pi, sqrt\n'), ((6142, 6151), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (6148, 6151), True, 'import numpy as np\n')] |
import os
import numpy as np
import scipy.signal
import torch
from matplotlib import pyplot as plt
def triplet_loss(alpha = 0.2):
    """Build a triplet-loss function with margin ``alpha``.

    Returns a callable ``_triplet_loss(y_pred, Batch_size)`` where
    ``y_pred`` stacks anchor, positive and negative embeddings along
    dim 0, each ``Batch_size`` rows.
    """
    def _triplet_loss(y_pred,Batch_size):
        # Split the stacked predictions into the three triplet parts.
        anchor, positive, negative = y_pred[:int(Batch_size)], y_pred[int(Batch_size):int(2*Batch_size)], y_pred[int(2*Batch_size):]
        # Euclidean distances anchor<->positive and anchor<->negative.
        pos_dist = torch.sqrt(torch.sum(torch.pow(anchor - positive,2), axis=-1))
        neg_dist = torch.sqrt(torch.sum(torch.pow(anchor - negative,2), axis=-1))
        # Keep only the "hard" triplets that violate the margin,
        # i.e. neg_dist - pos_dist < alpha.
        keep_all = (neg_dist - pos_dist < alpha).cpu().numpy().flatten()
        hard_triplets = np.where(keep_all == 1)
        # NOTE(review): .cuda() hard-codes GPU placement — assumes CUDA
        # is available; confirm before running on CPU-only hosts.
        pos_dist = pos_dist[hard_triplets].cuda()
        neg_dist = neg_dist[hard_triplets].cuda()
        basic_loss = pos_dist - neg_dist + alpha
        # Mean over hard triplets; max(1, count) guards against division
        # by zero when no triplet violates the margin.
        loss = torch.sum(basic_loss)/torch.max(torch.tensor(1),torch.tensor(len(hard_triplets[0])))
        return loss
    return _triplet_loss
def weights_init(net, init_type='normal', init_gain=0.02):
    """Initialize the weights of ``net`` in place.

    ``init_type`` selects the scheme applied to Conv layers ('normal',
    'xavier', 'kaiming' or 'orthogonal'); BatchNorm2d layers always get
    N(1.0, 0.02) weights and zero bias.  Raises NotImplementedError for
    an unknown ``init_type``.
    """
    def init_func(m):
        layer_name = m.__class__.__name__
        if hasattr(m, 'weight') and 'Conv' in layer_name:
            if init_type == 'normal':
                torch.nn.init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif 'BatchNorm2d' in layer_name:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)
    print('initialize network with %s type' % init_type)
    net.apply(init_func)
class LossHistory():
    """Record per-epoch accuracy / train loss / val loss.

    Creates ``<log_dir>/loss_<timestamp>/`` on construction.  Every
    ``append_loss`` call appends the new values to per-metric ``.txt``
    files and re-renders the loss and accuracy plots as ``.png`` files.
    """

    def __init__(self, log_dir):
        import datetime
        curr_time = datetime.datetime.now()
        time_str = datetime.datetime.strftime(curr_time, '%Y_%m_%d_%H_%M_%S')
        self.log_dir = log_dir
        self.time_str = time_str
        self.save_path = os.path.join(self.log_dir, "loss_" + str(self.time_str))
        self.acc = []
        self.losses = []
        self.val_loss = []
        os.makedirs(self.save_path)

    def _write_value(self, prefix, value):
        """Append one value to '<prefix>_<timestamp>.txt' in the save dir."""
        path = os.path.join(self.save_path, prefix + "_" + str(self.time_str) + ".txt")
        with open(path, 'a') as f:
            f.write(str(value))
            f.write("\n")

    def append_loss(self, acc, loss, val_loss):
        """Record one epoch's metrics, persist them, and refresh the plots."""
        self.acc.append(acc)
        self.losses.append(loss)
        self.val_loss.append(val_loss)
        self._write_value("epoch_acc", acc)
        self._write_value("epoch_loss", loss)
        self._write_value("epoch_val_loss", val_loss)
        self.loss_plot()

    def loss_plot(self):
        """Render and save the loss plot and the accuracy plot."""
        iters = range(len(self.losses))

        plt.figure()
        plt.plot(iters, self.losses, 'red', linewidth=2, label='train loss')
        plt.plot(iters, self.val_loss, 'coral', linewidth=2, label='val loss')
        try:
            # Savitzky-Golay smoothing: use a smaller window for short
            # histories so the filter's window fits the data.
            if len(self.losses) < 25:
                num = 5
            else:
                num = 15
            plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle='--', linewidth=2, label='smooth train loss')
            plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle='--', linewidth=2, label='smooth val loss')
        except Exception:
            # Smoothing is best-effort: when the filter cannot be applied
            # (e.g. too few points) just skip the smoothed curves.
            # (Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
        plt.grid(True)
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(loc="upper right")
        plt.savefig(os.path.join(self.save_path, "epoch_loss_" + str(self.time_str) + ".png"))
        plt.cla()
        plt.close("all")

        plt.figure()
        plt.plot(iters, self.acc, 'red', linewidth=2, label='lfw acc')
        try:
            if len(self.losses) < 25:
                num = 5
            else:
                num = 15
            plt.plot(iters, scipy.signal.savgol_filter(self.acc, num, 3), 'green', linestyle='--', linewidth=2, label='smooth lfw acc')
        except Exception:
            # Best-effort smoothing, same rationale as above.
            pass
        plt.grid(True)
        plt.xlabel('Epoch')
        plt.ylabel('Lfw Acc')
        plt.legend(loc="upper right")
        plt.savefig(os.path.join(self.save_path, "epoch_acc_" + str(self.time_str) + ".png"))
        plt.cla()
        plt.close("all")
| [
"matplotlib.pyplot.grid",
"torch.nn.init.constant_",
"matplotlib.pyplot.ylabel",
"torch.pow",
"torch.nn.init.xavier_normal_",
"torch.nn.init.orthogonal_",
"torch.sum",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.nn.init.kaiming_normal_",
"matplotlib.pyplot.close... | [((595, 618), 'numpy.where', 'np.where', (['(keep_all == 1)'], {}), '(keep_all == 1)\n', (603, 618), True, 'import numpy as np\n'), ((2069, 2092), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2090, 2092), False, 'import datetime\n'), ((2113, 2171), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['curr_time', '"""%Y_%m_%d_%H_%M_%S"""'], {}), "(curr_time, '%Y_%m_%d_%H_%M_%S')\n", (2139, 2171), False, 'import datetime\n'), ((2435, 2462), 'os.makedirs', 'os.makedirs', (['self.save_path'], {}), '(self.save_path)\n', (2446, 2462), False, 'import os\n'), ((3222, 3234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3232, 3234), True, 'from matplotlib import pyplot as plt\n'), ((3244, 3312), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'self.losses', '"""red"""'], {'linewidth': '(2)', 'label': '"""train loss"""'}), "(iters, self.losses, 'red', linewidth=2, label='train loss')\n", (3252, 3312), True, 'from matplotlib import pyplot as plt\n'), ((3324, 3394), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'self.val_loss', '"""coral"""'], {'linewidth': '(2)', 'label': '"""val loss"""'}), "(iters, self.val_loss, 'coral', linewidth=2, label='val loss')\n", (3332, 3394), True, 'from matplotlib import pyplot as plt\n'), ((3876, 3890), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3884, 3890), True, 'from matplotlib import pyplot as plt\n'), ((3900, 3919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3910, 3919), True, 'from matplotlib import pyplot as plt\n'), ((3929, 3947), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3939, 3947), True, 'from matplotlib import pyplot as plt\n'), ((3957, 3986), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3967, 3986), True, 'from matplotlib import pyplot as plt\n'), ((4096, 4105), 'matplotlib.pyplot.cla', 
'plt.cla', ([], {}), '()\n', (4103, 4105), True, 'from matplotlib import pyplot as plt\n'), ((4115, 4131), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4124, 4131), True, 'from matplotlib import pyplot as plt\n'), ((4143, 4155), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4153, 4155), True, 'from matplotlib import pyplot as plt\n'), ((4165, 4227), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'self.acc', '"""red"""'], {'linewidth': '(2)', 'label': '"""lfw acc"""'}), "(iters, self.acc, 'red', linewidth=2, label='lfw acc')\n", (4173, 4227), True, 'from matplotlib import pyplot as plt\n'), ((4554, 4568), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4562, 4568), True, 'from matplotlib import pyplot as plt\n'), ((4578, 4597), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4588, 4597), True, 'from matplotlib import pyplot as plt\n'), ((4607, 4628), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Lfw Acc"""'], {}), "('Lfw Acc')\n", (4617, 4628), True, 'from matplotlib import pyplot as plt\n'), ((4638, 4667), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4648, 4667), True, 'from matplotlib import pyplot as plt\n'), ((4776, 4785), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4783, 4785), True, 'from matplotlib import pyplot as plt\n'), ((4795, 4811), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4804, 4811), True, 'from matplotlib import pyplot as plt\n'), ((791, 812), 'torch.sum', 'torch.sum', (['basic_loss'], {}), '(basic_loss)\n', (800, 812), False, 'import torch\n'), ((361, 392), 'torch.pow', 'torch.pow', (['(anchor - positive)', '(2)'], {}), '(anchor - positive, 2)\n', (370, 392), False, 'import torch\n'), ((444, 475), 'torch.pow', 'torch.pow', (['(anchor - negative)', '(2)'], {}), '(anchor - negative, 2)\n', (453, 475), False, 'import torch\n'), ((823, 838), 
'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (835, 838), False, 'import torch\n'), ((1173, 1225), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight.data', '(0.0)', 'init_gain'], {}), '(m.weight.data, 0.0, init_gain)\n', (1194, 1225), False, 'import torch\n'), ((1774, 1821), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (1795, 1821), False, 'import torch\n'), ((1835, 1876), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (1858, 1876), False, 'import torch\n'), ((1284, 1343), 'torch.nn.init.xavier_normal_', 'torch.nn.init.xavier_normal_', (['m.weight.data'], {'gain': 'init_gain'}), '(m.weight.data, gain=init_gain)\n', (1312, 1343), False, 'import torch\n'), ((1403, 1467), 'torch.nn.init.kaiming_normal_', 'torch.nn.init.kaiming_normal_', (['m.weight.data'], {'a': '(0)', 'mode': '"""fan_in"""'}), "(m.weight.data, a=0, mode='fan_in')\n", (1432, 1467), False, 'import torch\n'), ((1530, 1586), 'torch.nn.init.orthogonal_', 'torch.nn.init.orthogonal_', (['m.weight.data'], {'gain': 'init_gain'}), '(m.weight.data, gain=init_gain)\n', (1555, 1586), False, 'import torch\n')] |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree
    Parameters
    ----------
    n_samples: int, default=100
        Number of samples to generate
    noise: float, default = 5
        Noise level to simulate in responses
    """
    # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
    # and split into training- and testing portions
    f = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
    X = np.linspace(-1.2, 2, n_samples)
    y = f(X) + np.random.normal(0, noise, size=n_samples)
    # Train on 2/3 of the samples, test on the remaining 1/3.
    X_train, y_train, X_test, y_test = split_train_test(pd.DataFrame(X),
                                                        pd.Series(y),
                                                        2 / 3)
    # Flatten back to 1-D arrays for the fitting/plotting code below.
    X_train, X_test, y_train, y_test = np.array(X_train).flatten(), \
                                       np.array(X_test).flatten(), \
                                       np.array(y_train).flatten(), \
                                       np.array(y_test).flatten()
    # Scatter the noiseless model (black) against the noisy train (blue)
    # and test (red) samples.
    make_subplots(1, 1).add_traces([go.Scatter(x=X, y=f(X), mode="markers",
                                               marker=dict(color="black", opacity=.7),
                                               showlegend=False),
                                    go.Scatter(x=X_train, y=y_train, mode="markers",
                                               marker=dict(color="blue", opacity=.7),
                                               showlegend=False),
                                    go.Scatter(x=X_test, y=y_test, mode="markers",
                                               marker=dict(color="red", opacity=.7),
                                               showlegend=False)],).update_layout(
        title="Polynomial Fitting",
        margin=dict(t=100)).show()
    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    fig = go.Figure()
    test_errors = np.zeros(11)
    train_errors = np.zeros(11)
    # 5-fold CV per degree; record mean train/validation MSE.
    for degree in range(11):
        train_errors[degree], test_errors[degree] = cross_validate(PolynomialFitting(degree), X_train, y_train, mean_square_error, 5)
    x_axis = np.linspace(0, 10, 11)
    fig.add_trace(go.Scatter(x=x_axis, y=train_errors, mode="markers+lines",
                             name="Train Error",
                             line=dict(color="blue", width=2)))
    fig.add_trace(go.Scatter(x=x_axis, y=test_errors, mode="markers+lines",
                             name="Test Error",
                             line=dict(color="red", width=2)))
    fig.update_layout(title="Fitting Polynomials of Different Degrees",
                      margin=dict(t=100))
    fig.show()
    # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
    best_degree = np.argmin(test_errors)
    model = PolynomialFitting(int(best_degree)).fit(X_train, y_train)
    model_y_test = model.predict(X_test)
    print(f"Test error for {best_degree}-degree polynomial model: {mean_square_error(y_test, model_y_test)}")
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions

    Parameters
    ----------
    n_samples: int, default=50
        Number of samples to generate

    n_evaluations: int, default = 500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    X, y = datasets.load_diabetes(return_X_y=True)
    X_train, y_train = X[:n_samples], y[:n_samples]
    X_test, y_test = X[n_samples:], y[n_samples:]

    # Question 7 - Perform CV for different values of the regularization parameter
    # for Ridge and Lasso regressions.
    # BUG FIX: the original evaluated lambda = 2*i/n_evaluations (step 2/n) but
    # plotted against np.linspace(0, 2, n_evaluations) (step 2/(n-1)), so each
    # plotted point sat at a slightly different lambda than the one actually
    # cross-validated. One shared array keeps evaluation and plotting aligned.
    lam_range = 2 * np.arange(n_evaluations) / n_evaluations
    rig_train_errors = np.zeros(n_evaluations)
    rig_test_errors = np.zeros(n_evaluations)
    las_train_errors = np.zeros(n_evaluations)
    las_test_errors = np.zeros(n_evaluations)
    for i, lam in enumerate(lam_range):
        rig_train_errors[i], rig_test_errors[i] = cross_validate(
            RidgeRegression(lam), X_train, y_train, mean_square_error, 5)
        las_train_errors[i], las_test_errors[i] = cross_validate(
            Lasso(lam), X_train, y_train, mean_square_error, 5)

    fig = go.Figure()
    # One trace per (error curve, legend label, colour) triple.
    for errors, label, color in ((rig_train_errors, "Ridge Train Error", "blue"),
                                 (rig_test_errors, "Ridge Test Error", "red"),
                                 (las_train_errors, "Lasso Train Error", "green"),
                                 (las_test_errors, "Lasso Test Error", "orange")):
        fig.add_trace(go.Scatter(x=lam_range, y=errors,
                                 mode="lines", name=label,
                                 line=dict(color=color, width=2)))
    fig.update_layout(title="Fitting Ridge and Lasso Regressions",
                      margin=dict(t=100))
    fig.show()

    # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
    # Select the lambda with lowest CV test error directly from the evaluated grid.
    best_reg_param = lam_range[np.argmin(rig_test_errors)]
    best_lasso_param = lam_range[np.argmin(las_test_errors)]
    reg = RidgeRegression(float(best_reg_param)).fit(X_train, y_train)
    lasso = Lasso(best_lasso_param).fit(X_train, y_train)
    LS = LinearRegression().fit(X_train, y_train)
    print(f"Ridge Regression: {mean_square_error(y_test, reg.predict(X_test))}")
    print(f"Lasso Regression: {mean_square_error(y_test, lasso.predict(X_test))}")
    print(f"Least Squares: {mean_square_error(y_test, LS.predict(X_test))}")
if __name__ == '__main__':
    # Fixed seed so the noisy datasets (and therefore all figures/prints)
    # are reproducible across runs.
    np.random.seed(0)
    # Polynomial-degree selection: default setting first, then the two
    # explicit (n_samples, noise) scenarios from the exercise.
    select_polynomial_degree()
    for sample_count, noise_level in ((100, 0), (1500, 10)):
        select_polynomial_degree(sample_count, noise_level)
    # Regularization-parameter selection for Ridge/Lasso on diabetes data.
    select_regularization_parameter()
| [
"numpy.random.normal",
"pandas.Series",
"plotly.subplots.make_subplots",
"pandas.DataFrame",
"sklearn.linear_model.Lasso",
"IMLearn.metrics.mean_square_error",
"IMLearn.learners.regressors.RidgeRegression",
"plotly.graph_objects.Figure",
"numpy.linspace",
"numpy.zeros",
"sklearn.datasets.load_di... | [((1070, 1101), 'numpy.linspace', 'np.linspace', (['(-1.2)', '(2)', 'n_samples'], {}), '(-1.2, 2, n_samples)\n', (1081, 1101), True, 'import numpy as np\n'), ((2421, 2432), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2430, 2432), True, 'import plotly.graph_objects as go\n'), ((2451, 2463), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (2459, 2463), True, 'import numpy as np\n'), ((2483, 2495), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (2491, 2495), True, 'import numpy as np\n'), ((2672, 2694), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (2683, 2694), True, 'import numpy as np\n'), ((3314, 3336), 'numpy.argmin', 'np.argmin', (['test_errors'], {}), '(test_errors)\n', (3323, 3336), True, 'import numpy as np\n'), ((4139, 4178), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (4161, 4178), False, 'from sklearn import datasets\n'), ((4404, 4415), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4413, 4415), True, 'import plotly.graph_objects as go\n'), ((4438, 4461), 'numpy.zeros', 'np.zeros', (['n_evaluations'], {}), '(n_evaluations)\n', (4446, 4461), True, 'import numpy as np\n'), ((4485, 4508), 'numpy.zeros', 'np.zeros', (['n_evaluations'], {}), '(n_evaluations)\n', (4493, 4508), True, 'import numpy as np\n'), ((4531, 4554), 'numpy.zeros', 'np.zeros', (['n_evaluations'], {}), '(n_evaluations)\n', (4539, 4554), True, 'import numpy as np\n'), ((4578, 4601), 'numpy.zeros', 'np.zeros', (['n_evaluations'], {}), '(n_evaluations)\n', (4586, 4601), True, 'import numpy as np\n'), ((4946, 4978), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'n_evaluations'], {}), '(0, 2, n_evaluations)\n', (4957, 4978), True, 'import numpy as np\n'), ((6610, 6627), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6624, 6627), True, 'import numpy as np\n'), ((1117, 1159), 'numpy.random.normal', 
'np.random.normal', (['(0)', 'noise'], {'size': 'n_samples'}), '(0, noise, size=n_samples)\n', (1133, 1159), True, 'import numpy as np\n'), ((1216, 1231), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1228, 1231), True, 'import pandas as pd\n'), ((1289, 1301), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1298, 1301), True, 'import pandas as pd\n'), ((6063, 6089), 'numpy.argmin', 'np.argmin', (['rig_test_errors'], {}), '(rig_test_errors)\n', (6072, 6089), True, 'import numpy as np\n'), ((6129, 6155), 'numpy.argmin', 'np.argmin', (['las_test_errors'], {}), '(las_test_errors)\n', (6138, 6155), True, 'import numpy as np\n'), ((2592, 2617), 'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', (['degree'], {}), '(degree)\n', (2609, 2617), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((4742, 4768), 'IMLearn.learners.regressors.RidgeRegression', 'RidgeRegression', (['reg_param'], {}), '(reg_param)\n', (4757, 4768), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((4875, 4891), 'sklearn.linear_model.Lasso', 'Lasso', (['reg_param'], {}), '(reg_param)\n', (4880, 4891), False, 'from sklearn.linear_model import Lasso\n'), ((6240, 6263), 'sklearn.linear_model.Lasso', 'Lasso', (['best_lasso_param'], {}), '(best_lasso_param)\n', (6245, 6263), False, 'from sklearn.linear_model import Lasso\n'), ((6295, 6313), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6311, 6313), False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((1405, 1422), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1413, 1422), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (1483, 1491), True, 'import numpy as np\n'), ((1544, 1561), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1552, 
1561), True, 'import numpy as np\n'), ((1614, 1630), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1622, 1630), True, 'import numpy as np\n'), ((3515, 3554), 'IMLearn.metrics.mean_square_error', 'mean_square_error', (['y_test', 'model_y_test'], {}), '(y_test, model_y_test)\n', (3532, 3554), False, 'from IMLearn.metrics import mean_square_error\n'), ((1645, 1664), 'plotly.subplots.make_subplots', 'make_subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1658, 1664), False, 'from plotly.subplots import make_subplots\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.