code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 21:30:15 2017
@author: jeremy
"""
import numpy as np
from scipy.optimize import curve_fit
def getGaussianFit(xVals, yVals, yErrors=None):
    """Fit a gaussian to (xVals, yVals) and return the fitted y values.

    Parameters
    ----------
    xVals, yVals : array-like
        The data points to fit.
    yErrors : array-like, optional
        Per-point uncertainties, forwarded to ``curve_fit`` as ``sigma``.
        Defaults to uniform weights of 1.

    Returns
    -------
    numpy.ndarray
        The fitted gaussian A*exp(-(x-mu)^2 / (2*sigma^2)) evaluated at xVals.
    """
    # BUG FIX: ``yErrors == None`` compares elementwise when an ndarray is
    # passed, so the ``if`` raises "truth value of an array is ambiguous".
    # Identity comparison is the correct None check.
    if yErrors is None:
        yErrors = np.full(yVals.size, 1)
    # gaussian function
    gauss = lambda x, A, mu, sigma: A * np.exp(-(x - mu)**2 / (2.0 * sigma**2))
    # initial guesses for A, mu, sigma
    p0 = [1.0, 0.0, 1.0]
    params, cov_matrix = curve_fit(gauss, xVals, yVals, p0, yErrors)
    # evaluate the fitted curve at the input x values (A, mu, sigma)
    fitcurve = gauss(xVals, params[0], params[1], params[2])
    return fitcurve
def getSAAAvgSpec(data, ID_x, ID_y):
    """
    Averages associated spectral values in an SAA, returns the averaged spectrum array
    """
    # For each spectral slice, select the (ID_y x ID_x) sub-grid and
    # collapse it to its mean, yielding one value per slice.
    return [np.mean(spec_slice[np.ix_(ID_y, ID_x)]) for spec_slice in data]
def getSTDDEVError(xVals, yVals, windowStart, windowStop):
    """
    Gets the standard deviation of the given y values between two points in the x values
    """
    # Boolean mask selecting every sample whose x value lies inside the
    # inclusive window [windowStart, windowStop].
    in_window = (xVals >= windowStart) & (xVals <= windowStop)
    return np.std(yVals[in_window])
| [
"scipy.optimize.curve_fit",
"numpy.mean",
"numpy.ix_",
"numpy.exp",
"numpy.std",
"numpy.full"
] | [((575, 618), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss', 'xVals', 'yVals', 'p0', 'yErrors'], {}), '(gauss, xVals, yVals, p0, yErrors)\n', (584, 618), False, 'from scipy.optimize import curve_fit\n'), ((1477, 1496), 'numpy.std', 'np.std', (['windowYVals'], {}), '(windowYVals)\n', (1483, 1496), True, 'import numpy as np\n'), ((350, 372), 'numpy.full', 'np.full', (['yVals.size', '(1)'], {}), '(yVals.size, 1)\n', (357, 372), True, 'import numpy as np\n'), ((442, 485), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2.0 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2.0 * sigma ** 2))\n', (448, 485), True, 'import numpy as np\n'), ((985, 1003), 'numpy.ix_', 'np.ix_', (['ID_y', 'ID_x'], {}), '(ID_y, ID_x)\n', (991, 1003), True, 'import numpy as np\n'), ((1080, 1101), 'numpy.mean', 'np.mean', (['SAAspecslice'], {}), '(SAAspecslice)\n', (1087, 1101), True, 'import numpy as np\n')] |
import os
import pytest
from flair.visual import *
from flair.data import Sentence
from flair.embeddings import FlairEmbeddings, StackedEmbeddings
import numpy
from flair.visual.manifold import Visualizer, tSNE
from flair.visual.training_curves import Plotter
@pytest.mark.slow
def test_visualize_word_emeddings(resources_path):
    """Render the word-embedding visualisation HTML for the snippet sentences."""
    with open(resources_path / 'visual/snippet.txt') as f:
        raw_lines = [line for line in f.read().split('\n') if line]
    sentences = [Sentence(line) for line in raw_lines]

    forward_lm = FlairEmbeddings('news-forward')
    backward_lm = FlairEmbeddings('news-backward')
    stacked = StackedEmbeddings([backward_lm, forward_lm])

    Visualizer().visualize_word_emeddings(
        stacked, sentences,
        str(resources_path / 'visual/sentence_embeddings.html'))

    # clean up directory
    (resources_path / 'visual/sentence_embeddings.html').unlink()
@pytest.mark.slow
def test_visualize_char_emeddings(resources_path):
    """Smoke-test character-embedding visualisation for a forward LM.

    BUG FIX: this function was previously also named
    ``test_visualize_word_emeddings``, which redefined — and therefore
    silently disabled — the word-embedding test above it (Python keeps only
    the last binding of a name, so pytest never collected the first test).
    The name now matches the ``visualize_char_emeddings`` call it exercises.
    """
    with open(resources_path / 'visual/snippet.txt') as f:
        sentences = [x for x in f.read().split('\n') if x]
    sentences = [Sentence(x) for x in sentences]
    charlm_embedding_forward = FlairEmbeddings('news-forward')
    visualizer = Visualizer()
    visualizer.visualize_char_emeddings(charlm_embedding_forward, sentences, str(resources_path / 'visual/sentence_embeddings.html'))
    # clean up directory
    (resources_path / 'visual/sentence_embeddings.html').unlink()
@pytest.mark.slow
def test_visualize(resources_path):
    """End-to-end check: char embeddings -> tSNE projection -> HTML plot."""
    with open(resources_path / 'visual/snippet.txt') as f:
        raw_lines = [line for line in f.read().split('\n') if line]
    sentences = [Sentence(line) for line in raw_lines]

    visualizer = Visualizer()

    # Prepare forward and backward character embeddings separately,
    # then concatenate them feature-wise.
    X_forward = visualizer.prepare_char_embeddings(
        FlairEmbeddings('news-forward'), sentences)
    X_backward = visualizer.prepare_char_embeddings(
        FlairEmbeddings('news-backward'), sentences)
    X = numpy.concatenate([X_forward, X_backward], axis=1)

    contexts = visualizer.char_contexts(sentences)
    reduced = tSNE().fit(X)
    visualizer.visualize(reduced, contexts, str(resources_path / 'visual/char_embeddings.html'))

    # clean up directory
    (resources_path / 'visual/char_embeddings.html').unlink()
def test_highlighter(resources_path):
    """Render a per-character highlight HTML file for the first snippet line."""
    with (resources_path / 'visual/snippet.txt').open() as f:
        raw_lines = [line for line in f.read().split('\n') if line]

    lm_embeddings = FlairEmbeddings('news-forward')
    features = lm_embeddings.lm.get_representation(raw_lines[0]).squeeze()

    highlighter = Highlighter()
    highlighter.highlight_selection(
        features, raw_lines[0], n=1000,
        file_=str(resources_path / 'visual/highligh.html'))

    # clean up directory
    (resources_path / 'visual/highligh.html').unlink()
def test_plotting_training_curves_and_weights(resources_path):
    """Plot loss curves and weight traces, then delete the generated PNGs."""
    plotter = Plotter()
    plotter.plot_training_curves(resources_path / 'visual/loss.tsv')
    plotter.plot_weights(resources_path / 'visual/weights.txt')

    # remove the images the plotter just wrote
    (resources_path / 'visual/weights.png').unlink()
    (resources_path / 'visual/training.png').unlink()
| [
"flair.visual.manifold.tSNE",
"flair.embeddings.StackedEmbeddings",
"flair.visual.manifold.Visualizer",
"flair.visual.training_curves.Plotter",
"flair.embeddings.FlairEmbeddings",
"numpy.concatenate",
"flair.data.Sentence"
] | [((535, 566), 'flair.embeddings.FlairEmbeddings', 'FlairEmbeddings', (['"""news-forward"""'], {}), "('news-forward')\n", (550, 566), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((599, 631), 'flair.embeddings.FlairEmbeddings', 'FlairEmbeddings', (['"""news-backward"""'], {}), "('news-backward')\n", (614, 631), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((650, 722), 'flair.embeddings.StackedEmbeddings', 'StackedEmbeddings', (['[charlm_embedding_backward, charlm_embedding_forward]'], {}), '([charlm_embedding_backward, charlm_embedding_forward])\n', (667, 722), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((741, 753), 'flair.visual.manifold.Visualizer', 'Visualizer', ([], {}), '()\n', (751, 753), False, 'from flair.visual.manifold import Visualizer, tSNE\n'), ((1238, 1269), 'flair.embeddings.FlairEmbeddings', 'FlairEmbeddings', (['"""news-forward"""'], {}), "('news-forward')\n", (1253, 1269), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((1288, 1300), 'flair.visual.manifold.Visualizer', 'Visualizer', ([], {}), '()\n', (1298, 1300), False, 'from flair.visual.manifold import Visualizer, tSNE\n'), ((1770, 1801), 'flair.embeddings.FlairEmbeddings', 'FlairEmbeddings', (['"""news-forward"""'], {}), "('news-forward')\n", (1785, 1801), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((1820, 1832), 'flair.visual.manifold.Visualizer', 'Visualizer', ([], {}), '()\n', (1830, 1832), False, 'from flair.visual.manifold import Visualizer, tSNE\n'), ((1926, 1958), 'flair.embeddings.FlairEmbeddings', 'FlairEmbeddings', (['"""news-backward"""'], {}), "('news-backward')\n", (1941, 1958), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((2044, 2094), 'numpy.concatenate', 'numpy.concatenate', (['[X_forward, X_backward]'], {'axis': '(1)'}), '([X_forward, X_backward], axis=1)\n', (2061, 2094), False, 
'import numpy\n'), ((2161, 2167), 'flair.visual.manifold.tSNE', 'tSNE', ([], {}), '()\n', (2165, 2167), False, 'from flair.visual.manifold import Visualizer, tSNE\n'), ((2561, 2592), 'flair.embeddings.FlairEmbeddings', 'FlairEmbeddings', (['"""news-forward"""'], {}), "('news-forward')\n", (2576, 2592), False, 'from flair.embeddings import FlairEmbeddings, StackedEmbeddings\n'), ((2949, 2958), 'flair.visual.training_curves.Plotter', 'Plotter', ([], {}), '()\n', (2956, 2958), False, 'from flair.visual.training_curves import Plotter\n'), ((471, 482), 'flair.data.Sentence', 'Sentence', (['x'], {}), '(x)\n', (479, 482), False, 'from flair.data import Sentence\n'), ((1174, 1185), 'flair.data.Sentence', 'Sentence', (['x'], {}), '(x)\n', (1182, 1185), False, 'from flair.data import Sentence\n'), ((1720, 1731), 'flair.data.Sentence', 'Sentence', (['x'], {}), '(x)\n', (1728, 1731), False, 'from flair.data import Sentence\n')] |
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
import inspect
import datetime
import tempfile
import os
import numpy as np
def raises(err, lamda):
    """Return True if calling ``lamda`` raises ``err``, False otherwise.

    Exceptions other than ``err`` are not caught and propagate normally.
    """
    try:
        lamda()
    except err:
        return True
    return False
def expand_tuples(L):
    """
    >>> expand_tuples([1, (2, 3)])
    [(1, 2), (1, 3)]

    >>> expand_tuples([1, 2])
    [(1, 2)]
    """
    # Cartesian-product expansion: each tuple element contributes one branch
    # per member, a scalar contributes exactly one branch.
    if not L:
        return [()]
    rest = expand_tuples(L[1:])
    head = L[0] if isinstance(L[0], tuple) else (L[0],)
    return [(item,) + t for t in rest for item in head]
@contextmanager
def tmpfile(extension=''):
    """Yield the path of a fresh temporary file and remove it on exit."""
    suffix = '.' + extension.lstrip('.')
    handle, filename = tempfile.mkstemp(suffix)
    yield filename
    try:
        if os.path.exists(filename):
            os.remove(filename)
    except OSError:
        # Windows occasionally refuses to delete a file whose low-level
        # handle is still open: close the handle and retry exactly once.
        if os.name == 'nt':
            os.close(handle)
            try:
                os.remove(filename)
            except OSError:  # finally give up
                pass
def keywords(func):
    """ Get the argument names of a function

    >>> def f(x, y=2):
    ...     pass

    >>> keywords(f)
    ['x', 'y']
    """
    if isinstance(func, type):
        # For a class, report the arguments of its constructor.
        return keywords(func.__init__)
    # FIX: inspect.getargspec was deprecated since Python 3.0 and removed in
    # Python 3.11.  getfullargspec returns the same positional-argument list
    # and additionally tolerates annotations and keyword-only arguments.
    return inspect.getfullargspec(func).args
def cls_name(cls):
    """Short display name: bare name for builtins, 'package.Name' otherwise."""
    if 'builtin' in cls.__module__:
        return cls.__name__
    top_package = cls.__module__.split('.')[0]
    return top_package + '.' + cls.__name__
@contextmanager
def filetext(text, extension='', open=open, mode='wt'):
    """Write ``text`` to a fresh temporary file and yield its path.

    The ``open`` callable is injectable so compressed/binary openers can be
    used; ``close`` failures from file-like objects without ``close`` are
    tolerated.
    """
    with tmpfile(extension=extension) as filename:
        handle = open(filename, mode=mode)
        try:
            handle.write(text)
        finally:
            try:
                handle.close()
            except AttributeError:
                pass

        yield filename
@contextmanager
def filetexts(d, open=open):
    """ Dumps a number of textfiles to disk, yields their names, then deletes them

    d - dict
        a mapping from filename to text like {'a.csv': '1,1\n2,2'}
    """
    for filename, text in d.items():
        handle = open(filename, 'wt')
        try:
            handle.write(text)
        finally:
            try:
                handle.close()
            except AttributeError:
                pass

    yield list(d)

    # best-effort cleanup of everything that was written
    for filename in d:
        if os.path.exists(filename):
            os.remove(filename)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
    """Assert two nested sequences are element-wise equal.

    Floats are compared with np.isclose (NaN matches NaN); midnight
    datetimes are collapsed to dates before comparison.
    """
    for left_row, right_row in zip(lhs, rhs):
        for left, right in zip(left_row, right_row):
            if isinstance(left, (np.floating, float)):
                # account for nans
                assert np.all(np.isclose(left, right, equal_nan=True))
                continue
            if isinstance(left, datetime.datetime):
                left = normalize_to_date(left)
            if isinstance(right, datetime.datetime):
                right = normalize_to_date(right)
            assert left == right
| [
"os.path.exists",
"numpy.isclose",
"os.close",
"inspect.getargspec",
"tempfile.mkstemp",
"os.remove"
] | [((797, 824), 'tempfile.mkstemp', 'tempfile.mkstemp', (['extension'], {}), '(extension)\n', (813, 824), False, 'import tempfile\n'), ((866, 890), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (880, 890), False, 'import os\n'), ((1394, 1418), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (1412, 1418), False, 'import inspect\n'), ((2385, 2409), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2399, 2409), False, 'import os\n'), ((904, 923), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (913, 923), False, 'import os\n'), ((2423, 2442), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2432, 2442), False, 'import os\n'), ((1023, 1039), 'os.close', 'os.close', (['handle'], {}), '(handle)\n', (1031, 1039), False, 'import os\n'), ((1073, 1092), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1082, 1092), False, 'import os\n'), ((2803, 2842), 'numpy.isclose', 'np.isclose', (['left', 'right'], {'equal_nan': '(True)'}), '(left, right, equal_nan=True)\n', (2813, 2842), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_copula_coupling._scipy_continuous_distns` scipy truncnorm workaround.
"""
import unittest
import numpy as np
import pytest
from scipy.stats import truncnorm as scipytruncnorm
from improver.ensemble_copula_coupling._scipy_continuous_distns import truncnorm
# Evaluation points shared by all parametrized cases.
LINSPACE = np.linspace(0, 1, 10)
ARANGE = list(range(-20, 20))


@pytest.mark.parametrize(
    "method,x",
    [
        ("ppf", LINSPACE),
        ("cdf", ARANGE),
        ("sf", ARANGE),
        ("pdf", ARANGE),
        ("logpdf", ARANGE),
    ],
)
def test_method(method, x):
    """
    Test each method available for scipy truncnorm.

    Test is between the scipy v1.3.3 truncnorm and the scipy truncnorm
    within the Python environment.
    """
    a, b = -1, 3
    loc, scale = 0, 3
    reference_dist = scipytruncnorm(a, b, loc, scale)
    workaround_dist = truncnorm(a, b, loc, scale)
    expected = getattr(reference_dist, method)(x)
    actual = getattr(workaround_dist, method)(x)
    np.testing.assert_allclose(actual, expected, rtol=1e-5)


if __name__ == "__main__":
    unittest.main()
| [
"numpy.testing.assert_allclose",
"improver.ensemble_copula_coupling._scipy_continuous_distns.truncnorm",
"pytest.mark.parametrize",
"numpy.linspace",
"scipy.stats.truncnorm",
"unittest.main"
] | [((1961, 1982), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1972, 1982), True, 'import numpy as np\n'), ((2016, 2147), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method,x"""', "[('ppf', LINSPACE), ('cdf', ARANGE), ('sf', ARANGE), ('pdf', ARANGE), (\n 'logpdf', ARANGE)]"], {}), "('method,x', [('ppf', LINSPACE), ('cdf', ARANGE), (\n 'sf', ARANGE), ('pdf', ARANGE), ('logpdf', ARANGE)])\n", (2039, 2147), False, 'import pytest\n'), ((2470, 2502), 'scipy.stats.truncnorm', 'scipytruncnorm', (['a', 'b', 'loc', 'scale'], {}), '(a, b, loc, scale)\n', (2484, 2502), True, 'from scipy.stats import truncnorm as scipytruncnorm\n'), ((2519, 2546), 'improver.ensemble_copula_coupling._scipy_continuous_distns.truncnorm', 'truncnorm', (['a', 'b', 'loc', 'scale'], {}), '(a, b, loc, scale)\n', (2528, 2546), False, 'from improver.ensemble_copula_coupling._scipy_continuous_distns import truncnorm\n'), ((2639, 2693), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'target'], {'rtol': '(1e-05)'}), '(result, target, rtol=1e-05)\n', (2665, 2693), True, 'import numpy as np\n'), ((2726, 2741), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2739, 2741), False, 'import unittest\n')] |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.monitor import Monitor, MonitorImageTile
import nnabla.utils.save as save
from nnabla.ext_utils import get_extension_context
from args import get_args, save_args
from generator import Generator
def generate(args):
    """Generate sample images with a trained generator (raw and EMA weights).

    Loads the iteration-100000 checkpoints, runs one latent batch through
    both generator copies, and writes tiled image grids via the monitors.
    """
    # Select the compute context (device / precision) for nnabla.
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)

    # Load trained parameters into separate scopes: the raw generator and
    # its exponential-moving-average copy.
    scope_gen = "Generator"
    scope_gen_ema = "Generator_EMA"
    with nn.parameter_scope(scope_gen):
        nn.load_parameters(args.model_load_path + '/Gen_iter100000.h5')
    with nn.parameter_scope(scope_gen_ema):
        nn.load_parameters(args.model_load_path + '/GenEMA_iter100000.h5')

    # Monitors tile each generated batch into one image; outputs in [-1, 1]
    # are rescaled to [0, 1] for saving.
    monitor = Monitor(args.monitor_path)
    tile_monitor = MonitorImageTile("Image Tile", monitor,
                                  num_images=args.batch_size,
                                  interval=1,
                                  normalize_method=lambda x: (x + 1.) / 2.)
    tile_monitor_ema = MonitorImageTile("Image Tile with EMA", monitor,
                                      num_images=args.batch_size,
                                      interval=1,
                                      normalize_method=lambda x: (x + 1.) / 2.)

    # Build both generator graphs over a shared latent variable.
    z_test = nn.Variable([args.batch_size, args.latent, 1, 1])
    x_test = Generator(z_test, scope_name=scope_gen,
                       train=True, img_size=args.image_size)[0]
    x_test_ema = Generator(z_test, scope_name=scope_gen_ema,
                           train=True, img_size=args.image_size)[0]

    # Sample one latent batch and run both forward passes.
    z_test.d = np.random.randn(args.batch_size, args.latent, 1, 1)
    x_test.forward(clear_buffer=True)
    x_test_ema.forward(clear_buffer=True)

    tile_monitor.add(0, x_test)
    tile_monitor_ema.add(0, x_test_ema)
def main():
    """Parse CLI arguments, record them, and run image generation."""
    args = get_args()
    save_args(args, "generate")
    generate(args)
if __name__ == '__main__':
main()
| [
"generator.Generator",
"nnabla.monitor.MonitorImageTile",
"nnabla.set_default_context",
"args.save_args",
"nnabla.load_parameters",
"nnabla.parameter_scope",
"nnabla.ext_utils.get_extension_context",
"args.get_args",
"nnabla.Variable",
"nnabla.monitor.Monitor",
"numpy.random.randn"
] | [((978, 1074), 'nnabla.ext_utils.get_extension_context', 'get_extension_context', (['args.context'], {'device_id': 'args.device_id', 'type_config': 'args.type_config'}), '(args.context, device_id=args.device_id, type_config=\n args.type_config)\n', (999, 1074), False, 'from nnabla.ext_utils import get_extension_context\n'), ((1083, 1110), 'nnabla.set_default_context', 'nn.set_default_context', (['ctx'], {}), '(ctx)\n', (1105, 1110), True, 'import nnabla as nn\n'), ((1502, 1528), 'nnabla.monitor.Monitor', 'Monitor', (['args.monitor_path'], {}), '(args.monitor_path)\n', (1509, 1528), False, 'from nnabla.monitor import Monitor, MonitorImageTile\n'), ((1559, 1686), 'nnabla.monitor.MonitorImageTile', 'MonitorImageTile', (['"""Image Tile"""', 'monitor'], {'num_images': 'args.batch_size', 'interval': '(1)', 'normalize_method': '(lambda x: (x + 1.0) / 2.0)'}), "('Image Tile', monitor, num_images=args.batch_size,\n interval=1, normalize_method=lambda x: (x + 1.0) / 2.0)\n", (1575, 1686), False, 'from nnabla.monitor import Monitor, MonitorImageTile\n'), ((1856, 1992), 'nnabla.monitor.MonitorImageTile', 'MonitorImageTile', (['"""Image Tile with EMA"""', 'monitor'], {'num_images': 'args.batch_size', 'interval': '(1)', 'normalize_method': '(lambda x: (x + 1.0) / 2.0)'}), "('Image Tile with EMA', monitor, num_images=args.batch_size,\n interval=1, normalize_method=lambda x: (x + 1.0) / 2.0)\n", (1872, 1992), False, 'from nnabla.monitor import Monitor, MonitorImageTile\n'), ((2154, 2203), 'nnabla.Variable', 'nn.Variable', (['[args.batch_size, args.latent, 1, 1]'], {}), '([args.batch_size, args.latent, 1, 1])\n', (2165, 2203), True, 'import nnabla as nn\n'), ((2465, 2516), 'numpy.random.randn', 'np.random.randn', (['args.batch_size', 'args.latent', '(1)', '(1)'], {}), '(args.batch_size, args.latent, 1, 1)\n', (2480, 2516), True, 'import numpy as np\n'), ((2717, 2727), 'args.get_args', 'get_args', ([], {}), '()\n', (2725, 2727), False, 'from args import get_args, save_args\n'), 
((2732, 2759), 'args.save_args', 'save_args', (['args', '"""generate"""'], {}), "(args, 'generate')\n", (2741, 2759), False, 'from args import get_args, save_args\n'), ((1322, 1351), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope_gen'], {}), '(scope_gen)\n', (1340, 1351), True, 'import nnabla as nn\n'), ((1361, 1395), 'nnabla.load_parameters', 'nn.load_parameters', (['gen_param_path'], {}), '(gen_param_path)\n', (1379, 1395), True, 'import nnabla as nn\n'), ((1405, 1438), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope_gen_ema'], {}), '(scope_gen_ema)\n', (1423, 1438), True, 'import nnabla as nn\n'), ((1448, 1486), 'nnabla.load_parameters', 'nn.load_parameters', (['gen_ema_param_path'], {}), '(gen_ema_param_path)\n', (1466, 1486), True, 'import nnabla as nn\n'), ((2217, 2294), 'generator.Generator', 'Generator', (['z_test'], {'scope_name': 'scope_gen', 'train': '(True)', 'img_size': 'args.image_size'}), '(z_test, scope_name=scope_gen, train=True, img_size=args.image_size)\n', (2226, 2294), False, 'from generator import Generator\n'), ((2338, 2424), 'generator.Generator', 'Generator', (['z_test'], {'scope_name': 'scope_gen_ema', 'train': '(True)', 'img_size': 'args.image_size'}), '(z_test, scope_name=scope_gen_ema, train=True, img_size=args.\n image_size)\n', (2347, 2424), False, 'from generator import Generator\n')] |
import numpy as np
__all__ = ['Gradient']
class Gradient:
    """Backpropagation: computes per-layer weight/bias gradients for a network.

    The network object must expose ``layers``; each layer must be callable,
    expose ``weights``, ``input_dimension``, ``output_dimension`` and an
    ``activation_function`` with a ``derivative(inputs, outputs)`` method.
    """

    @staticmethod
    def _forward(network, x, y):
        """Run a forward pass, recording every layer's input and output."""
        layer_inputs = [x]
        layer_outputs = [network.layers[0](x)]
        for i, layer in enumerate(network.layers[1:]):
            # The input of layer i+1 is the output of layer i.
            layer_inputs += [layer_outputs[i]]
            layer_outputs += [layer(layer_inputs[i + 1])]
        return layer_inputs, layer_outputs

    @staticmethod
    def _normalize_vectors(vectors):
        """Promote a bare float to a 1-element array; pass everything else through."""
        if isinstance(vectors, float):
            return np.asarray([vectors])
        return vectors

    @staticmethod
    def _calculate_layer_gradient(gradient, layer, inputs, outputs, outputs_gradient):
        """Append [dW, db] for one layer and return the gradient w.r.t. its input."""
        activation_function_gradient = outputs_gradient * layer.activation_function.derivative(inputs, outputs)
        weights_gradient = np.zeros((layer.input_dimension, layer.output_dimension))
        # Accumulate the outer product input_i (x) grad_i over the batch.
        for i in range(inputs.shape[0]):
            current_input = inputs[i].reshape((layer.input_dimension, 1))
            current_output = activation_function_gradient[i].reshape((1, layer.output_dimension))
            weights_gradient += np.dot(current_input, current_output)
        biases_gradient = np.sum(activation_function_gradient, axis=0)
        gradient += [[
            weights_gradient,
            biases_gradient
        ]]
        # Chain rule: propagate the gradient back through this layer's weights.
        return activation_function_gradient.dot(layer.weights.T)

    def __call__(self, network, inputs, outputs, error):
        """Return per-layer [dW, db] gradients, ordered first layer to last.

        ``error(expected, predicted, 1)`` must return d(loss)/d(prediction).
        """
        inputs = Gradient._normalize_vectors(inputs)
        outputs = Gradient._normalize_vectors(outputs)
        layer_inputs, layer_outputs = Gradient._forward(network, inputs, outputs)
        outputs_gradient = error(outputs, layer_outputs[-1], 1)
        gradient = []
        for i, layer in enumerate(network.layers[::-1]):
            outputs_gradient = Gradient._calculate_layer_gradient(
                gradient,
                layer,
                layer_inputs[-1 - i],
                layer_outputs[-1 - i],
                outputs_gradient
            )
        # FIX: the per-layer [dW, db] entries have heterogeneous shapes, so
        # this is a "ragged" construction; NumPy >= 1.24 raises ValueError
        # unless dtype=object is requested explicitly.
        return np.asarray(gradient[::-1], dtype=object)
| [
"numpy.dot",
"numpy.sum",
"numpy.zeros",
"numpy.asarray"
] | [((793, 850), 'numpy.zeros', 'np.zeros', (['(layer.input_dimension, layer.output_dimension)'], {}), '((layer.input_dimension, layer.output_dimension))\n', (801, 850), True, 'import numpy as np\n'), ((1160, 1204), 'numpy.sum', 'np.sum', (['activation_function_gradient'], {'axis': '(0)'}), '(activation_function_gradient, axis=0)\n', (1166, 1204), True, 'import numpy as np\n'), ((2009, 2035), 'numpy.asarray', 'np.asarray', (['gradient[::-1]'], {}), '(gradient[::-1])\n', (2019, 2035), True, 'import numpy as np\n'), ((503, 524), 'numpy.asarray', 'np.asarray', (['[vectors]'], {}), '([vectors])\n', (513, 524), True, 'import numpy as np\n'), ((1096, 1133), 'numpy.dot', 'np.dot', (['current_input', 'current_output'], {}), '(current_input, current_output)\n', (1102, 1133), True, 'import numpy as np\n')] |
### please change the corresponding path prefix ${PATH}
import sys, os, errno
import numpy as np
import csv
import json
import copy
assert len(sys.argv) == 2, "Usage: python log_analysis.py <test_log>"
log = sys.argv[1]
with open(log, 'r') as f:
lines = f.read().splitlines()
split='test'
with open('${PATH}/Charades/Charades_v1_%s.csv'%split, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
data = [row for row in reader][1:]
vid_length={}
for i, row in enumerate(data):
vid = row[0]
length= float(row[10])
vid_length[vid]=length
def nms(dets, thresh=0.4):
"""Pure Python NMS baseline."""
if len(dets) == 0: return []
x1 = dets[:, 0]
x2 = dets[:, 1]
scores = dets[:, 2]
lengths = x2 - x1
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
inter = np.maximum(0.0, xx2 - xx1)
ovr = inter / (lengths[i] + lengths[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def get_segments(data):
segments = []
vid = 'Background'
find_next = False
tmp = {'label' : 'c0', 'segment': [0, 0, 0]}
for l in data:
# video name and sliding window length
if "fg_name :" in l:
vid = l.split('/')[7]
continue
# frame index, time, confident score
elif "frames :" in l:
start_frame=int(l.split()[4])
stride = int(l.split()[6].split(']')[0])
elif "activity:" in l:
label = int(l.split()[1])
tmp['label'] ='c%03d' % (label-1)
find_next = True
elif "im_detect" in l:
return vid, segments
elif find_next:
tmp1 = copy.deepcopy(tmp)
left = ( float(l.split()[1])*stride + start_frame) / 25.0
right = ( float(l.split()[2])*stride + start_frame) / 25.0
score = float(l.split()[3].split(']')[0])
tmp1['segment'] = [left, right, score]
segments.append(tmp1)
segmentations = {}
predict_data = []
for l in lines:
if "gt_classes :" in l:
predict_data = []
predict_data.append(l)
if "im_detect:" in l:
vid, segments = get_segments(predict_data)
if vid not in segmentations:
segmentations[vid] = []
segmentations[vid] += segments
res = {}
for vid, vinfo in segmentations.iteritems():
labels = list(set([d['label'] for d in vinfo]))
res[vid] = []
for lab in labels:
nms_in = [d['segment'] for d in vinfo if d['label'] == lab]
keep = nms(np.array(nms_in), thresh=0.4)
for i in keep:
tmp = {'label':lab, 'segment': nms_in[i]}
res[vid].append(tmp)
SAMPLE = 25
text_file = open("results.txt", "w")
text_file.close()
text_file = open("results.txt", "w")
for vid, vinfo in res.iteritems():
length = len(os.listdir('../../../preprocess/charades/frames/'+vid))
for i in xrange(SAMPLE):
tmp = '%s %d' % (vid, i)
t = i *vid_length[vid] * 1.0 / SAMPLE
select = [d for d in vinfo if d['segment'][0] <= t and d['segment'][1] >= t]
scores = {}
for d in select:
if d['label'] not in scores:
scores[d['label']] = d['segment'][2]
else:
if d['segment'][2] > scores[d['label']]:
scores[d['label']] = d['segment'][2]
for j in xrange(157):
lab = 'c%03d'%j
tmp += ' ' + (str(scores[lab]) if lab in scores else '0')
text_file.write(tmp + '\n')
text_file.close()
| [
"os.listdir",
"numpy.minimum",
"numpy.where",
"numpy.array",
"copy.deepcopy",
"numpy.maximum",
"csv.reader"
] | [((377, 411), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (387, 411), False, 'import csv\n'), ((877, 909), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (887, 909), True, 'import numpy as np\n'), ((924, 956), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (934, 956), True, 'import numpy as np\n'), ((973, 999), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1)'], {}), '(0.0, xx2 - xx1)\n', (983, 999), True, 'import numpy as np\n'), ((2916, 2972), 'os.listdir', 'os.listdir', (["('../../../preprocess/charades/frames/' + vid)"], {}), "('../../../preprocess/charades/frames/' + vid)\n", (2926, 2972), False, 'import sys, os, errno\n'), ((1079, 1102), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1087, 1102), True, 'import numpy as np\n'), ((2636, 2652), 'numpy.array', 'np.array', (['nms_in'], {}), '(nms_in)\n', (2644, 2652), True, 'import numpy as np\n'), ((1831, 1849), 'copy.deepcopy', 'copy.deepcopy', (['tmp'], {}), '(tmp)\n', (1844, 1849), False, 'import copy\n')] |
# IPython log file
#
# Replay of an interactive pandas exploration session captured with the
# %logstart magic.  The get_ipython() calls require IPython to replay, and
# several lines record mistakes made during the session — they will raise
# when executed and are annotated below.
get_ipython().run_line_magic('logstart', '')
get_ipython().run_line_magic('logstart', '')
get_ipython().run_line_magic('logstart', '')
import pandas as pd
import pandas as pd
from pandas import Series,DataFrame
# --- unique values, value counts and membership ---
obj = Series(['c','a','d','a','a','b','b','c','c'])
obj
uniques = obj.unique()
uniques = obj.unique()
uniques
obj.value_counts
obj.value_counts()
# NOTE: .values is an ndarray attribute, not a method — the call below
# raises TypeError; corrected on the following line.
obj.values()
obj.values
pd.value_counts(obj.values,sort=False)
mask = obj.isin(['b','c'])
mask
obj[mask]
data = DataFrame({'qu1':[1,3,4,3,4],'qu2':[2,3,1,2,3],'qu3':[1,5,2,4,4]})
data
pd.value_counts(data)
get_ipython().run_line_magic('pinfo', 'pd.value_counts')
pd.value_counts(data.values)
pd.value_counts(data.values)
data.values
# Per-column histogram via apply.
data.apply(pd.value_counts)
counts = data.apply(pd.value_counts)
counts.plot(kind='bar')
# typo: 'matplatlib' — fixed two lines below with the %matplotlib magic.
get_ipython().run_line_magic('matplatlib', '')
get_ipython().run_line_magic('matplatlib', 'inline')
get_ipython().run_line_magic('matplotlib', 'inline')
counts.plot(kind='bar')
# --- handling missing data ---
# 'np.nan' below is a literal string; fixed after importing numpy.
string_data = Series(['aardvark','artichoke','np.nan','avocado'])
string_data
string_data = Series(['aardvark','artichoke',np.nan,'avocado'])
import numpy as np
string_data = Series(['aardvark','artichoke',np.nan,'avocado'])
string_data
string_data.isnull()
string_data.isna()
string_data.isnull()
string_data.isna()
from numpy import nan as NA
data = Series([1,NA,3.5,NA,7])
data
data.dropna()
# NOTE: drop() needs labels to drop — the bare call below raises.
data.drop()
data.drop
data.notnull()
# mistake: rows not wrapped in an outer list; corrected on the next line.
data = DataFrame([1,6.5,3],[1,NA,NA],[NA,NA,NA],[NA,6.5,3])
data = DataFrame([[1,6.5,3],[1,NA,NA],[NA,NA,NA],[NA,6.5,3]])
data
data.dropna()
data.dropna(how='all')
data[4]=NA
data
data.dropna(axis=1,how='all')
df = DataFrame(np.random.randn(7,3))
df
# NOTE: .ix was deprecated and has been removed in modern pandas.
df.ix[:2,1] = NA
df
df.loc[:3,2] = NA
df
# Experimenting with the dropna `thresh` parameter (minimum non-NA count).
df.dropna()
df.dropna(thresh=1)
df.dropna(thresh=2)
df.dropna(thresh=4)
df.dropna(thresh=0)
df.dropna(thresh=1)
df.dropna(thresh=2)
df.dropna(thresh=3)
df.dropna(thresh=4)
df.dropna(thresh=3)
get_ipython().run_line_magic('logstart', '')
df.dropna(thresh=1)
df.dropna(thresh=2)
# --- filling missing data ---
df.fillna(0)
df
df.fillna({1:0.5,2:1,4:4})
_
__
print(_)
get_ipython().system('pwd')
get_ipython().system('cd')
_
_ = df.fillna(0,inplace=True)
df
df = DataFrame(np.random.randn(6,3))
df.loc[2:,1] = NA;df.loc[4:,2] = NA
df
# typo: 'mmethod' raises TypeError; corrected on the next line.
df.fillna(mmethod='ffill')
df.fillna(method='ffill')
# --- hierarchical (MultiIndex) indexing ---
data = Series(np.random.randn(10),index=[['a','a','a','b','b','b','c','c','d','d'],[1,2,3,1,2,3,1,2,2,3]])
data
data.index
data['b']
# NOTE: the inner index level holds integers, so the string key '1' below
# raises KeyError; integer forms follow.
data['b']['1']
data['b'][:1]
data[:,2]
data['b'][1]
df
df[0]
df[:0]
frame = DataFrame(np.arange(12).reshape(4,3))
frame
frame = DataFrame(np.arange(12).reshape(4,3),index=[['a','a','b','b'],[1,2,1,2]],columns=[['ohio','ohio','colorado'],['green','red','green']])
frame
frame['a']
# NameError: `a` is undefined (presumably frame['a'] was meant).
frame[a]
frame['a']
frame['ohio']
frame['ohio']['a']
frame['ohio']['a':]
frame['ohio'][['a',]]
frame['ohio'][['a']]
frame['ohio']['a']
frame['ohio'][:'a']
frame['ohio'][:'a'][1]
frame['ohio'][:'a'][:1]
frame[:'a'][:1]
frame[:'a']
# --- session restart: unique values revisited ---
import pandas as pd
from pandas import Series,DataFrame
obj = Series(['c','a','d','a','a','b','b','c','c'])
obj
uniques = obj.unique()
uniques
obj.value_counts()
| [
"pandas.Series",
"pandas.value_counts",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.arange"
] | [((237, 290), 'pandas.Series', 'Series', (["['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c']"], {}), "(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])\n", (243, 290), False, 'from pandas import Series, DataFrame\n'), ((401, 440), 'pandas.value_counts', 'pd.value_counts', (['obj.values'], {'sort': '(False)'}), '(obj.values, sort=False)\n', (416, 440), True, 'import pandas as pd\n'), ((489, 576), 'pandas.DataFrame', 'DataFrame', (["{'qu1': [1, 3, 4, 3, 4], 'qu2': [2, 3, 1, 2, 3], 'qu3': [1, 5, 2, 4, 4]}"], {}), "({'qu1': [1, 3, 4, 3, 4], 'qu2': [2, 3, 1, 2, 3], 'qu3': [1, 5, 2,\n 4, 4]})\n", (498, 576), False, 'from pandas import Series, DataFrame\n'), ((561, 582), 'pandas.value_counts', 'pd.value_counts', (['data'], {}), '(data)\n', (576, 582), True, 'import pandas as pd\n'), ((640, 668), 'pandas.value_counts', 'pd.value_counts', (['data.values'], {}), '(data.values)\n', (655, 668), True, 'import pandas as pd\n'), ((669, 697), 'pandas.value_counts', 'pd.value_counts', (['data.values'], {}), '(data.values)\n', (684, 697), True, 'import pandas as pd\n'), ((990, 1044), 'pandas.Series', 'Series', (["['aardvark', 'artichoke', 'np.nan', 'avocado']"], {}), "(['aardvark', 'artichoke', 'np.nan', 'avocado'])\n", (996, 1044), False, 'from pandas import Series, DataFrame\n'), ((1068, 1120), 'pandas.Series', 'Series', (["['aardvark', 'artichoke', np.nan, 'avocado']"], {}), "(['aardvark', 'artichoke', np.nan, 'avocado'])\n", (1074, 1120), False, 'from pandas import Series, DataFrame\n'), ((1151, 1203), 'pandas.Series', 'Series', (["['aardvark', 'artichoke', np.nan, 'avocado']"], {}), "(['aardvark', 'artichoke', np.nan, 'avocado'])\n", (1157, 1203), False, 'from pandas import Series, DataFrame\n'), ((1328, 1355), 'pandas.Series', 'Series', (['[1, NA, 3.5, NA, 7]'], {}), '([1, NA, 3.5, NA, 7])\n', (1334, 1355), False, 'from pandas import Series, DataFrame\n'), ((1415, 1478), 'pandas.DataFrame', 'DataFrame', (['[1, 6.5, 3]', '[1, NA, NA]', '[NA, NA, NA]', '[NA, 6.5, 3]'], {}), '([1, 
6.5, 3], [1, NA, NA], [NA, NA, NA], [NA, 6.5, 3])\n', (1424, 1478), False, 'from pandas import Series, DataFrame\n'), ((1475, 1540), 'pandas.DataFrame', 'DataFrame', (['[[1, 6.5, 3], [1, NA, NA], [NA, NA, NA], [NA, 6.5, 3]]'], {}), '([[1, 6.5, 3], [1, NA, NA], [NA, NA, NA], [NA, 6.5, 3]])\n', (1484, 1540), False, 'from pandas import Series, DataFrame\n'), ((2957, 3010), 'pandas.Series', 'Series', (["['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c']"], {}), "(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])\n", (2963, 3010), False, 'from pandas import Series, DataFrame\n'), ((1633, 1654), 'numpy.random.randn', 'np.random.randn', (['(7)', '(3)'], {}), '(7, 3)\n', (1648, 1654), True, 'import numpy as np\n'), ((2138, 2159), 'numpy.random.randn', 'np.random.randn', (['(6)', '(3)'], {}), '(6, 3)\n', (2153, 2159), True, 'import numpy as np\n'), ((2266, 2285), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2281, 2285), True, 'import numpy as np\n'), ((2471, 2484), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2480, 2484), True, 'import numpy as np\n'), ((2523, 2536), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (2532, 2536), True, 'import numpy as np\n')] |
import numpy as np
import numpy.typing as npt
from typing import List, Tuple, Optional, Union
from .config.cfg import SweepConfig
from .run import SweepRun, RunState
from .params import HyperParameter, HyperParameterSet
from sklearn import gaussian_process as sklearn_gaussian
from scipy import stats as scipy_stats
from ._types import floating, integer
NUGGET = 1e-10
def fit_normalized_gaussian_process(
    X: npt.ArrayLike, y: npt.ArrayLike, nu: floating = 1.5
) -> Tuple[sklearn_gaussian.GaussianProcessRegressor, floating, floating]:
    """Fit a Matern-kernel GP regressor on z-scored targets.

    Returns the fitted regressor together with the mean and stddev used to
    normalize ``y``, so callers can map predictions back to the original
    scale via ``prediction * stddev + mean``.
    """
    regressor = sklearn_gaussian.GaussianProcessRegressor(
        kernel=sklearn_gaussian.kernels.Matern(nu=nu),
        n_restarts_optimizer=2,
        alpha=0.0000001,
        random_state=2,
    )
    if len(y) == 1:
        # A lone observation has no spread; center on it and skip scaling.
        y = np.array(y)
        center, spread = y[0], 1
    else:
        center = np.mean(y)
        spread = np.std(y) + 0.0001  # offset guards against zero variance
    regressor.fit(X, (y - center) / spread)
    return regressor, center, spread
def sigmoid(x: npt.ArrayLike) -> npt.ArrayLike:
    """Numerically stable logistic function, ``1 / (1 + exp(-x))``.

    Uses ``log(sigmoid(x)) = -logaddexp(0, -x)`` and exponentiates, which
    avoids overflow for large-magnitude inputs.
    """
    log_sigmoid = -np.logaddexp(0, -x)
    return np.exp(log_sigmoid)
def random_sample(X_bounds: npt.ArrayLike, num_test_samples: integer) -> npt.ArrayLike:
    """Draw uniform random samples within per-dimension bounds.

    A dimension whose lower bound is a Python ``int`` is treated as integer
    valued and sampled with ``randint`` over ``[low, high)``; any other
    dimension is sampled continuously and uniformly over ``[low, high)``.

    Args:
        X_bounds: sequence of ``(low, high)`` pairs, one per hyperparameter.
        num_test_samples: number of sample rows to generate.

    Returns:
        Array of shape ``(num_test_samples, len(X_bounds))``.

    Raises:
        ValueError: if an integer lower bound is paired with a non-integer
            upper bound.
    """
    num_hyperparameters = len(X_bounds)
    test_X = np.empty((num_test_samples, num_hyperparameters))
    # Fill one column (hyperparameter) at a time with a vectorized draw
    # instead of one RNG call per cell.
    for jj in range(num_hyperparameters):
        low, high = X_bounds[jj][0], X_bounds[jj][1]
        if isinstance(low, int):
            # Raise instead of assert: asserts are stripped under `python -O`.
            if not isinstance(high, int):
                raise ValueError(
                    "Integer lower bound must be paired with an integer upper bound"
                )
            test_X[:, jj] = np.random.randint(low, high, size=num_test_samples)
        else:
            test_X[:, jj] = np.random.uniform(low, high, size=num_test_samples)
    return test_X
def predict(
    X: npt.ArrayLike, y: npt.ArrayLike, test_X: npt.ArrayLike, nu: floating = 1.5
) -> Tuple[npt.ArrayLike, npt.ArrayLike]:
    """Fit a GP on (X, y) and predict mean and stddev at a single test point."""
    model, center, spread = fit_normalized_gaussian_process(X, y, nu=nu)
    mean_norm, std_norm = model.predict([test_X], return_std=True)
    # Undo the target normalization applied during fitting.
    denorm_mean = mean_norm * spread + center
    denorm_std = std_norm * spread
    return denorm_mean[0], denorm_std[0]
def train_gaussian_process(
    sample_X: npt.ArrayLike,
    sample_y: npt.ArrayLike,
    X_bounds: Optional[npt.ArrayLike] = None,
    current_X: Optional[npt.ArrayLike] = None,  # fixed implicit-Optional annotation
    nu: floating = 1.5,
    max_samples: integer = 100,
) -> Tuple[sklearn_gaussian.GaussianProcessRegressor, floating, floating]:
    """Trains a Gaussian Process function from sample_X, sample_y data.

    Handles the case where there are other training runs in flight (current_X)
    by "fantasizing" their outcomes from a first GP fit and refitting.

    Arguments:
        sample_X: vector of already evaluated sets of hyperparameters
        sample_y: vector of already evaluated loss function values
        X_bounds: minimum and maximum values for every dimension of X.
            Only used to validate sample_X's width; not otherwise consumed.
        current_X: hyperparameters currently being explored
        nu: input to the Matern function, higher numbers make it smoother 0.5, 1.5, 2.5 are good values
            see http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html
        max_samples: maximum number of samples to fit the GP on (the fit is
            O(n^3), so larger sample sets are randomly subsampled)
    Returns:
        gp: the gaussian process function
        y_mean: mean
        y_stddev: stddev
    To make a prediction with gp on real world data X, need to call:
    (gp.predict(X) * y_stddev) + y_mean
    """
    if current_X is not None:
        current_X = np.array(current_X)
        if len(current_X.shape) != 2:
            raise ValueError("Current X must be a 2 dimensional array")
        # we can't let the current samples be bigger than max samples
        # because we need to use some real samples to build the curve
        if current_X.shape[0] > max_samples - 5:
            print(
                "current_X is bigger than max samples - 5 so dropping some currently running parameters"
            )
            current_X = current_X[: (max_samples - 5), :]
    if len(sample_y.shape) != 1:
        raise ValueError("Sample y must be a 1 dimensional array")
    if sample_X.shape[0] != sample_y.shape[0]:
        raise ValueError(
            "Sample X and sample y must be the same size {} {}".format(
                sample_X.shape[0], sample_y.shape[0]
            )
        )
    if X_bounds is not None and sample_X.shape[1] != len(X_bounds):
        raise ValueError(
            "Bounds must be the same length as Sample X's second dimension"
        )
    # gaussian process takes a long time to train, so if there's more than max_samples
    # we need to sample from it
    if sample_X.shape[0] > max_samples:
        sample_indices = np.random.randint(sample_X.shape[0], size=max_samples)
        X = sample_X[sample_indices]
        y = sample_y[sample_indices]
    else:
        X = sample_X
        y = sample_y
    gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=nu)
    if current_X is not None:
        # if we have some hyperparameters running, we pretend that they return
        # the prediction of the function we've fit
        X = np.append(X, current_X, axis=0)
        current_y_fantasy = (gp.predict(current_X) * y_stddev) + y_mean
        y = np.append(y, current_y_fantasy)
        gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=nu)
    return gp, y_mean, y_stddev
def filter_nans(sample_X: npt.ArrayLike, sample_y: npt.ArrayLike) -> Tuple[npt.ArrayLike, npt.ArrayLike]:
    """Drop rows where any entry of sample_X or the matching sample_y is NaN.

    Returns the filtered ``(sample_X, sample_y)`` pair. (Return annotation
    fixed: this function returns a 2-tuple, not a single array.)
    """
    is_row_finite = ~(np.isnan(sample_X).any(axis=1) | np.isnan(sample_y))
    sample_X = sample_X[is_row_finite, :]
    sample_y = sample_y[is_row_finite]
    return sample_X, sample_y
def next_sample(
    *,
    sample_X: npt.ArrayLike,
    sample_y: npt.ArrayLike,
    X_bounds: Optional[npt.ArrayLike] = None,
    current_X: Optional[npt.ArrayLike] = None,
    nu: floating = 1.5,
    max_samples_for_gp: integer = 100,
    improvement: floating = 0.01,
    num_points_to_try: integer = 1000,
    opt_func: str = "expected_improvement",
    test_X: Optional[npt.ArrayLike] = None,
) -> Tuple[npt.ArrayLike, floating, floating, floating, floating]:
    """Calculates the best next sample to look at via bayesian optimization.

    Args:
        sample_X: ArrayLike, shape (N_runs, N_params)
            2d array of already evaluated sets of hyperparameters
        sample_y: ArrayLike, shape (N_runs,)
            1d array of already evaluated loss function values
        X_bounds: ArrayLike, optional, shape (N_params, 2), default None
            2d array minimum and maximum values for every dimension of X
        current_X: ArrayLike, optional, shape (N_runs_in_flight, N_params), default None
            hyperparameters currently being explored
        nu: floating, optional, default = 1.5
            input to the Matern function, higher numbers make it smoother. 0.5,
            1.5, 2.5 are good values see
            http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html
        max_samples_for_gp: integer, optional, default 100
            maximum samples to consider (since algo is O(n^3)) for performance,
            but also adds some randomness. this number of samples will be chosen
            randomly from the sample_X and used to train the GP.
        improvement: floating, optional, default 0.01
            amount of improvement to optimize for -- higher means take more
            exploratory risks. NOTE(review): currently not used in the
            acquisition computation; kept for interface compatibility.
        num_points_to_try: integer, optional, default 1000
            number of X values to try when looking for value with highest expected probability
            of improvement
        opt_func: str, default "expected_improvement"
            NOTE(review): currently ignored; expected improvement is always
            optimized. Kept for interface compatibility.
        test_X: X values to test when looking for the best values to try
    Returns:
        suggested_X: optimal X value to try
        prob_of_improvement: probability of an improvement
        predicted_y: predicted value
        predicted_std: stddev of predicted value
        expected_improvement: expected improvement
    """
    # Sanity check the data
    sample_X = np.array(sample_X)
    sample_y = np.array(sample_y)
    if test_X is not None:
        test_X = np.array(test_X)
    if len(sample_X.shape) != 2:
        raise ValueError("Sample X must be a 2 dimensional array")
    if len(sample_y.shape) != 1:
        raise ValueError("Sample y must be a 1 dimensional array")
    if sample_X.shape[0] != sample_y.shape[0]:
        raise ValueError("Sample X and y must be same length")
    if test_X is not None:
        # if test_X is set, usually this is for simulation/testing
        if X_bounds is not None:
            raise ValueError("Can't set test_X and X_bounds")
    else:
        # normal case where we randomly sample our test_X
        if X_bounds is None:
            raise ValueError("Must pass in test_X or X_bounds")
    filtered_X, filtered_y = filter_nans(sample_X, sample_y)
    # we can't run this algothim with less than two sample points, so we'll
    # just return a random point
    if filtered_X.shape[0] < 2:
        if test_X is not None:
            # pick a random row from test_X
            row = np.random.choice(test_X.shape[0])
            X = test_X[row, :]
        else:
            X = random_sample(X_bounds, 1)[0]
        if filtered_X.shape[0] < 1:
            prediction = 0.0
        else:
            prediction = filtered_y[0]
        return (
            X,
            1.0,
            prediction,
            np.nan,
            np.nan,
        )
    # build the acquisition function
    gp, y_mean, y_stddev = train_gaussian_process(
        filtered_X, filtered_y, X_bounds, current_X, nu, max_samples_for_gp
    )
    # Look for the minimum value of our fitted-target-function + (kappa * fitted-target-std_dev)
    if test_X is None:  # this is the usual case
        test_X = random_sample(X_bounds, num_points_to_try)
    y_pred, y_pred_std = gp.predict(test_X, return_std=True)
    # best value of y we've seen so far. i.e. y*
    min_unnorm_y = np.min(filtered_y)
    # hack for dealing with predicted std of 0
    epsilon = 0.00000001
    min_norm_y = (min_unnorm_y - y_mean) / y_stddev
    # Z-score of each candidate relative to the incumbent best.
    Z = -(y_pred - min_norm_y) / (y_pred_std + epsilon)
    prob_of_improve: np.ndarray = scipy_stats.norm.cdf(Z)
    # Expected improvement (in normalized units) for each candidate.
    e_i = -(y_pred - min_norm_y) * scipy_stats.norm.cdf(
        Z
    ) + y_pred_std * scipy_stats.norm.pdf(Z)
    best_test_X_index = np.argmax(e_i)
    suggested_X = test_X[best_test_X_index]
    suggested_X_prob_of_improvement = prob_of_improve[best_test_X_index]
    suggested_X_predicted_y = y_pred[best_test_X_index] * y_stddev + y_mean
    suggested_X_predicted_std = y_pred_std[best_test_X_index] * y_stddev
    # recalculate expected improvement
    min_norm_y = (min_unnorm_y - y_mean) / y_stddev
    z_best = -(y_pred[best_test_X_index] - min_norm_y) / (
        y_pred_std[best_test_X_index] + epsilon
    )
    suggested_X_expected_improvement = -(
        y_pred[best_test_X_index] - min_norm_y
    ) * scipy_stats.norm.cdf(z_best) + y_pred_std[
        best_test_X_index
    ] * scipy_stats.norm.pdf(
        z_best
    )
    return (
        suggested_X,
        suggested_X_prob_of_improvement,
        suggested_X_predicted_y,
        suggested_X_predicted_std,
        suggested_X_expected_improvement,
    )
def bayes_search_next_run(
    runs: List[SweepRun],
    config: Union[dict, SweepConfig],
    validate: bool = False,
    minimum_improvement: float = 0.1,
) -> SweepRun:
    """Suggest runs using Bayesian optimization.
    >>> suggestion = bayes_search_next_run([], {
    ...     'method': 'bayes',
    ...     'parameters': {'a': {'min': 1., 'max': 2.}},
    ...     'metric': {'name': 'loss', 'goal': 'maximize'}
    ... })
    Args:
        runs: The runs in the sweep.
        config: The sweep's config.
        minimum_improvement: The minimum improvement to optimize for. Higher means take more exploratory risks.
        validate: Whether to validate `sweep_config` against the SweepConfig JSONschema.
            If true, will raise a Validation error if `sweep_config` does not conform to
            the schema. If false, will attempt to run the sweep with an unvalidated schema.
    Returns:
        The suggested run.
    """
    # Validate/normalize the config and reject non-bayes sweep methods early.
    if validate:
        config = SweepConfig(config)
    if "metric" not in config:
        raise ValueError('Bayesian search requires "metric" section')
    if config["method"] != "bayes":
        raise ValueError("Invalid sweep configuration for bayes_search_next_run.")
    goal = config["metric"]["goal"]
    metric_name = config["metric"]["name"]
    # "worst" depends on direction: when maximizing, the worst seen is the minimum.
    worst_func = min if goal == "maximize" else max
    params = HyperParameterSet.from_config(config["parameters"])
    sample_X = []
    current_X = []
    y = []
    # Searchable params are normalized into the unit hypercube.
    X_bounds = [[0.0, 1.0]] * len(params.searchable_params)
    # we calc the max metric to put as the metric for failed runs
    # so that our bayesian search stays away from them
    worst_metric = 0.0
    for run in runs:
        if run.state == RunState.finished:
            try:
                run_extremum = run.metric_extremum(
                    metric_name, kind="minimum" if goal == "maximize" else "maximum"
                )
            except ValueError:
                run_extremum = 0.0  # default
            worst_metric = worst_func(worst_metric, run_extremum)
    X_norms = params.convert_runs_to_normalized_vector(runs)
    # Partition runs: finished -> training data; in-flight -> fantasy points;
    # failed -> training data pinned at the worst observed metric.
    for run, X_norm in zip(runs, X_norms):
        if run.state == RunState.finished:
            try:
                metric = run.metric_extremum(
                    metric_name, kind="maximum" if goal == "maximize" else "minimum"
                )
            except ValueError:
                metric = 0.0  # default
            y.append(metric)
            sample_X.append(X_norm)
        elif run.state in [RunState.running, RunState.preempting, RunState.preempted]:
            # run is in progress
            # we wont use the metric, but we should pass it into our optimizer to
            # account for the fact that it is running
            current_X.append(X_norm)
        elif run.state in [RunState.failed, RunState.crashed, RunState.killed]:
            # run failed, but we're still going to use it
            # maybe we should be smarter about this
            y.append(worst_metric)
            sample_X.append(X_norm)
        else:
            raise ValueError("Run is in unknown state")
    if len(sample_X) == 0:
        sample_X = np.empty([0, 0])
    else:
        sample_X = np.asarray(sample_X)
    if len(current_X) > 0:
        current_X = np.array(current_X)
    # impute bad metric values from y
    y = np.asarray(y)
    if len(y) > 0:
        y[~np.isfinite(y)] = worst_metric
    # next_sample is a minimizer, so if we are trying to
    # maximize, we need to negate y
    y *= -1 if goal == "maximize" else 1
    (
        suggested_X,
        suggested_X_prob_of_improvement,
        suggested_X_predicted_y,
        suggested_X_predicted_std,
        suggested_X_expected_improvement,
    ) = next_sample(
        sample_X=sample_X,
        sample_y=y,
        X_bounds=X_bounds,
        current_X=current_X if len(current_X) > 0 else None,
        improvement=minimum_improvement,
    )
    # convert the parameters from vector of [0,1] values
    # to the original ranges
    for param in params:
        if param.type == HyperParameter.CONSTANT:
            continue
        try_value = suggested_X[params.param_names_to_index[param.name]]
        param.value = param.ppf(try_value)
    ret_dict = params.to_config()
    # Diagnostics reported alongside the suggestion.
    info = {
        "success_probability": suggested_X_prob_of_improvement,
        "predicted_value": suggested_X_predicted_y,
        "predicted_value_std_dev": suggested_X_predicted_std,
        "expected_improvement": suggested_X_expected_improvement,
    }
    return SweepRun(config=ret_dict, search_info=info)
| [
"numpy.mean",
"numpy.random.choice",
"numpy.std",
"numpy.asarray",
"numpy.argmax",
"numpy.logaddexp",
"numpy.append",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.empty",
"numpy.isnan",
"scipy.stats.norm.pdf",
"numpy.min",
"numpy.isfinite",
"sklearn.gaussian_pr... | [((1227, 1276), 'numpy.empty', 'np.empty', (['(num_test_samples, num_hyperparameters)'], {}), '((num_test_samples, num_hyperparameters))\n', (1235, 1276), True, 'import numpy as np\n'), ((8255, 8273), 'numpy.array', 'np.array', (['sample_X'], {}), '(sample_X)\n', (8263, 8273), True, 'import numpy as np\n'), ((8289, 8307), 'numpy.array', 'np.array', (['sample_y'], {}), '(sample_y)\n', (8297, 8307), True, 'import numpy as np\n'), ((10205, 10223), 'numpy.min', 'np.min', (['filtered_y'], {}), '(filtered_y)\n', (10211, 10223), True, 'import numpy as np\n'), ((10585, 10608), 'scipy.stats.norm.cdf', 'scipy_stats.norm.cdf', (['Z'], {}), '(Z)\n', (10605, 10608), True, 'from scipy import stats as scipy_stats\n'), ((10876, 10890), 'numpy.argmax', 'np.argmax', (['e_i'], {}), '(e_i)\n', (10885, 10890), True, 'import numpy as np\n'), ((15125, 15138), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (15135, 15138), True, 'import numpy as np\n'), ((772, 783), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (780, 783), True, 'import numpy as np\n'), ((854, 864), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (861, 864), True, 'import numpy as np\n'), ((3381, 3400), 'numpy.array', 'np.array', (['current_X'], {}), '(current_X)\n', (3389, 3400), True, 'import numpy as np\n'), ((4586, 4640), 'numpy.random.randint', 'np.random.randint', (['sample_X.shape[0]'], {'size': 'max_samples'}), '(sample_X.shape[0], size=max_samples)\n', (4603, 4640), True, 'import numpy as np\n'), ((5011, 5042), 'numpy.append', 'np.append', (['X', 'current_X'], {'axis': '(0)'}), '(X, current_X, axis=0)\n', (5020, 5042), True, 'import numpy as np\n'), ((5127, 5158), 'numpy.append', 'np.append', (['y', 'current_y_fantasy'], {}), '(y, current_y_fantasy)\n', (5136, 5158), True, 'import numpy as np\n'), ((8352, 8368), 'numpy.array', 'np.array', (['test_X'], {}), '(test_X)\n', (8360, 8368), True, 'import numpy as np\n'), ((14943, 14959), 'numpy.empty', 'np.empty', (['[0, 0]'], {}), '([0, 
0])\n', (14951, 14959), True, 'import numpy as np\n'), ((14989, 15009), 'numpy.asarray', 'np.asarray', (['sample_X'], {}), '(sample_X)\n', (14999, 15009), True, 'import numpy as np\n'), ((15058, 15077), 'numpy.array', 'np.array', (['current_X'], {}), '(current_X)\n', (15066, 15077), True, 'import numpy as np\n'), ((613, 651), 'sklearn.gaussian_process.kernels.Matern', 'sklearn_gaussian.kernels.Matern', ([], {'nu': 'nu'}), '(nu=nu)\n', (644, 651), True, 'from sklearn import gaussian_process as sklearn_gaussian\n'), ((884, 893), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (890, 893), True, 'import numpy as np\n'), ((1063, 1082), 'numpy.logaddexp', 'np.logaddexp', (['(0)', '(-x)'], {}), '(0, -x)\n', (1075, 1082), True, 'import numpy as np\n'), ((5408, 5426), 'numpy.isnan', 'np.isnan', (['sample_y'], {}), '(sample_y)\n', (5416, 5426), True, 'import numpy as np\n'), ((9330, 9363), 'numpy.random.choice', 'np.random.choice', (['test_X.shape[0]'], {}), '(test_X.shape[0])\n', (9346, 9363), True, 'import numpy as np\n'), ((10644, 10667), 'scipy.stats.norm.cdf', 'scipy_stats.norm.cdf', (['Z'], {}), '(Z)\n', (10664, 10667), True, 'from scipy import stats as scipy_stats\n'), ((10697, 10720), 'scipy.stats.norm.pdf', 'scipy_stats.norm.pdf', (['Z'], {}), '(Z)\n', (10717, 10720), True, 'from scipy import stats as scipy_stats\n'), ((11460, 11488), 'scipy.stats.norm.cdf', 'scipy_stats.norm.cdf', (['z_best'], {}), '(z_best)\n', (11480, 11488), True, 'from scipy import stats as scipy_stats\n'), ((11537, 11565), 'scipy.stats.norm.pdf', 'scipy_stats.norm.pdf', (['z_best'], {}), '(z_best)\n', (11557, 11565), True, 'from scipy import stats as scipy_stats\n'), ((1492, 1543), 'numpy.random.randint', 'np.random.randint', (['X_bounds[jj][0]', 'X_bounds[jj][1]'], {}), '(X_bounds[jj][0], X_bounds[jj][1])\n', (1509, 1543), True, 'import numpy as np\n'), ((15169, 15183), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (15180, 15183), True, 'import numpy as np\n'), ((5375, 5393), 
'numpy.isnan', 'np.isnan', (['sample_X'], {}), '(sample_X)\n', (5383, 5393), True, 'import numpy as np\n'), ((1617, 1636), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1634, 1636), True, 'import numpy as np\n')] |
if __name__ == "__main__":
    # Script: plot the latent (calibration) functions of several calibration
    # methods (uncalibrated, temperature scaling, GP calibration) for a set of
    # classifiers, once on logits (ImageNet models) and once on probability
    # scores (MNIST models). Writes plots via pycalib's texfig helper.
    # NOTE(review): paths below are hard-coded to a specific user's machine.
    import numpy as np
    import os.path
    import pycalib
    import pycalib.scoring
    import pycalib.calibration_methods as calm
    import pycalib.benchmark as bm
    import pycalib.plotting
    import matplotlib.pyplot as plt
    import pycalib.texfig as texfig
    # Seed
    random_state = 1
    plot_hist = True
    for use_logits in [True, False]:
        if use_logits:
            # Classifier display names
            clf_name_dict = {
                'alexnet': "AlexNet",
                'vgg19': "VGG19",
                'resnet50': "ResNet-50",
                'resnet152': "ResNet-152",
                'densenet121': "DenseNet-121",
                'densenet201': "DenseNet-201",
                'inceptionv4': "InceptionV4",
                'se_resnext50_32x4d': "SE-ResNeXt-50",
                'se_resnext101_32x4d': "SE-ResNeXt-101",
                'polynet': "PolyNet",
                'senet154': "SENet-154",
                'pnasnet5large': "PNASNet-5-Large",
                'nasnetalarge': "NASNet-A-Large"
            }
            # Classifier combinations to plot
            clf_combinations = {
                "latent_maps_main": ["resnet152", "polynet", "pnasnet5large"],
                "latent_maps_appendix1": ["vgg19", "densenet201", 'nasnetalarge'],
                "latent_maps_appendix2": ['se_resnext50_32x4d', 'se_resnext101_32x4d', "senet154"]
            }
            # Data set setup
            n_classes = 1000
            file = "/home/j/Documents/research/projects/nonparametric_calibration/pycalib/datasets/imagenet/"
        else:
            clf_name_dict = {
                "AdaBoost": "AdaBoost",
                "XGBoost": "XGBoost",
                "mondrian_forest": "Mondrian Forest",
                "random_forest": "Random Forest",
                "1layer_NN": "1-layer NN"
            }
            clf_combinations = {
                "latent_maps_appendix1": ["AdaBoost", "mondrian_forest", "1layer_NN"],
                "latent_maps_appendix2": ["XGBoost", "random_forest"]
            }
            # Data set setup
            n_classes = 10
            file = "/home/j/Documents/research/projects/nonparametric_calibration/pycalib/datasets/mnist/"
            output_folder = "clf_output"
        # Filepaths
        # NOTE(review): `output_folder` is assigned twice (also inside the else
        # branch above) and `file` shadows a builtin name.
        output_folder = "clf_output"
        data_dir = os.path.join(file, output_folder)
        run_dir = os.path.join(file, "calibration")
        for clf_comb_name, clf_names in clf_combinations.items():
            # Benchmark data set
            if use_logits:
                benchmark = bm.ImageNetData(run_dir=run_dir, clf_output_dir=data_dir,
                                              classifier_names=clf_names,
                                              cal_methods=[],
                                              cal_method_names=[],
                                              use_logits=use_logits, n_splits=10, test_size=10000,
                                              train_size=1000, random_state=random_state)
            else:
                benchmark = pycalib.benchmark.MNISTData(run_dir=run_dir, clf_output_dir=data_dir,
                                                          classifier_names=clf_names,
                                                          cal_methods=[],
                                                          cal_method_names=[],
                                                          n_splits=10, test_size=9000,
                                                          train_size=1000, random_state=random_state)
            # Filepath and plot axes
            folder_path = "/home/j/Documents/research/projects/nonparametric_calibration/" + \
                          "pycalib/figures/latent_functions/plots/"
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            file_suffix = "_probs"
            if use_logits:
                file_suffix = "_logits"
            filename = folder_path + clf_comb_name + file_suffix
            fig, axes = texfig.subplots(width=7 / 3 * len(clf_names), ratio=.25 * 3 / len(clf_names),
                                        nrows=1, ncols=len(clf_names), w_pad=1)
            # Iterate through data sets, calibrate and plot latent functions
            for (i_clf, (Z, y, info_dict)) in enumerate(benchmark.data_gen()):
                # Train, test split
                cal_ind, test_ind = next(benchmark.cross_validator.split(Z, y))
                Z_cal = Z[cal_ind, :]
                y_cal = y[cal_ind]
                Z_test = Z[test_ind, :]
                y_test = y[test_ind]
                hist_data = Z_cal.flatten()
                # Calibrate
                nocal = calm.NoCalibration(logits=use_logits)
                ts = calm.TemperatureScaling()
                ts.fit(Z_cal, y_cal)
                gpc = calm.GPCalibration(n_classes=n_classes, maxiter=1000, n_inducing_points=10,
                                         logits=use_logits, verbose=True,
                                         random_state=random_state)
                gpc.fit(Z_cal, y_cal)
                # # Compute calibration error
                # ECE_nocal = pycalib.scoring.expected_calibration_error(y_test, nocal.predict_proba(Z_test), n_bins=100)
                # ECE_ts = pycalib.scoring.expected_calibration_error(y_test, ts.predict_proba(Z_test), n_bins=100)
                # ECE_gpc = pycalib.scoring.expected_calibration_error(y_test, gpc.predict_proba(Z_test), n_bins=100)
                # Plot reliability diagrams
                if not os.path.exists(os.path.join(folder_path, "reliability_diagrams")):
                    os.makedirs(os.path.join(folder_path, "reliability_diagrams"))
                p_pred_nocal = nocal.predict_proba(Z_test)
                p_pred_ts = ts.predict_proba(Z_test)
                p_pred_gpc = gpc.predict_proba(Z_test)
                pycalib.plotting.reliability_diagram(y=y_test, p_pred=[p_pred_nocal, p_pred_ts, p_pred_gpc], n_bins=15,
                                                     show_ece=False, show_legend=False,
                                                     title=["Uncal.", "Temp.", "GPcalib"],
                                                     model_name=None, plot_width=2.2 * 3 + .2, plot_height=2.2,
                                                     filename=os.path.join(folder_path, "reliability_diagrams",
                                                                           info_dict['Model'] + "_reldiagram"))
                # Get latent function values
                z = np.linspace(start=3 * 10 ** -2, stop=1, num=1000)
                if use_logits:
                    z = np.linspace(start=np.min(Z), stop=np.max(Z), num=1000)
                ts_latent = ts.latent(z)
                gpc_latent, gpc_latent_var = gpc.latent(z)
                if use_logits:
                    # Compute factor for histogram height
                    xlims = np.array([np.min(z), np.max(z)])
                    ylims = xlims
                    factor = 1.5*ylims[1] / len(hist_data)
                    # Plot latent function of no calibration
                    axes[i_clf].plot(xlims, ylims, '-', color='tab:gray', zorder=0,
                                     # label="Uncal: $\\textup{ECE}_1" + "={:.4f}$".format(ECE_nocal))
                                     label="Uncal.")
                    # Compute y-intercepts by minimizing L2 distance between latent functions and identity
                    y_intercept_ts = 0.5 * (1 - ts.T) * (np.max(z) + np.min(z))
                    y_intercept_gpcalib = np.mean(gpc_latent - z)
                    # Plot shifted latent function: temperature scaling
                    ts_latent_shifted = ts_latent + y_intercept_ts
                    ts_indices = (ts_latent_shifted <= ylims[1]) & (
                            ts_latent_shifted >= ylims[0])  # Only display points in plot area
                    axes[i_clf].plot(z[ts_indices], ts_latent_shifted[ts_indices],
                                     '-', color='tab:orange',
                                     # label="Temp: $\\textup{ECE}_1" + "={:.4f}$".format(ECE_ts),
                                     label="Temp.", zorder=2)
                    # Plot shifted latent function: GPcalib
                    gpc_latent_shifted = gpc_latent + y_intercept_gpcalib
                    gpc_indices = (gpc_latent_shifted <= ylims[1]) & (
                            gpc_latent_shifted >= ylims[0])  # Only display points in plot area
                    axes[i_clf].plot(z[gpc_indices], gpc_latent_shifted[gpc_indices],
                                     # label="GPcalib: $\\textup{ECE}_1" + "={:.4f}$".format(ECE_gpc),
                                     label="GPcalib", color='tab:blue',
                                     zorder=3)
                    axes[i_clf].fill_between(z,
                                             np.clip(a=gpc_latent + y_intercept_gpcalib - 2 * np.sqrt(gpc_latent_var),
                                                     a_min=ylims[0], a_max=ylims[1]),
                                             np.clip(a=gpc_latent + y_intercept_gpcalib + 2 * np.sqrt(gpc_latent_var),
                                                     a_min=ylims[0], a_max=ylims[1]),
                                             color='tab:blue', alpha=.2, zorder=1)
                    # Plot annotation
                    axes[i_clf].set_xlabel("logit")
                else:
                    # Plot latent function corresponding to no calibration
                    axes[i_clf].plot(z, np.log(z), '-', color='tab:gray', zorder=1,
                                     # label="Uncal: $\\textup{ECE}_1" + "={:.4f}$".format(ECE_nocal))
                                     label="Uncal.")
                    # Compute y-intercepts by minimizing L2 distance between latent functions and identity
                    y_intercept_ts = np.mean(np.log(z) - ts_latent)
                    # y_intercept_gpcalib = np.mean(gpc_latent - np.log(z))
                    # Plot shifted latent function: temperature scaling
                    ts_latent_shifted = ts_latent + y_intercept_ts
                    axes[i_clf].plot(z, ts_latent_shifted,
                                     '-', color='tab:orange',
                                     # label="Temp: $\\textup{ECE}_1" + "={:.4f}$".format(ECE_ts),
                                     label="Temp.", zorder=3)
                    # Plot GPcalib latent function
                    axes[i_clf].plot(z, gpc_latent,
                                     # label="GPcalib: $\\textup{ECE}_1" + "={:.4f}$".format(ECE_gpc),
                                     label="GPcalib.",
                                     color='tab:blue',
                                     zorder=3)
                    axes[i_clf].fill_between(z, gpc_latent - 2 * np.sqrt(gpc_latent_var),
                                             gpc_latent + 2 * np.sqrt(gpc_latent_var),
                                             color='tab:blue', alpha=.2, zorder=2)
                    # Compute factor for histogram height
                    ylims = axes[i_clf].get_ylim()
                    factor = 1.5*ylims[1] / len(hist_data)
                    # Plot annotation
                    axes[i_clf].set_xlabel("probability score")
                # Plot histogram
                if plot_hist:
                    axes[i_clf].hist(hist_data, weights=factor * np.ones_like(hist_data),
                                     bottom=ylims[0], zorder=0, color="gray", alpha=0.4)
                # Plot annotation and legend
                axes[i_clf].set_title(clf_name_dict[clf_names[i_clf]])
                if i_clf == 0:
                    axes[i_clf].set_ylabel("latent function")
                if i_clf + 1 == len(clf_names):
                    axes[i_clf].legend(prop={'size': 9}, labelspacing=0.2)  # loc="lower right"
            # Save plot to file
            texfig.savefig(filename)
            plt.close("all")
| [
"numpy.mean",
"numpy.ones_like",
"numpy.sqrt",
"pycalib.texfig.savefig",
"numpy.log",
"pycalib.calibration_methods.NoCalibration",
"matplotlib.pyplot.close",
"pycalib.benchmark.ImageNetData",
"pycalib.calibration_methods.TemperatureScaling",
"numpy.linspace",
"pycalib.calibration_methods.GPCalib... | [((12109, 12133), 'pycalib.texfig.savefig', 'texfig.savefig', (['filename'], {}), '(filename)\n', (12123, 12133), True, 'import pycalib.texfig as texfig\n'), ((12146, 12162), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12155, 12162), True, 'import matplotlib.pyplot as plt\n'), ((2620, 2848), 'pycalib.benchmark.ImageNetData', 'bm.ImageNetData', ([], {'run_dir': 'run_dir', 'clf_output_dir': 'data_dir', 'classifier_names': 'clf_names', 'cal_methods': '[]', 'cal_method_names': '[]', 'use_logits': 'use_logits', 'n_splits': '(10)', 'test_size': '(10000)', 'train_size': '(1000)', 'random_state': 'random_state'}), '(run_dir=run_dir, clf_output_dir=data_dir, classifier_names=\n clf_names, cal_methods=[], cal_method_names=[], use_logits=use_logits,\n n_splits=10, test_size=10000, train_size=1000, random_state=random_state)\n', (2635, 2848), True, 'import pycalib.benchmark as bm\n'), ((3106, 3321), 'pycalib.benchmark.MNISTData', 'pycalib.benchmark.MNISTData', ([], {'run_dir': 'run_dir', 'clf_output_dir': 'data_dir', 'classifier_names': 'clf_names', 'cal_methods': '[]', 'cal_method_names': '[]', 'n_splits': '(10)', 'test_size': '(9000)', 'train_size': '(1000)', 'random_state': 'random_state'}), '(run_dir=run_dir, clf_output_dir=data_dir,\n classifier_names=clf_names, cal_methods=[], cal_method_names=[],\n n_splits=10, test_size=9000, train_size=1000, random_state=random_state)\n', (3133, 3321), False, 'import pycalib\n'), ((4755, 4792), 'pycalib.calibration_methods.NoCalibration', 'calm.NoCalibration', ([], {'logits': 'use_logits'}), '(logits=use_logits)\n', (4773, 4792), True, 'import pycalib.calibration_methods as calm\n'), ((4815, 4840), 'pycalib.calibration_methods.TemperatureScaling', 'calm.TemperatureScaling', ([], {}), '()\n', (4838, 4840), True, 'import pycalib.calibration_methods as calm\n'), ((4901, 5040), 'pycalib.calibration_methods.GPCalibration', 'calm.GPCalibration', ([], {'n_classes': 
'n_classes', 'maxiter': '(1000)', 'n_inducing_points': '(10)', 'logits': 'use_logits', 'verbose': '(True)', 'random_state': 'random_state'}), '(n_classes=n_classes, maxiter=1000, n_inducing_points=10,\n logits=use_logits, verbose=True, random_state=random_state)\n', (4919, 5040), True, 'import pycalib.calibration_methods as calm\n'), ((6646, 6695), 'numpy.linspace', 'np.linspace', ([], {'start': '(3 * 10 ** -2)', 'stop': '(1)', 'num': '(1000)'}), '(start=3 * 10 ** -2, stop=1, num=1000)\n', (6657, 6695), True, 'import numpy as np\n'), ((7683, 7706), 'numpy.mean', 'np.mean', (['(gpc_latent - z)'], {}), '(gpc_latent - z)\n', (7690, 7706), True, 'import numpy as np\n'), ((9697, 9706), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (9703, 9706), True, 'import numpy as np\n'), ((6769, 6778), 'numpy.min', 'np.min', (['Z'], {}), '(Z)\n', (6775, 6778), True, 'import numpy as np\n'), ((6785, 6794), 'numpy.max', 'np.max', (['Z'], {}), '(Z)\n', (6791, 6794), True, 'import numpy as np\n'), ((7035, 7044), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (7041, 7044), True, 'import numpy as np\n'), ((7046, 7055), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (7052, 7055), True, 'import numpy as np\n'), ((7618, 7627), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (7624, 7627), True, 'import numpy as np\n'), ((7630, 7639), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (7636, 7639), True, 'import numpy as np\n'), ((10050, 10059), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (10056, 10059), True, 'import numpy as np\n'), ((11000, 11023), 'numpy.sqrt', 'np.sqrt', (['gpc_latent_var'], {}), '(gpc_latent_var)\n', (11007, 11023), True, 'import numpy as np\n'), ((11087, 11110), 'numpy.sqrt', 'np.sqrt', (['gpc_latent_var'], {}), '(gpc_latent_var)\n', (11094, 11110), True, 'import numpy as np\n'), ((11596, 11619), 'numpy.ones_like', 'np.ones_like', (['hist_data'], {}), '(hist_data)\n', (11608, 11619), True, 'import numpy as np\n'), ((9069, 9092), 'numpy.sqrt', 'np.sqrt', (['gpc_latent_var'], {}), 
'(gpc_latent_var)\n', (9076, 9092), True, 'import numpy as np\n'), ((9274, 9297), 'numpy.sqrt', 'np.sqrt', (['gpc_latent_var'], {}), '(gpc_latent_var)\n', (9281, 9297), True, 'import numpy as np\n')] |
import os
import subprocess
import numpy as np
from Cython.Build import build_ext
from setuptools import find_packages, setup, Extension
# Include libraries from the OS X Command Line Tools. On OS X Big Sur, these libraries
# are not automatically included anymore.
osx_library_path = "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib"
if os.path.exists(osx_library_path):
if "LIBRARY_PATH" in os.environ and os.environ["LIBRARY_PATH"]:
os.environ["LIBRARY_PATH"] += ":" + osx_library_path
else:
os.environ["LIBRARY_PATH"] = osx_library_path
# If `xcrun` is available, make sure the includes are added to CPATH.
if subprocess.call("which xcrun", shell=True) == 0:
path = (
subprocess.check_output("xcrun --show-sdk-path", shell=True)
.strip()
.decode("ascii")
)
path += "/usr/include"
# Add to CPATH.
if "CPATH" not in os.environ:
os.environ["CPATH"] = ""
os.environ["CPATH"] += path
# Default to use gcc as the compiler if `$CC` is not set.
if "CC" not in os.environ or not os.environ["CC"]:
os.environ["CC"] = "gcc"
# Check whether `gfortran` is available.
if subprocess.call("which gfortran", shell=True) != 0:
if "LAB_GFORTRAN" in os.environ and os.environ["LAB_GFORTRAN"]:
gfortran = os.environ["LAB_GFORTRAN"]
else:
gfortran = False
else:
gfortran = "gfortran"
# Ensure that `$CC` is not symlinked to `clang`, because the default shipped
# one often does not support OpenMP, but `gcc` does.
out = subprocess.check_output("$CC --version", shell=True)
if "clang" in out.decode("ascii"):
# It is. Now try to find a `gcc` to replace it with.
found = False
for i in range(100, 3, -1):
gcci = "gcc-{}".format(i)
if subprocess.call(["which", gcci]) == 0:
# Set both `$CC` and `$CXX` in this case, just to be sure.
os.environ["CC"] = gcci
os.environ["CXX"] = "g++-{}".format(i)
found = True
break
# Ensure that one was found.
if not found:
raise RuntimeError(
"Your gcc runs clang, and no version of gcc could be found. "
"Please install gcc. "
'On OS X, this can be done with "brew install gcc".'
)
# Compile TVPACK if `gfortran` is available.
if gfortran:
if (
subprocess.call(
f"{gfortran} -fPIC -O2 -c lab/bvn_cdf/tvpack.f -o lab/bvn_cdf/tvpack.o",
shell=True,
)
!= 0
):
raise RuntimeError("Compilation of TVPACK failed.")
requirements = ["numpy>=1.16", "scipy>=1.3", "fdm", "plum-dispatch>=1.5.3"]
# Determine which external modules to compile.
ext_modules = []
if gfortran:
extra_objects = ["lab/bvn_cdf/tvpack.o"]
extra_link_args = ["-fopenmp"]
# Allow the libraries for `gfortran` to be explicitly linked.
if "LAB_LIBGFORTRAN" in os.environ and os.environ["LAB_LIBGFORTRAN"]:
extra_objects += [os.environ["LAB_LIBGFORTRAN"]]
else:
extra_link_args += ["-lgfortran"]
ext_modules.append(
Extension(
"lab.bvn_cdf",
sources=["lab/bvn_cdf/bvn_cdf.pyx"],
include_dirs=[np.get_include()],
extra_compile_args=["-fPIC", "-O2", "-fopenmp"],
extra_objects=extra_objects,
extra_link_args=extra_link_args
)
)
setup(
packages=find_packages(exclude=["docs"]),
python_requires=">=3.6",
install_requires=requirements,
cmdclass={"build_ext": build_ext},
ext_modules=ext_modules,
include_package_data=True,
)
| [
"subprocess.check_output",
"os.path.exists",
"setuptools.find_packages",
"subprocess.call",
"numpy.get_include"
] | [((353, 385), 'os.path.exists', 'os.path.exists', (['osx_library_path'], {}), '(osx_library_path)\n', (367, 385), False, 'import os\n'), ((1534, 1587), 'subprocess.check_output', 'subprocess.check_output', (['"""$CC --version"""'], {'shell': '(True)'}), "('$CC --version', shell=True)\n", (1557, 1587), False, 'import subprocess\n'), ((654, 696), 'subprocess.call', 'subprocess.call', (['"""which xcrun"""'], {'shell': '(True)'}), "('which xcrun', shell=True)\n", (669, 696), False, 'import subprocess\n'), ((1164, 1209), 'subprocess.call', 'subprocess.call', (['"""which gfortran"""'], {'shell': '(True)'}), "('which gfortran', shell=True)\n", (1179, 1209), False, 'import subprocess\n'), ((2355, 2464), 'subprocess.call', 'subprocess.call', (['f"""{gfortran} -fPIC -O2 -c lab/bvn_cdf/tvpack.f -o lab/bvn_cdf/tvpack.o"""'], {'shell': '(True)'}), "(\n f'{gfortran} -fPIC -O2 -c lab/bvn_cdf/tvpack.f -o lab/bvn_cdf/tvpack.o',\n shell=True)\n", (2370, 2464), False, 'import subprocess\n'), ((3406, 3437), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['docs']"}), "(exclude=['docs'])\n", (3419, 3437), False, 'from setuptools import find_packages, setup, Extension\n'), ((1775, 1807), 'subprocess.call', 'subprocess.call', (["['which', gcci]"], {}), "(['which', gcci])\n", (1790, 1807), False, 'import subprocess\n'), ((724, 784), 'subprocess.check_output', 'subprocess.check_output', (['"""xcrun --show-sdk-path"""'], {'shell': '(True)'}), "('xcrun --show-sdk-path', shell=True)\n", (747, 784), False, 'import subprocess\n'), ((3204, 3220), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (3218, 3220), True, 'import numpy as np\n')] |
import numpy as np
import networkx
from zephyr.Problem import SeisFDFDProblem
# Plotting configuration
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
# System / modelling configuration
cellSize = 1 # m
freqs = [2e2] # Hz
density = 2700 # units of density
Q = np.inf # can be inf
nx = 164 # count
nz = 264 # count
freeSurf = [False, False, False, False] # t r b l
dims = (nx,nz) # tuple
nPML = 32
rho = np.fliplr(np.ones(dims) * density)
nfreq = len(freqs) # number of frequencies
nky = 48 # number of y-directional plane-wave components
nsp = nfreq * nky # total number of 2D subproblems
velocity = 2500 # m/s
vanom = 500 # m/s
cPert = np.zeros(dims)
cPert[(nx/2)-20:(nx/2)+20,(nz/2)-20:(nz/2)+20] = vanom
c = np.fliplr(np.ones(dims) * velocity)
cFlat = c
c += np.fliplr(cPert)
cTrue = c
srcs = np.array([np.ones(101)*32, np.zeros(101), np.linspace(32, 232, 101)]).T
recs = np.array([np.ones(101)*132, np.zeros(101), np.linspace(32, 232, 101)]).T
nsrc = len(srcs)
nrec = len(recs)
recmode = 'fixed'
geom = {
'src': srcs,
'rec': recs,
'mode': 'fixed',
}
cache = False
cacheDir = '.'
# Base configuration for all subproblems
systemConfig = {
'dx': cellSize, # m
'dz': cellSize, # m
'c': c.T, # m/s
'rho': rho.T, # density
'Q': Q, # can be inf
'nx': nx, # count
'nz': nz, # count
'freeSurf': freeSurf, # t r b l
'nPML': nPML,
'geom': geom,
'cache': cache,
'cacheDir': cacheDir,
'freqs': freqs,
'nky': nky,
}
sp = SeisFDFDProblem(systemConfig)
jobs = sp.forwardAccumulate()
def trackprogress(sp, jobs, interval=1.0):
systemJobs = jobs['systemJobs']
jobkeys = systemJobs.keys()
jobkeys.sort()
fig = plt.figure()
ax1 = fig.add_axes([0.1,0.10,0.15,0.85], xlabel='Subproblem', ylabel='Source')
ax1.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax2 = fig.add_axes([0.25,0.10,0.75,0.85], xlabel='Receiver')
im1 = ax2.imshow(np.zeros((nsrc, nrec)), vmin=-50*nky, vmax=50*nky, cmap=cm.bwr)
im2 = ax1.imshow(np.zeros((nsrc, nsp)), vmin=0, vmax=2, interpolation='nearest', aspect='auto')
plt.show()
def update():
#try:
# res = reduce(np.add, sp.par['dview']['resultTracker'])
#except:
# res = {}
#keys = [(freqs[0], i) for i in range(nrec)]
#resarr = np.array([res[key] if key in res.keys() else np.zeros(nrec) for key in keys])
status = np.zeros((len(jobkeys),nsrc))
for i, key in enumerate(jobkeys):
status[i,:] = 1. * systemJobs[key][0].ready()#np.array([systemJobs[key][j].ready() for j in xrange(1)])
if systemJobs[key][0].ready():#for j in np.argwhere(status[i,:]):
status[i,:] += not systemJobs[key][0].successful()
#im1.set_data(resarr.real)
im2.set_data(status.T)
fig.canvas.draw()
fig.canvas.flush_events()
while True:
try:
plt.pause(interval)
update()
except KeyboardInterrupt:
print('Exiting loop...')
break
finally:
if not reduce(np.add, sp.par['dview']['resultTracker.interactcounter']) < (nsp * nsrc):
break
trackprogress(sp, jobs)
| [
"numpy.ones",
"matplotlib.rcParams.update",
"numpy.fliplr",
"zephyr.Problem.SeisFDFDProblem",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.ticker.MaxNLocator",
"numpy.linspace",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show"
] | [((218, 263), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (244, 263), False, 'import matplotlib\n'), ((953, 967), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (961, 967), True, 'import numpy as np\n'), ((1103, 1119), 'numpy.fliplr', 'np.fliplr', (['cPert'], {}), '(cPert)\n', (1112, 1119), True, 'import numpy as np\n'), ((1988, 2017), 'zephyr.Problem.SeisFDFDProblem', 'SeisFDFDProblem', (['systemConfig'], {}), '(systemConfig)\n', (2003, 2017), False, 'from zephyr.Problem import SeisFDFDProblem\n'), ((2192, 2204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2202, 2204), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2626), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2624, 2626), True, 'import matplotlib.pyplot as plt\n'), ((656, 669), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (663, 669), True, 'import numpy as np\n'), ((1047, 1060), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (1054, 1060), True, 'import numpy as np\n'), ((2326, 2358), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (2344, 2358), True, 'import matplotlib.ticker as ticker\n'), ((2447, 2469), 'numpy.zeros', 'np.zeros', (['(nsrc, nrec)'], {}), '((nsrc, nrec))\n', (2455, 2469), True, 'import numpy as np\n'), ((2532, 2553), 'numpy.zeros', 'np.zeros', (['(nsrc, nsp)'], {}), '((nsrc, nsp))\n', (2540, 2553), True, 'import numpy as np\n'), ((1178, 1191), 'numpy.zeros', 'np.zeros', (['(101)'], {}), '(101)\n', (1186, 1191), True, 'import numpy as np\n'), ((1193, 1218), 'numpy.linspace', 'np.linspace', (['(32)', '(232)', '(101)'], {}), '(32, 232, 101)\n', (1204, 1218), True, 'import numpy as np\n'), ((1265, 1278), 'numpy.zeros', 'np.zeros', (['(101)'], {}), '(101)\n', (1273, 1278), True, 'import numpy as np\n'), ((1280, 1305), 'numpy.linspace', 'np.linspace', (['(32)', '(232)', '(101)'], {}), '(32, 232, 101)\n', (1291, 
1305), True, 'import numpy as np\n'), ((3466, 3485), 'matplotlib.pyplot.pause', 'plt.pause', (['interval'], {}), '(interval)\n', (3475, 3485), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1173), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (1168, 1173), True, 'import numpy as np\n'), ((1247, 1259), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (1254, 1259), True, 'import numpy as np\n')] |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FedAvgM tests."""
from typing import List, Tuple
from unittest.mock import MagicMock
from numpy import array, float32
from numpy.testing import assert_almost_equal
from flwr.common import FitRes, Weights, parameters_to_weights
from flwr.common.parameter import weights_to_parameters
from flwr.server.client_proxy import ClientProxy
from .fedavgm import FedAvgM
def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None:
"""Test aggregate with near-one learning rate and no momentum."""
# Prepare
weights0_0 = array([[1, 2, 3], [4, 5, 6]], dtype=float32)
weights0_1 = array([7, 8, 9, 10], dtype=float32)
weights1_0 = array([[1, 2, 3], [4, 5, 6]], dtype=float32)
weights1_1 = array([7, 8, 9, 10], dtype=float32)
initial_weights: Weights = [
array([[0, 0, 0], [0, 0, 0]], dtype=float32),
array([0, 0, 0, 0], dtype=float32),
]
results: List[Tuple[ClientProxy, FitRes]] = [
(
MagicMock(),
FitRes(
parameters=weights_to_parameters([weights0_0, weights0_1]),
num_examples=1,
metrics={},
),
),
(
MagicMock(),
FitRes(
parameters=weights_to_parameters([weights1_0, weights1_1]),
num_examples=2,
metrics={},
),
),
]
failures: List[BaseException] = []
expected: Weights = [
array([[1, 2, 3], [4, 5, 6]], dtype=float32),
array([7, 8, 9, 10], dtype=float32),
]
strategy = FedAvgM(
initial_parameters=weights_to_parameters(initial_weights),
server_learning_rate=1.0 + 1e-9,
)
# Execute
actual, _ = strategy.aggregate_fit(1, results, failures)
# Assert
assert actual
for w_act, w_exp in zip(parameters_to_weights(actual), expected):
assert_almost_equal(w_act, w_exp)
def test_aggregate_fit_server_learning_rate_and_momentum() -> None:
"""Test aggregate with near-one learning rate and near-zero momentum."""
# Prepare
weights0_0 = array([[1, 2, 3], [4, 5, 6]], dtype=float32)
weights0_1 = array([7, 8, 9, 10], dtype=float32)
weights1_0 = array([[1, 2, 3], [4, 5, 6]], dtype=float32)
weights1_1 = array([7, 8, 9, 10], dtype=float32)
initial_weights: Weights = [
array([[0, 0, 0], [0, 0, 0]], dtype=float32),
array([0, 0, 0, 0], dtype=float32),
]
results: List[Tuple[ClientProxy, FitRes]] = [
(
MagicMock(),
FitRes(
parameters=weights_to_parameters([weights0_0, weights0_1]),
num_examples=1,
metrics={},
),
),
(
MagicMock(),
FitRes(
parameters=weights_to_parameters([weights1_0, weights1_1]),
num_examples=2,
metrics={},
),
),
]
failures: List[BaseException] = []
expected: Weights = [
array([[1, 2, 3], [4, 5, 6]], dtype=float32),
array([7, 8, 9, 10], dtype=float32),
]
strategy = FedAvgM(
initial_parameters=weights_to_parameters(initial_weights),
server_learning_rate=1.0 + 1e-9,
server_momentum=1.0e-9,
)
# Execute
# First round (activate momentum)
actual, _ = strategy.aggregate_fit(1, results, failures)
# Second round (update momentum)
actual, _ = strategy.aggregate_fit(2, results, failures)
# Assert
assert actual
for w_act, w_exp in zip(parameters_to_weights(actual), expected):
assert_almost_equal(w_act, w_exp)
| [
"flwr.common.parameter.weights_to_parameters",
"unittest.mock.MagicMock",
"numpy.array",
"numpy.testing.assert_almost_equal",
"flwr.common.parameters_to_weights"
] | [((1222, 1266), 'numpy.array', 'array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=float32)\n', (1227, 1266), False, 'from numpy import array, float32\n'), ((1284, 1319), 'numpy.array', 'array', (['[7, 8, 9, 10]'], {'dtype': 'float32'}), '([7, 8, 9, 10], dtype=float32)\n', (1289, 1319), False, 'from numpy import array, float32\n'), ((1337, 1381), 'numpy.array', 'array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=float32)\n', (1342, 1381), False, 'from numpy import array, float32\n'), ((1399, 1434), 'numpy.array', 'array', (['[7, 8, 9, 10]'], {'dtype': 'float32'}), '([7, 8, 9, 10], dtype=float32)\n', (1404, 1434), False, 'from numpy import array, float32\n'), ((2771, 2815), 'numpy.array', 'array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=float32)\n', (2776, 2815), False, 'from numpy import array, float32\n'), ((2833, 2868), 'numpy.array', 'array', (['[7, 8, 9, 10]'], {'dtype': 'float32'}), '([7, 8, 9, 10], dtype=float32)\n', (2838, 2868), False, 'from numpy import array, float32\n'), ((2886, 2930), 'numpy.array', 'array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=float32)\n', (2891, 2930), False, 'from numpy import array, float32\n'), ((2948, 2983), 'numpy.array', 'array', (['[7, 8, 9, 10]'], {'dtype': 'float32'}), '([7, 8, 9, 10], dtype=float32)\n', (2953, 2983), False, 'from numpy import array, float32\n'), ((1477, 1521), 'numpy.array', 'array', (['[[0, 0, 0], [0, 0, 0]]'], {'dtype': 'float32'}), '([[0, 0, 0], [0, 0, 0]], dtype=float32)\n', (1482, 1521), False, 'from numpy import array, float32\n'), ((1531, 1565), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {'dtype': 'float32'}), '([0, 0, 0, 0], dtype=float32)\n', (1536, 1565), False, 'from numpy import array, float32\n'), ((2137, 2181), 'numpy.array', 'array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'float32'}), '([[1, 2, 3], [4, 5, 6]], 
dtype=float32)\n', (2142, 2181), False, 'from numpy import array, float32\n'), ((2191, 2226), 'numpy.array', 'array', (['[7, 8, 9, 10]'], {'dtype': 'float32'}), '([7, 8, 9, 10], dtype=float32)\n', (2196, 2226), False, 'from numpy import array, float32\n'), ((2509, 2538), 'flwr.common.parameters_to_weights', 'parameters_to_weights', (['actual'], {}), '(actual)\n', (2530, 2538), False, 'from flwr.common import FitRes, Weights, parameters_to_weights\n'), ((2559, 2592), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['w_act', 'w_exp'], {}), '(w_act, w_exp)\n', (2578, 2592), False, 'from numpy.testing import assert_almost_equal\n'), ((3026, 3070), 'numpy.array', 'array', (['[[0, 0, 0], [0, 0, 0]]'], {'dtype': 'float32'}), '([[0, 0, 0], [0, 0, 0]], dtype=float32)\n', (3031, 3070), False, 'from numpy import array, float32\n'), ((3080, 3114), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {'dtype': 'float32'}), '([0, 0, 0, 0], dtype=float32)\n', (3085, 3114), False, 'from numpy import array, float32\n'), ((3686, 3730), 'numpy.array', 'array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=float32)\n', (3691, 3730), False, 'from numpy import array, float32\n'), ((3740, 3775), 'numpy.array', 'array', (['[7, 8, 9, 10]'], {'dtype': 'float32'}), '([7, 8, 9, 10], dtype=float32)\n', (3745, 3775), False, 'from numpy import array, float32\n'), ((4227, 4256), 'flwr.common.parameters_to_weights', 'parameters_to_weights', (['actual'], {}), '(actual)\n', (4248, 4256), False, 'from flwr.common import FitRes, Weights, parameters_to_weights\n'), ((4277, 4310), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['w_act', 'w_exp'], {}), '(w_act, w_exp)\n', (4296, 4310), False, 'from numpy.testing import assert_almost_equal\n'), ((1646, 1657), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1655, 1657), False, 'from unittest.mock import MagicMock\n'), ((1863, 1874), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', 
(1872, 1874), False, 'from unittest.mock import MagicMock\n'), ((2286, 2324), 'flwr.common.parameter.weights_to_parameters', 'weights_to_parameters', (['initial_weights'], {}), '(initial_weights)\n', (2307, 2324), False, 'from flwr.common.parameter import weights_to_parameters\n'), ((3195, 3206), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3204, 3206), False, 'from unittest.mock import MagicMock\n'), ((3412, 3423), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3421, 3423), False, 'from unittest.mock import MagicMock\n'), ((3835, 3873), 'flwr.common.parameter.weights_to_parameters', 'weights_to_parameters', (['initial_weights'], {}), '(initial_weights)\n', (3856, 3873), False, 'from flwr.common.parameter import weights_to_parameters\n'), ((1706, 1753), 'flwr.common.parameter.weights_to_parameters', 'weights_to_parameters', (['[weights0_0, weights0_1]'], {}), '([weights0_0, weights0_1])\n', (1727, 1753), False, 'from flwr.common.parameter import weights_to_parameters\n'), ((1923, 1970), 'flwr.common.parameter.weights_to_parameters', 'weights_to_parameters', (['[weights1_0, weights1_1]'], {}), '([weights1_0, weights1_1])\n', (1944, 1970), False, 'from flwr.common.parameter import weights_to_parameters\n'), ((3255, 3302), 'flwr.common.parameter.weights_to_parameters', 'weights_to_parameters', (['[weights0_0, weights0_1]'], {}), '([weights0_0, weights0_1])\n', (3276, 3302), False, 'from flwr.common.parameter import weights_to_parameters\n'), ((3472, 3519), 'flwr.common.parameter.weights_to_parameters', 'weights_to_parameters', (['[weights1_0, weights1_1]'], {}), '([weights1_0, weights1_1])\n', (3493, 3519), False, 'from flwr.common.parameter import weights_to_parameters\n')] |
import numpy as np
from ..core import RewardFunction
from ..utils import set_state_array
import warnings
class WeightedSumOfErrors(RewardFunction):
"""
Reward Function that calculates the reward as the weighted sum of errors with a certain power.
.. math:
`reward = - reward_weights * (abs(state - reference)/ state_length) ** reward_power `
.. math:
` limit\_violation\_reward = -1 / (1 - \gamma)`
| state_length[i] = 1 for states with positive values only.
| state_length[i] = 2 for states with positive and negative values.
"""
def __init__(self, reward_weights=None, normed=False, observed_states=None, gamma=0.9, reward_power=1, **__):
"""
Args:
reward_weights(dict/list/ndarray(float)): Dict mapping state names to reward_weights, 0 otherwise.
Or an array with the reward_weights on the position of the state_names.
normed(bool): If True, the reward weights will be normalized to 1.
observed_states(list(str)): List of state names of which the limit are observed. Default: no observation.
gamma(float): Discount factor for the reward punishment. Should equal agents' discount factor gamma.
reward_power(dict/list(float)/float): Reward power for each of the systems states.
"""
self._n = reward_power
self._reward_weights = reward_weights
self._state_length = None
self._normed = normed
self._gamma = gamma
super().__init__(observed_states)
def set_modules(self, physical_system, reference_generator):
super().set_modules(physical_system, reference_generator)
self._state_length = self._physical_system.state_space.high - self._physical_system.state_space.low
self._n = set_state_array(self._n, self._physical_system.state_names)
referenced_states = reference_generator.referenced_states
if self._reward_weights is None:
reward_weights = dict.fromkeys(
np.array(physical_system.state_names)[referenced_states],
1/len(np.array(physical_system.state_names)[referenced_states])
)
else:
reward_weights = self._reward_weights
self._reward_weights = set_state_array(reward_weights, self._physical_system.state_names)
if sum(self._reward_weights) == 0:
warnings.warn("All reward weights sum up to zero", Warning, stacklevel=2)
rw_sum = sum(self._reward_weights)
if self._normed:
self._reward_weights /= rw_sum
self.reward_range = (-1, 0)
else:
self.reward_range = (-rw_sum, 0)
def _limit_violation_reward(self, state):
return self.reward_range[0] / (1 - self._gamma)
def _reward(self, state, reference, *_):
return -np.sum(self._reward_weights * (abs(state - reference) / self._state_length)**self._n)
class ShiftedWeightedSumOfErrors(WeightedSumOfErrors):
"""
Weighted Sum of Errors shifted by the maximum negative reward to obtain rewards in the positive range.
.. math::
reward = max\_reward - reward\_weights * (abs(state - reference)/ state\_length) ** reward\_power
.. math::
limit~violation~reward = 0`
| state_length[i] = 1 for states with positive values only.
| state_length[i] = 2 for states with positive and negative values.
"""
def _reward(self, state, reference, *_):
return self.reward_range[1] + super()._reward(state, reference)
def _limit_violation_reward(self, state):
return 0.0
def set_modules(self, physical_system, reference_generator):
super().set_modules(physical_system, reference_generator)
self.reward_range = (0, -self.reward_range[0])
| [
"warnings.warn",
"numpy.array"
] | [((2453, 2526), 'warnings.warn', 'warnings.warn', (['"""All reward weights sum up to zero"""', 'Warning'], {'stacklevel': '(2)'}), "('All reward weights sum up to zero', Warning, stacklevel=2)\n", (2466, 2526), False, 'import warnings\n'), ((2077, 2114), 'numpy.array', 'np.array', (['physical_system.state_names'], {}), '(physical_system.state_names)\n', (2085, 2114), True, 'import numpy as np\n'), ((2158, 2195), 'numpy.array', 'np.array', (['physical_system.state_names'], {}), '(physical_system.state_names)\n', (2166, 2195), True, 'import numpy as np\n')] |
import numpy as np
# import PIL.Image
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# lowest = -1.0
lowest = 0.0
highest = 1.0
# --------------------------------------
# Color maps ([-1,1] -> [0,1]^3)
# --------------------------------------
def heatmap(x):
x = x[..., np.newaxis]
# positive relevance
hrp = 0.9 - np.clip(x-0.3, 0, 0.7)/0.7*0.5
hgp = 0.9 - np.clip(x-0.0, 0, 0.3)/0.3*0.5 - np.clip(x-0.3, 0, 0.7)/0.7*0.4
hbp = 0.9 - np.clip(x-0.0, 0, 0.3)/0.3*0.5 - np.clip(x-0.3, 0, 0.7)/0.7*0.4
# negative relevance
hrn = 0.9 - np.clip(-x-0.0, 0, 0.3)/0.3*0.5 - np.clip(-x-0.3, 0, 0.7)/0.7*0.4
hgn = 0.9 - np.clip(-x-0.0, 0, 0.3)/0.3*0.5 - np.clip(-x-0.3, 0, 0.7)/0.7*0.4
hbn = 0.9 - np.clip(-x-0.3, 0, 0.7)/0.7*0.5
r = hrp*(x >= 0)+hrn*(x < 0)
g = hgp*(x >= 0)+hgn*(x < 0)
b = hbp*(x >= 0)+hbn*(x < 0)
return np.concatenate([r, g, b], axis=-1)
def graymap(x):
x = x[..., np.newaxis]
return np.concatenate([x, x, x], axis=-1)*0.5+0.5
# --------------------------------------
# Visualizing data
# --------------------------------------
# def visualize(x,colormap,name):
#
# N = len(x)
# assert(N <= 16)
#
# x = colormap(x/np.abs(x).max())
#
# # Create a mosaic and upsample
# x = x.reshape([1, N, 29, 29, 3])
# x = np.pad(x, ((0, 0), (0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=1)
# x = x.transpose([0, 2, 1, 3, 4]).reshape([1*33, N*33, 3])
# x = np.kron(x, np.ones([2, 2, 1]))
#
# PIL.Image.fromarray((x*255).astype('byte'), 'RGB').save(name)
def plt_vector(x, colormap, num_headers):
N = len(x)
assert (N <= 16)
len_x = 54
len_y = num_headers
# size = int(np.ceil(np.sqrt(len(x[0]))))
length = len_y*len_x
data = np.zeros((N, length), dtype=np.float64)
data[:, :x.shape[1]] = x
data = colormap(data / np.abs(data).max())
# data = data.reshape([1, N, size, size, 3])
data = data.reshape([1, N, len_y, len_x, 3])
# data = np.pad(data, ((0, 0), (0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=1)
data = data.transpose([0, 2, 1, 3, 4]).reshape([1 * (len_y), N * (len_x), 3])
return data
# data = np.kron(data, np.ones([2, 2, 1])) # scales
def add_subplot(data, num_plots, plot_index, title, figure):
fig = figure
ax = fig.add_subplot(num_plots, 1, plot_index)
cax = ax.imshow(data, interpolation='nearest', aspect='auto')
# cbar = fig.colorbar(cax, ticks=[0, 1])
# cbar.ax.set_yticklabels(['0', '> 1']) # vertically oriented colorbar
ax.set_title(title)
def plot_data(data, title):
plt.figure(figsize=(6.4, 2.5)) # figuresize to make 16 headers plot look good
# plt.axis('scaled')
plt.imshow(data, interpolation='nearest', aspect='auto')
plt.title(title)
plt.tight_layout()
def plotNNFilter(units):
filters = units.shape[3]
plt.figure(1, figsize=(20, 20))
n_columns = 6
n_rows = np.ceil(filters / n_columns) + 1
for i in range(filters):
plt.subplot(n_rows, n_columns, i+1)
plt.title('Filter ' + str(i))
plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="gray")
| [
"matplotlib.pyplot.imshow",
"numpy.clip",
"numpy.ceil",
"numpy.abs",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.concatenate",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot"
] | [((885, 919), 'numpy.concatenate', 'np.concatenate', (['[r, g, b]'], {'axis': '(-1)'}), '([r, g, b], axis=-1)\n', (899, 919), True, 'import numpy as np\n'), ((1785, 1824), 'numpy.zeros', 'np.zeros', (['(N, length)'], {'dtype': 'np.float64'}), '((N, length), dtype=np.float64)\n', (1793, 1824), True, 'import numpy as np\n'), ((2627, 2657), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 2.5)'}), '(figsize=(6.4, 2.5))\n', (2637, 2657), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2791), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'interpolation': '"""nearest"""', 'aspect': '"""auto"""'}), "(data, interpolation='nearest', aspect='auto')\n", (2745, 2791), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2812), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2805, 2812), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2835), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2833, 2835), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2927), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(20, 20)'}), '(1, figsize=(20, 20))\n', (2906, 2927), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2987), 'numpy.ceil', 'np.ceil', (['(filters / n_columns)'], {}), '(filters / n_columns)\n', (2966, 2987), True, 'import numpy as np\n'), ((3029, 3066), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_columns', '(i + 1)'], {}), '(n_rows, n_columns, i + 1)\n', (3040, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3111, 3178), 'matplotlib.pyplot.imshow', 'plt.imshow', (['units[0, :, :, i]'], {'interpolation': '"""nearest"""', 'cmap': '"""gray"""'}), "(units[0, :, :, i], interpolation='nearest', cmap='gray')\n", (3121, 3178), True, 'import matplotlib.pyplot as plt\n'), ((977, 1011), 'numpy.concatenate', 'np.concatenate', (['[x, x, x]'], {'axis': '(-1)'}), '([x, x, x], axis=-1)\n', (991, 1011), True, 'import numpy as np\n'), ((344, 368), 'numpy.clip', 
'np.clip', (['(x - 0.3)', '(0)', '(0.7)'], {}), '(x - 0.3, 0, 0.7)\n', (351, 368), True, 'import numpy as np\n'), ((424, 448), 'numpy.clip', 'np.clip', (['(x - 0.3)', '(0)', '(0.7)'], {}), '(x - 0.3, 0, 0.7)\n', (431, 448), True, 'import numpy as np\n'), ((504, 528), 'numpy.clip', 'np.clip', (['(x - 0.3)', '(0)', '(0.7)'], {}), '(x - 0.3, 0, 0.7)\n', (511, 528), True, 'import numpy as np\n'), ((611, 636), 'numpy.clip', 'np.clip', (['(-x - 0.3)', '(0)', '(0.7)'], {}), '(-x - 0.3, 0, 0.7)\n', (618, 636), True, 'import numpy as np\n'), ((693, 718), 'numpy.clip', 'np.clip', (['(-x - 0.3)', '(0)', '(0.7)'], {}), '(-x - 0.3, 0, 0.7)\n', (700, 718), True, 'import numpy as np\n'), ((741, 766), 'numpy.clip', 'np.clip', (['(-x - 0.3)', '(0)', '(0.7)'], {}), '(-x - 0.3, 0, 0.7)\n', (748, 766), True, 'import numpy as np\n'), ((391, 415), 'numpy.clip', 'np.clip', (['(x - 0.0)', '(0)', '(0.3)'], {}), '(x - 0.0, 0, 0.3)\n', (398, 415), True, 'import numpy as np\n'), ((471, 495), 'numpy.clip', 'np.clip', (['(x - 0.0)', '(0)', '(0.3)'], {}), '(x - 0.0, 0, 0.3)\n', (478, 495), True, 'import numpy as np\n'), ((577, 602), 'numpy.clip', 'np.clip', (['(-x - 0.0)', '(0)', '(0.3)'], {}), '(-x - 0.0, 0, 0.3)\n', (584, 602), True, 'import numpy as np\n'), ((659, 684), 'numpy.clip', 'np.clip', (['(-x - 0.0)', '(0)', '(0.3)'], {}), '(-x - 0.0, 0, 0.3)\n', (666, 684), True, 'import numpy as np\n'), ((1881, 1893), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (1887, 1893), True, 'import numpy as np\n')] |
import pandas as pd
import tensorflow as tf
import matplotlib as plt
import glob
import numpy as np
from tensorflow import keras
from keras import Sequential
from sklearn.utils import shuffle
import sklearn.model_selection
STEP_SIZE = 20
SENSOR_NUM = 6
NUM_CLASSESS = 7
df = pd.concat([pd.read_csv(f) for f in glob.glob('./train_new/*.csv')], ignore_index = True)
print(df)
Label = { 'STD':0, 'WAL':1, 'JOG':2 , 'JUM':3, 'FALL':4 , 'LYI':5,'RA':6}
class_names = { 0:'STD', 1:'WAL', 2:'JOG' , 3:'JUM', 4:'FALL', 5:'LYI',6:'RA'}
dataSet = df[["acc_x", "acc_y", "acc_z", "gyro_x","gyro_y","gyro_z", "label"]]
dataSet.label = [Label[item] for item in dataSet.label]
print(dataSet)
x = np.array(dataSet.drop(["label"],1))
y = np.array(dataSet["label"])
modDataset = []
modTruth =[]
for i in range(len(x)-STEP_SIZE):
temp = []
for j in range(i, i+STEP_SIZE):
temp.append(x[j])
modDataset.append(temp)
for i in range(len(y)-STEP_SIZE):
temp = []
for j in range(i, i+STEP_SIZE):
temp.append(y[j])
most_common_item = max(temp, key = temp.count)
modTruth.append(most_common_item)
print(len(modDataset))
print(len(modTruth))
print(len(modDataset[0]))
print(modDataset[1])
modDataset = np.array(modDataset).reshape(-1, STEP_SIZE, SENSOR_NUM)
print(modDataset)
y = np.array(modTruth)
x = modDataset
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x,y,test_size = 0.3)
print(x_train)
print(y_train)
model = Sequential()
model.add(keras.layers.Flatten(input_shape=(STEP_SIZE, SENSOR_NUM)))
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(NUM_CLASSESS, activation='softmax'))
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.summary()
model.fit(x_train,y_train, epochs=30, validation_split =0.1)
model.save('./model_x/')
pred = model.predict(x_test)
results = np.argmax(pred, axis=1)
for i in range(50) :
if class_names[y_test[i]] == class_names[results[i]]:
print("prediction: ", class_names[results[i]], " actual: ", class_names[y_test[i]], "prediction: Correct!!!" )
else:
print("prediction: ", class_names[results[i]], " actual: ", class_names[y_test[i]], "prediction: Wrong :( " ) | [
"keras.Sequential",
"pandas.read_csv",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"glob.glob"
] | [((764, 790), 'numpy.array', 'np.array', (["dataSet['label']"], {}), "(dataSet['label'])\n", (772, 790), True, 'import numpy as np\n'), ((1387, 1405), 'numpy.array', 'np.array', (['modTruth'], {}), '(modTruth)\n', (1395, 1405), True, 'import numpy as np\n'), ((1567, 1579), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (1577, 1579), False, 'from keras import Sequential\n'), ((2156, 2179), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (2165, 2179), True, 'import numpy as np\n'), ((1591, 1648), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(STEP_SIZE, SENSOR_NUM)'}), '(input_shape=(STEP_SIZE, SENSOR_NUM))\n', (1611, 1648), False, 'from tensorflow import keras\n'), ((1661, 1703), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1679, 1703), False, 'from tensorflow import keras\n'), ((1716, 1741), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (1736, 1741), False, 'from tensorflow import keras\n'), ((1754, 1796), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1772, 1796), False, 'from tensorflow import keras\n'), ((1809, 1834), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (1829, 1834), False, 'from tensorflow import keras\n'), ((1847, 1901), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['NUM_CLASSESS'], {'activation': '"""softmax"""'}), "(NUM_CLASSESS, activation='softmax')\n", (1865, 1901), False, 'from tensorflow import keras\n'), ((307, 321), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (318, 321), True, 'import pandas as pd\n'), ((1299, 1319), 'numpy.array', 'np.array', (['modDataset'], {}), '(modDataset)\n', (1307, 1319), True, 'import numpy as np\n'), ((331, 361), 'glob.glob', 'glob.glob', (['"""./train_new/*.csv"""'], {}), 
"('./train_new/*.csv')\n", (340, 361), False, 'import glob\n')] |
# -*- coding: utf-8 -*-
#
from datetime import datetime
import sys
import pathlib
import numpy as np
import math
from numpy.core.defchararray import center
# このソースのあるディレクトリの絶対パスを取得
current_dir = pathlib.Path(__file__).resolve().parent
# モジュールのあるパスを追加
sys.path.append(str(current_dir) + '/../')
sys.path.append(str(current_dir) + '/../src/')
from mmd.PmxReader import PmxReader # noqa
from mmd.VmdReader import VmdReader # noqa
from mmd.VmdWriter import VmdWriter # noqa
from mmd.PmxWriter import PmxWriter # noqa
from mmd.PmxData import PmxModel, Vertex, Material, Bone, Morph, DisplaySlot, RigidBody, Joint, Bdef1, Bdef2, Bdef4 # noqa
from mmd.VmdData import VmdMotion, VmdBoneFrame, VmdCameraFrame, VmdInfoIk, VmdLightFrame, VmdMorphFrame, VmdShadowFrame, VmdShowIkFrame # noqa
from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from module.MOptions import MOptionsDataSet # noqa
from module.MParams import BoneLinks # noqa
from utils import MBezierUtils, MServiceUtils # noqa
from utils.MException import SizingException # noqa
from utils.MLogger import MLogger # noqa
# Configure module-wide logging: file output enabled, DEBUG_INFO verbosity.
MLogger.initialize(level=MLogger.DEBUG_INFO, is_file=True)
logger = MLogger(__name__, level=MLogger.DEBUG_INFO)
def exec() -> None:
    """
    Build a physics-enabled "flying carpet" PMX model and save it to disk.

    Loads a base carpet model, lays a (max_cnt + 1) x (max_cnt + 1) grid of
    bones over the mesh's XZ extent, attaches rigid bodies and joints so the
    cloth can be driven by physics, re-weights every vertex to its (up to)
    four surrounding grid bones, and writes the result as a new PMX file.

    NOTE(review): input and output paths are hard-coded to a local MMD
    installation; adjust them before running elsewhere.
    """
    # Grid resolution: bones are placed on a (max_cnt + 1) x (max_cnt + 1) grid.
    max_cnt = 8
    edge_size = 1
    # Grid index of the central bone (used later to parent the dummy bone).
    center_cnt = int(max_cnt / 2)
    model_prefix = '絨毯-'
    # Load the source carpet model (validation/sizing disabled for a raw read).
    model = PmxReader("D:\\MMD\\MikuMikuDance_v926x64\\UserFile\\Accessory\\家具\\空飛ぶ絨毯 miu\\空飛ぶ絨毯2.pmx", is_check=False, is_sizing=False).read_data()
    # Gather every vertex position to determine the mesh's bounding extent.
    vertices_xs = []
    vertices_zs = []
    vertices_vec = []
    for k, vs in model.vertices.items():
        for v in vs:
            vertices_vec.append(v.position.data())
            vertices_xs.append(v.position.x())
            vertices_zs.append(v.position.z())
    vertices_xs = np.sort(np.unique(vertices_xs))
    vertices_zs = np.sort(np.unique(vertices_zs))
    # Axis-aligned bounding box of the mesh.
    max_vec = MVector3D(np.max(vertices_vec, axis=0))
    min_vec = MVector3D(np.min(vertices_vec, axis=0))
    size = (max_vec - min_vec)
    display_name = "絨毯回転"
    model.display_slots[display_name] = DisplaySlot(display_name, display_name, 0, 0)
    # Grid-line coordinates: X ascending, Z descending; the exact bounding-box
    # edge is appended so the last row/column lands on the mesh boundary.
    bone_xs = np.append(np.arange(min_vec.x(), max_vec.x(), (size.x() / max_cnt)), max_vec.x())
    bone_zs = np.append(np.arange(max_vec.z(), min_vec.z(), -(size.z() / max_cnt)), min_vec.z())
    # Size of a single grid cell (Y component is flat/zero).
    r_size = MVector3D(abs(bone_xs[1] - bone_xs[0]), 0, abs(bone_zs[1] - bone_zs[0]))
    # Create one bone per grid intersection and register it in the display slot.
    for zidx in range(max_cnt + 1):
        for xidx in range(max_cnt + 1):
            x = bone_xs[xidx]
            z = bone_zs[zidx]
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            bone = Bone(bone_name, bone_name, MVector3D(x, 0, z), len(list(model.bones.keys())) - 1, 0, 0x0000 | 0x0002 | 0x0008 | 0x0010)
            bone.index = len(list(model.bones.keys()))
            # register the bone in the model
            model.bones[bone.name] = bone
            # add the bone to the display slot
            model.display_slots[display_name].references.append(model.bones[bone.name].index)
    # Non-collision group bitmask for the physics bodies: only bit 15 set.
    rigidbody_no_collisions = 0
    for nc in range(16):
        if nc in [15]:
            rigidbody_no_collisions |= 1 << nc
    # Physics rigid bodies: one thin box per grid cell, centred on the cell.
    for zidx in range(max_cnt + 1):
        for xidx in range(max_cnt + 1):
            # if edge_size < xidx < max_cnt - edge_size and edge_size < zidx < max_cnt - edge_size:
            #     continue
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            bone = model.bones[bone_name]
            # Names of the four bones bounding this cell (edge cells may lack some).
            left_top_name = f'{model_prefix}{xidx:02d}-{zidx:02d}'
            right_top_name = f'{model_prefix}{(xidx + 1):02d}-{zidx:02d}'
            left_bottom_name = f'{model_prefix}{xidx:02d}-{(zidx + 1):02d}'
            right_bottom_name = f'{model_prefix}{(xidx + 1):02d}-{(zidx + 1):02d}'
            target_poses = []
            for target_name in [left_top_name, right_top_name, left_bottom_name, right_bottom_name]:
                if target_name in model.bones:
                    target_poses.append(model.bones[target_name].position.data())
            # Cell centre = mean of the existing corner bones, shifted half a cell.
            center_pos = MVector3D(np.mean(target_poses, axis=0)) + MVector3D(-r_size.x() / 2, 0, r_size.z() / 2)
            # Compensate when a corner bone is missing along the right/bottom edge.
            if right_top_name not in model.bones:
                center_pos.setX(center_pos.x() + (r_size.x() / 2))
            if left_bottom_name not in model.bones:
                center_pos.setZ(center_pos.z() - (r_size.z() / 2))
            # rigid body (NOTE(review): mode=1 presumably physics-driven — confirm
            # against the RigidBody definition)
            mode = 1
            rigidbody = RigidBody(bone.name, bone.english_name, bone.index, 14, rigidbody_no_collisions, \
                                  1, MVector3D(abs(r_size.x() / 2), 0.1, r_size.z() / 2), center_pos, MVector3D(), \
                                  0.5, 0.5, 0.5, 0, 1, mode)
            rigidbody.index = len(model.rigidbodies)
            model.rigidbodies[rigidbody.name] = rigidbody
    # Non-collision bitmask for the follower bodies: all bits except 0 and 14.
    bone_rigidbody_no_collisions = 0
    for nc in range(16):
        if nc not in [0, 14]:
            bone_rigidbody_no_collisions |= 1 << nc
    # Bone-following rigid bodies (final mode argument 0), offset 0.5 below.
    for zidx in range(max_cnt + 1):
        for xidx in range(max_cnt + 1):
            # if edge_size < xidx < max_cnt - edge_size and edge_size < zidx < max_cnt - edge_size:
            #     continue
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            bone = model.bones[bone_name]
            left_top_name = f'{model_prefix}{xidx:02d}-{zidx:02d}'
            right_top_name = f'{model_prefix}{(xidx + 1):02d}-{zidx:02d}'
            left_bottom_name = f'{model_prefix}{xidx:02d}-{(zidx + 1):02d}'
            right_bottom_name = f'{model_prefix}{(xidx + 1):02d}-{(zidx + 1):02d}'
            target_poses = []
            for target_name in [left_top_name, right_top_name, left_bottom_name, right_bottom_name]:
                if target_name in model.bones:
                    target_poses.append(model.bones[target_name].position.data())
            # Same cell centre as above, but lowered by 0.5 on the Y axis.
            center_pos = MVector3D(np.mean(target_poses, axis=0)) + MVector3D(-r_size.x() / 2, -0.5, r_size.z() / 2)
            if right_top_name not in model.bones:
                center_pos.setX(center_pos.x() + (r_size.x() / 2))
            if left_bottom_name not in model.bones:
                center_pos.setZ(center_pos.z() - (r_size.z() / 2))
            # rigid body (mode argument 0, unlike the physics bodies above)
            bone_rigidbody = RigidBody(f'{bone.name}_追従', f'{bone.name}_追従', bone.index, 14, bone_rigidbody_no_collisions, \
                                       1, MVector3D(abs(r_size.x() / 2), 0.5, r_size.z() / 2), center_pos, MVector3D(), \
                                       0.5, 0.5, 0.5, 0, 1, 0)
            bone_rigidbody.index = len(model.rigidbodies)
            model.rigidbodies[bone_rigidbody.name] = bone_rigidbody
    # Vertical joints: connect each cell's body to the one directly below (z+1).
    for zidx in range(max_cnt + 1):
        for xidx in range(max_cnt + 1):
            # if edge_size < xidx < max_cnt - edge_size and edge_size < zidx < max_cnt - edge_size:
            #     continue
            vertical_joint_name = f'{model_prefix}{(xidx):02d}-{(zidx + 1):02d}'
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            if vertical_joint_name in model.rigidbodies and bone_name in model.rigidbodies:
                joint = Joint(bone_name, bone_name, 0, model.rigidbodies[bone_name].index, model.rigidbodies[vertical_joint_name].index,
                              model.bones[bone_name].position - MVector3D(0, 0, r_size.z() / 2), MVector3D(), MVector3D(), MVector3D(),
                              MVector3D(math.radians(-(max_cnt * 5)), math.radians(0), math.radians(0)),
                              MVector3D(math.radians((max_cnt * 3)), math.radians(0), math.radians(0)), MVector3D(100, 100, 100), MVector3D(100, 100, 100))
                model.joints[joint.name] = joint
    # Diagonal joints to the lower-right neighbour (x+1, z+1).
    for zidx in range(max_cnt + 1):
        for xidx in range(max_cnt + 1):
            # if edge_size < xidx < max_cnt - edge_size and edge_size < zidx < max_cnt - edge_size:
            #     continue
            right_bottom_joint_name = f'{model_prefix}{(xidx + 1):02d}-{(zidx + 1):02d}'
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            if right_bottom_joint_name in model.rigidbodies and bone_name in model.rigidbodies:
                joint = Joint(f'{bone_name}_右', f'{bone_name}_右', 0, model.rigidbodies[bone_name].index, model.rigidbodies[right_bottom_joint_name].index,
                              model.bones[bone_name].position + MVector3D(r_size.x() / 2, 0, 0), MVector3D(), MVector3D(), MVector3D(),
                              MVector3D(math.radians(-(max_cnt * 5)), math.radians(0), math.radians(0)),
                              MVector3D(math.radians((max_cnt * 3)), math.radians(0), math.radians(0)), MVector3D(100, 100, 100), MVector3D(100, 100, 100))
                model.joints[joint.name] = joint
    # Diagonal joints to the lower-left neighbour (x-1, z+1); xidx starts at 1.
    for zidx in range(max_cnt + 1):
        for xidx in range(1, max_cnt + 1):
            # if edge_size < xidx < max_cnt - edge_size and edge_size < zidx < max_cnt - edge_size:
            #     continue
            left_bottom_joint_name = f'{model_prefix}{(xidx - 1):02d}-{(zidx + 1):02d}'
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            if left_bottom_joint_name in model.rigidbodies and bone_name in model.rigidbodies:
                joint = Joint(f'{bone_name}_左', f'{bone_name}_左', 0, model.rigidbodies[bone_name].index, model.rigidbodies[left_bottom_joint_name].index,
                              model.bones[bone_name].position + MVector3D(r_size.x() / 2, 0, 0), MVector3D(), MVector3D(), MVector3D(),
                              MVector3D(math.radians(-(max_cnt * 5)), math.radians(0), math.radians(0)),
                              MVector3D(math.radians((max_cnt * 3)), math.radians(0), math.radians(0)), MVector3D(100, 100, 100), MVector3D(100, 100, 100))
                model.joints[joint.name] = joint
    # Horizontal joints to the right-hand neighbour (x+1); rotation limits zeroed.
    for zidx in range(max_cnt + 1):
        for xidx in range(max_cnt + 1):
            # if edge_size < xidx < max_cnt - edge_size and edge_size < zidx < max_cnt - edge_size:
            #     continue
            horizonal_joint_name = f'{model_prefix}{(xidx + 1):02d}-{(zidx):02d}'
            bone_name = f'{model_prefix}{(xidx):02d}-{(zidx):02d}'
            if horizonal_joint_name in model.rigidbodies and bone_name in model.rigidbodies:
                joint = Joint(f'{bone_name}_横', f'{bone_name}_横', 0, model.rigidbodies[bone_name].index, model.rigidbodies[horizonal_joint_name].index,
                              model.bones[bone_name].position + MVector3D(r_size.x() / 2, 0, 0), MVector3D(), MVector3D(), MVector3D(),
                              MVector3D(), MVector3D(), MVector3D(0, 0, 0), MVector3D(0, 0, 0))
                model.joints[joint.name] = joint
    # Re-weight every vertex to the grid bones surrounding it, using bilinear
    # weights based on the vertex position within its grid cell.
    for xidx, (k, vs) in enumerate(model.vertices.items()):
        for zidx, v in enumerate(vs):
            target_xs = {}
            target_zs = {}
            # Collect the grid lines lying within one cell size of the vertex.
            for yi, y in enumerate(bone_zs):
                if y - (r_size.z()) < v.position.z() < y + (r_size.z()):
                    target_zs[y] = yi
            for xi, x in enumerate(bone_xs):
                if x - (r_size.x()) < v.position.x() < x + (r_size.x()):
                    target_xs[x] = xi
            # Corners of the surrounding cell (remember bone_zs is descending).
            r_min_vec = MVector3D(list(target_xs.keys())[0], 0, list(target_zs.keys())[-1])
            r_max_vec = MVector3D(list(target_xs.keys())[-1], 0, list(target_zs.keys())[0])
            # Bilinear interpolation weights for each corner, clamped at 0.
            left_top_weight = max(0, ((r_size.x() - (v.position.x() - r_min_vec.x())) / r_size.x()) * (((v.position.z() - r_min_vec.z())) / r_size.z()))
            left_bottom_weight = max(0, ((r_size.x() - (v.position.x() - r_min_vec.x())) / r_size.x()) * (r_size.z() - (v.position.z() - r_min_vec.z())) / r_size.z())
            right_bottom_weight = max(0, (((v.position.x() - r_min_vec.x())) / r_size.x()) * (r_size.z() - (v.position.z() - r_min_vec.z())) / r_size.z())
            right_top_weight = max(0, (((v.position.x() - r_min_vec.x())) / r_size.x()) * (((v.position.z() - r_min_vec.z())) / r_size.z()))
            total_weights = np.array([left_top_weight, right_top_weight, left_bottom_weight, right_bottom_weight])
            # Normalise the weights so they sum to 1.
            weight_values = total_weights / total_weights.sum(axis=0, keepdims=1)
            left_top_name = f'{model_prefix}{(target_xs[r_min_vec.x()]):02d}-{(target_zs[r_max_vec.z()]):02d}'
            right_top_name = f'{model_prefix}{(target_xs[r_max_vec.x()]):02d}-{(target_zs[r_max_vec.z()]):02d}'
            left_bottom_name = f'{model_prefix}{(target_xs[r_min_vec.x()]):02d}-{(target_zs[r_min_vec.z()]):02d}'
            right_bottom_name = f'{model_prefix}{(target_xs[r_max_vec.x()]):02d}-{(target_zs[r_min_vec.z()]):02d}'
            weight_names = np.array([left_top_name, right_top_name, left_bottom_name, right_bottom_name])
            target_names = weight_names[np.nonzero(weight_values)]
            # Pick the smallest deform type that covers the non-zero weights.
            if np.count_nonzero(weight_values) == 1:
                v.deform = Bdef1(model.bones[target_names[0]].index)
            elif np.count_nonzero(weight_values) == 2:
                v.deform = Bdef2(model.bones[target_names[0]].index, model.bones[target_names[1]].index, weight_values[weight_values.nonzero()][0])
            else:
                v.deform = Bdef4(model.bones[left_top_name].index, model.bones[right_top_name].index, model.bones[left_bottom_name].index, model.bones[right_bottom_name].index, \
                                  weight_values[0], weight_values[1], weight_values[2], weight_values[3])
    # Dummy bone parented to the central grid bone.
    bone_name = '乗ダミー'
    bone = Bone(bone_name, bone_name, MVector3D(), model.bones[f'{model_prefix}{center_cnt:02d}-{center_cnt:02d}'].index, 0, 0x0000 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x1000)
    bone.index = len(list(model.bones.keys()))
    model.bones[bone.name] = bone
    model.name = f"空飛ぶ絨毯_v1.00"
    model.comment = f"空飛ぶ絨毯: miu\r\n偽重力プラグイン: 千成様"
    result_dir = "D:\\MMD\\MikuMikuDance_v926x64\\UserFile\\Accessory\\家具\\空飛ぶ絨毯 miu"
    new_file_path = f"{result_dir}\\空飛ぶ絨毯_v1.00.pmx"
    # Write the finished model to disk and report the output path.
    pmx_writer = PmxWriter()
    pmx_writer.write(model, new_file_path)
    logger.warning(f"出力終了: {new_file_path}")
# Script entry point: build and export the flying-carpet model.
if __name__ == '__main__':
    exec()
| [
"numpy.mean",
"mmd.PmxReader.PmxReader",
"numpy.unique",
"pathlib.Path",
"utils.MLogger.MLogger",
"mmd.PmxData.Bdef1",
"numpy.max",
"module.MMath.MVector3D",
"numpy.array",
"numpy.count_nonzero",
"utils.MLogger.MLogger.initialize",
"math.radians",
"numpy.nonzero",
"numpy.min",
"mmd.PmxDa... | [((1120, 1178), 'utils.MLogger.MLogger.initialize', 'MLogger.initialize', ([], {'level': 'MLogger.DEBUG_INFO', 'is_file': '(True)'}), '(level=MLogger.DEBUG_INFO, is_file=True)\n', (1138, 1178), False, 'from utils.MLogger import MLogger\n'), ((1188, 1231), 'utils.MLogger.MLogger', 'MLogger', (['__name__'], {'level': 'MLogger.DEBUG_INFO'}), '(__name__, level=MLogger.DEBUG_INFO)\n', (1195, 1231), False, 'from utils.MLogger import MLogger\n'), ((2074, 2119), 'mmd.PmxData.DisplaySlot', 'DisplaySlot', (['display_name', 'display_name', '(0)', '(0)'], {}), '(display_name, display_name, 0, 0)\n', (2085, 2119), False, 'from mmd.PmxData import PmxModel, Vertex, Material, Bone, Morph, DisplaySlot, RigidBody, Joint, Bdef1, Bdef2, Bdef4\n'), ((13983, 13994), 'mmd.PmxWriter.PmxWriter', 'PmxWriter', ([], {}), '()\n', (13992, 13994), False, 'from mmd.PmxWriter import PmxWriter\n'), ((1793, 1815), 'numpy.unique', 'np.unique', (['vertices_xs'], {}), '(vertices_xs)\n', (1802, 1815), True, 'import numpy as np\n'), ((1843, 1865), 'numpy.unique', 'np.unique', (['vertices_zs'], {}), '(vertices_zs)\n', (1852, 1865), True, 'import numpy as np\n'), ((1892, 1920), 'numpy.max', 'np.max', (['vertices_vec'], {'axis': '(0)'}), '(vertices_vec, axis=0)\n', (1898, 1920), True, 'import numpy as np\n'), ((1946, 1974), 'numpy.min', 'np.min', (['vertices_vec'], {'axis': '(0)'}), '(vertices_vec, axis=0)\n', (1952, 1974), True, 'import numpy as np\n'), ((13521, 13532), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (13530, 13532), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((195, 217), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (207, 217), False, 'import pathlib\n'), ((1353, 1487), 'mmd.PmxReader.PmxReader', 'PmxReader', (['"""D:\\\\MMD\\\\MikuMikuDance_v926x64\\\\UserFile\\\\Accessory\\\\家具\\\\空飛ぶ絨毯 miu\\\\空飛ぶ絨毯2.pmx"""'], {'is_check': '(False)', 'is_sizing': '(False)'}), "(\n 
'D:\\\\MMD\\\\MikuMikuDance_v926x64\\\\UserFile\\\\Accessory\\\\家具\\\\空飛ぶ絨毯 miu\\\\空飛ぶ絨毯2.pmx'\n , is_check=False, is_sizing=False)\n", (1362, 1487), False, 'from mmd.PmxReader import PmxReader\n'), ((12031, 12121), 'numpy.array', 'np.array', (['[left_top_weight, right_top_weight, left_bottom_weight, right_bottom_weight]'], {}), '([left_top_weight, right_top_weight, left_bottom_weight,\n right_bottom_weight])\n', (12039, 12121), True, 'import numpy as np\n'), ((12681, 12759), 'numpy.array', 'np.array', (['[left_top_name, right_top_name, left_bottom_name, right_bottom_name]'], {}), '([left_top_name, right_top_name, left_bottom_name, right_bottom_name])\n', (12689, 12759), True, 'import numpy as np\n'), ((2656, 2674), 'module.MMath.MVector3D', 'MVector3D', (['x', '(0)', 'z'], {}), '(x, 0, z)\n', (2665, 2674), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((4612, 4623), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (4621, 4623), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((6445, 6456), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (6454, 6456), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((12800, 12825), 'numpy.nonzero', 'np.nonzero', (['weight_values'], {}), '(weight_values)\n', (12810, 12825), True, 'import numpy as np\n'), ((12843, 12874), 'numpy.count_nonzero', 'np.count_nonzero', (['weight_values'], {}), '(weight_values)\n', (12859, 12874), True, 'import numpy as np\n'), ((12908, 12949), 'mmd.PmxData.Bdef1', 'Bdef1', (['model.bones[target_names[0]].index'], {}), '(model.bones[target_names[0]].index)\n', (12913, 12949), False, 'from mmd.PmxData import PmxModel, Vertex, Material, Bone, Morph, DisplaySlot, RigidBody, Joint, Bdef1, Bdef2, Bdef4\n'), ((4049, 4078), 'numpy.mean', 'np.mean', (['target_poses'], {'axis': '(0)'}), '(target_poses, axis=0)\n', (4056, 4078), True, 
'import numpy as np\n'), ((5877, 5906), 'numpy.mean', 'np.mean', (['target_poses'], {'axis': '(0)'}), '(target_poses, axis=0)\n', (5884, 5906), True, 'import numpy as np\n'), ((7341, 7352), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (7350, 7352), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((7354, 7365), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (7363, 7365), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((7367, 7378), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (7376, 7378), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((7589, 7613), 'module.MMath.MVector3D', 'MVector3D', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (7598, 7613), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((7615, 7639), 'module.MMath.MVector3D', 'MVector3D', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (7624, 7639), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((8412, 8423), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (8421, 8423), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((8425, 8436), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (8434, 8436), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((8438, 8449), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (8447, 8449), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((8660, 8684), 'module.MMath.MVector3D', 'MVector3D', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (8669, 8684), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((8686, 8710), 
'module.MMath.MVector3D', 'MVector3D', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (8695, 8710), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((9482, 9493), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (9491, 9493), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((9495, 9506), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (9504, 9506), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((9508, 9519), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (9517, 9519), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((9730, 9754), 'module.MMath.MVector3D', 'MVector3D', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (9739, 9754), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((9756, 9780), 'module.MMath.MVector3D', 'MVector3D', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (9765, 9780), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((10539, 10550), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (10548, 10550), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((10552, 10563), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (10561, 10563), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((10565, 10576), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (10574, 10576), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((10608, 10619), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (10617, 10619), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), 
((10621, 10632), 'module.MMath.MVector3D', 'MVector3D', ([], {}), '()\n', (10630, 10632), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((10634, 10652), 'module.MMath.MVector3D', 'MVector3D', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (10643, 10652), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((10654, 10672), 'module.MMath.MVector3D', 'MVector3D', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (10663, 10672), False, 'from module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4\n'), ((12967, 12998), 'numpy.count_nonzero', 'np.count_nonzero', (['weight_values'], {}), '(weight_values)\n', (12983, 12998), True, 'import numpy as np\n'), ((13198, 13432), 'mmd.PmxData.Bdef4', 'Bdef4', (['model.bones[left_top_name].index', 'model.bones[right_top_name].index', 'model.bones[left_bottom_name].index', 'model.bones[right_bottom_name].index', 'weight_values[0]', 'weight_values[1]', 'weight_values[2]', 'weight_values[3]'], {}), '(model.bones[left_top_name].index, model.bones[right_top_name].index,\n model.bones[left_bottom_name].index, model.bones[right_bottom_name].\n index, weight_values[0], weight_values[1], weight_values[2],\n weight_values[3])\n', (13203, 13432), False, 'from mmd.PmxData import PmxModel, Vertex, Material, Bone, Morph, DisplaySlot, RigidBody, Joint, Bdef1, Bdef2, Bdef4\n'), ((7420, 7448), 'math.radians', 'math.radians', (['(-(max_cnt * 5))'], {}), '(-(max_cnt * 5))\n', (7432, 7448), False, 'import math\n'), ((7450, 7465), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (7462, 7465), False, 'import math\n'), ((7467, 7482), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (7479, 7482), False, 'import math\n'), ((7525, 7550), 'math.radians', 'math.radians', (['(max_cnt * 3)'], {}), '(max_cnt * 3)\n', (7537, 7550), False, 'import math\n'), ((7554, 7569), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', 
(7566, 7569), False, 'import math\n'), ((7571, 7586), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (7583, 7586), False, 'import math\n'), ((8491, 8519), 'math.radians', 'math.radians', (['(-(max_cnt * 5))'], {}), '(-(max_cnt * 5))\n', (8503, 8519), False, 'import math\n'), ((8521, 8536), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (8533, 8536), False, 'import math\n'), ((8538, 8553), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (8550, 8553), False, 'import math\n'), ((8596, 8621), 'math.radians', 'math.radians', (['(max_cnt * 3)'], {}), '(max_cnt * 3)\n', (8608, 8621), False, 'import math\n'), ((8625, 8640), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (8637, 8640), False, 'import math\n'), ((8642, 8657), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (8654, 8657), False, 'import math\n'), ((9561, 9589), 'math.radians', 'math.radians', (['(-(max_cnt * 5))'], {}), '(-(max_cnt * 5))\n', (9573, 9589), False, 'import math\n'), ((9591, 9606), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (9603, 9606), False, 'import math\n'), ((9608, 9623), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (9620, 9623), False, 'import math\n'), ((9666, 9691), 'math.radians', 'math.radians', (['(max_cnt * 3)'], {}), '(max_cnt * 3)\n', (9678, 9691), False, 'import math\n'), ((9695, 9710), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (9707, 9710), False, 'import math\n'), ((9712, 9727), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (9724, 9727), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
This module contains routines related to output of data to netCDF file.
Uses CF conventions to ensure standardized output
"""
from collections import OrderedDict
import numpy as np
import netCDF4 as nc
import yaml
class NCOutVar(object):
    """
    Container for an atmospheric variable destined for netCDF output.

    See the following site for information about the convention used:
    http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html

    Prototype variable properties dictionary (props)::

        props = {'name': 'VAR_STANDARD_NAME', 'scale_factor': SCALE,
                 'descr': 'VARIABLE DESCR', 'units': 'UNITS',
                 'short_name': 'output_name', 'calendar': 'standard',
                 'timevar': 'time', 'levvar': 'lev', 'latvar': 'lat', 'lonvar': 'lon',
                 'time_units': 'hours since 1900-01-01 00:00', 'lev_units': 'Pa',
                 'lon_units': 'degrees_east', 'lat_units': 'degrees_north'}

    Used in conjunction with write_to_netcdf() to make a standards
    compliant(-ish) output file
    """

    def __init__(self, data_in, props=None, coords=None):
        """
        Initialise NCOutVar to be used by `write_to_netcdf`.

        Parameters
        ----------
        data_in : array_like
            N-Dimensional input data to be written to netCDF file
        props : dict, optional
            Dictionary as described above, containing properties of <data_in>;
            a default set of properties is generated when omitted
        coords : dict, optional
            Dictionary containing at least one of ['time', 'lev', 'lat', 'lon'] or
            any combination of them, with associated coordinate variable
        """
        if props is None:
            self.gen_defualt_props()
        else:
            self.props = props
        # Coordinate variables are ordered (slow -> fast or t, p, y, x); use an
        # OrderedDict to make sure they stay that way for the output dimensions
        self.coords = OrderedDict()
        # Guard against the default: _set_coords cannot iterate over None
        if coords is not None:
            self._set_coords(coords)
        self.data = data_in

    def _set_coords(self, coords_in):
        """
        Set coordinate variables based on `self.props` and `coords_in`.

        Parameters
        ----------
        coords_in : dict
            Mapping of coordinate type ('time', 'lev', 'lat', 'lon') to its
            coordinate data array
        """
        # For each possible coordinate variable, (time, lev, lat, lon)
        # set up the coordinate array with its shape and units
        for coord_type in ['time', 'lev', 'lat', 'lon']:
            if coord_type in coords_in:
                coord_var = self.props['{}var'.format(coord_type)]
                coord_name = coord_type
                coord_units = self.props['{}_units'.format(coord_type)]
                # The vertical coordinate's standard name depends on its units
                if coord_var == 'lev' and coord_units in ['Pa', 'kPa', 'hPa', 'mb']:
                    coord_name = 'air_pressure'
                elif coord_var == 'lev' and coord_units == 'K':
                    coord_name = 'air_potential_temperature'
                self.coords[coord_var] = {'cdata': coords_in[coord_type],
                                          'name': coord_name, 'units': coord_units}

    def gen_defualt_props(self):
        """
        Generate default set of variable properties.

        Used in case that properties are not passed to __init__ method, and props is None.
        (Method name retains its original spelling for backward compatibility.)
        """
        props = {'name': 'VAR_STANDARD_NAME', 'scale_factor': 1.0,
                 'descr': 'VARIABLE DESCR', 'units': 'UNITS', 'short_name': 'DATA',
                 'calendar': 'standard', 'time_units': 'hours since 1900-01-01 00:00',
                 'timevar': 'time', 'levvar': 'lev', 'latvar': 'lat', 'lonvar': 'lon',
                 'lev_units': 'Pa', 'lon_units': 'degrees_east',
                 'lat_units': 'degrees_north'}
        self.props = props

    def get_props_from(self, copy_from):
        """
        Copy properties and coordinates from another NCOutVar.

        Parameters
        ----------
        copy_from : NCOutVar
            NCOutVar from which to get properties
        """
        self.props = dict(copy_from.props)
        # `copy_from.coords` is already in processed form (keyed by coordinate
        # variable name), so copy it directly. The previous implementation
        # called self._set_coords() without its required argument, which
        # raised a TypeError.
        self.coords = OrderedDict(copy_from.coords)

    def set_prop(self, prop_name, prop_in=None):
        """
        Change a single property.

        Parameters
        ----------
        prop_name : string
            Name of property to change
        prop_in : any
            Set self.props[prop_name] = prop_in
        """
        self.props[prop_name] = prop_in

    def set_props(self, prop_dict=None):
        """
        Set multiple properties at once.

        Parameters
        ----------
        prop_dict : dict
            Format of {'prop_name': new prop value, ...}
        """
        for prop in prop_dict:
            self.set_prop(prop, prop_dict[prop])
def write_to_netcdf(data_in, out_file, file_attrs=None):
    """
    Write (a list of) NCOutVar variable(s) to a netCDF file.

    Uses zlib compression level 5. Dimensions and coordinate variables are
    created from the first variable only, so all variables must share the
    same coordinates.

    Parameters
    ----------
    data_in : NCOutVar or list of :py:meth:`~NCOutVar`
        NCOutVar(s) to write to file
    out_file : string
        Name of file to write output
    file_attrs : dict or iterable of (name, value) pairs, optional
        Extra global attributes to set on the file; dict values are
        serialised to YAML strings (netCDF attributes cannot hold dicts)
    """
    if not isinstance(data_in, list):
        data_in = [data_in]  # Just in case someone forgets to pass a list of variables

    # Open netCDF file for writing
    ncfile = nc.Dataset(out_file, mode='w')

    # Loop over coordinates, create those dimensions and variables in the netCDF file
    for coord_name in data_in[0].coords:
        if coord_name == 'time':
            dtype = np.dtype('double').char
            # Unlimited (record) dimension so further times can be appended
            ncfile.createDimension(coord_name, size=None)
        else:
            dtype = np.dtype('float32').char
            ncfile.createDimension(coord_name,
                                   size=len(data_in[0].coords[coord_name]['cdata']))
        cvi = ncfile.createVariable(coord_name, dtype, (coord_name,), zlib=True,
                                    complevel=5)
        if coord_name == 'time':
            cvi.calendar = data_in[0].props['calendar']
        cvi.units = data_in[0].coords[coord_name]['units']
        cvi.standard_name = data_in[0].coords[coord_name]['name']
        cvi[:] = data_in[0].coords[coord_name]['cdata']

    # Loop over output variables (must all be on same coordinates), write
    # data and attributes to file <out_file>
    for data in data_in:
        # Make sure our variable's short name isn't already in the file, if it is,
        # append _ to the name, until it's unique, but warn, because maybe this is bad
        if data.props['short_name'] in ncfile.variables.keys():
            print("WARNING: {} in file already".format(data.props['short_name']))
            while data.props['short_name'] in ncfile.variables.keys():
                data.props['short_name'] += "_"
            print("     USING: {}".format(data.props['short_name']))

        out_data = ncfile.createVariable(data.props['short_name'],
                                         np.dtype('float32').char,
                                         (list(data.coords.keys())), zlib=True,
                                         complevel=5)
        out_data.units = data.props['units']
        out_data.standard_name = data.props['name']
        out_data.description = data.props['descr']
        if 'scale_factor' in data.props:
            out_data.scale_factor = data.props['scale_factor']
        if 'offset' in data.props:
            out_data.add_offset = data.props['offset']
        if 'long_name' in data.props:
            out_data.long_name = data.props['long_name']
        out_data[:] = data.data

    # Set CF-conventions attribute
    ncfile.setncattr('Conventions', 'CF-1.6')

    # Set other arbitrary file-wide attributes
    if file_attrs is not None:
        # Accept either a mapping or an iterable of (name, value) pairs;
        # iterating a dict directly yields only its keys, which previously
        # broke the `attr, value` unpacking below.
        attr_items = file_attrs.items() if isinstance(file_attrs, dict) else file_attrs
        for attr, value in attr_items:
            if isinstance(value, dict):
                # Can't write a dict to netCDF attribute, so
                # make it a yaml-parseable string
                value = yaml.dump(value)
            ncfile.setncattr(attr, value)
    ncfile.close()
| [
"numpy.dtype",
"collections.OrderedDict",
"netCDF4.Dataset",
"yaml.dump"
] | [((5274, 5304), 'netCDF4.Dataset', 'nc.Dataset', (['out_file'], {'mode': '"""w"""'}), "(out_file, mode='w')\n", (5284, 5304), True, 'import netCDF4 as nc\n'), ((2049, 2062), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2060, 2062), False, 'from collections import OrderedDict\n'), ((5485, 5503), 'numpy.dtype', 'np.dtype', (['"""double"""'], {}), "('double')\n", (5493, 5503), True, 'import numpy as np\n'), ((5601, 5620), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (5609, 5620), True, 'import numpy as np\n'), ((6916, 6935), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (6924, 6935), True, 'import numpy as np\n'), ((7922, 7938), 'yaml.dump', 'yaml.dump', (['value'], {}), '(value)\n', (7931, 7938), False, 'import yaml\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
from utils.function_helpers import *
class Base_Loss(metaclass=ABCMeta):
    """
    Abstract base class for all loss functions.

    All loss functions must inherit from Base_Loss and implement
    __init__, map_data and calculate_gradients.
    """

    @abstractmethod
    def __init__(self) -> None:
        """Initialise the loss function."""
        pass

    @abstractmethod
    def map_data(self, y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        """
        map_data() takes some data and applies a mathematical mapping to it.

        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.

        Return:
            output : np.ndarray : An n dimensional numpy array of the mapped data.
        """
        # The original stub did `return output`, where `output` was never
        # defined — a NameError if a subclass delegated here via super().
        # Fail explicitly instead.
        raise NotImplementedError

    @abstractmethod
    def calculate_gradients(self, y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        """
        calculate_gradients returns the derivative of the loss function W.R.T the data.

        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.

        Return:
            output : np.ndarray : An n dimensional numpy array of gradients.
        """
        raise NotImplementedError
class Mean_Squared_Error(Base_Loss):
    def __init__(self) -> None:
        """
        Mean_Squared_Error is a commonly used regression loss function.
        """
        pass

    @accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
    def map_data(self, y_true, y_pred) -> np.ndarray:
        """
        Map targets and predictions to the halved squared error.

        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.

        Returns:
            output : np.ndarray : Element-wise squared difference between y_pred and y_true, divided by two.
        """
        residual = y_pred - y_true
        return residual ** 2 / 2

    @accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
    def calculate_gradients(self, y_true, y_pred) -> np.ndarray:
        """
        Derivative of the loss function W.R.T. y_pred.

        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.

        Returns:
            output : np.ndarray : Element-wise gradient of the loss with respect to y_pred.
        """
        return -(y_true - y_pred)
class Binary_Crossentropy(Base_Loss):
    """
    Binary cross-entropy: measures the performance of a classification model
    whose output is a probability in [0, 1] and which has fewer than 3 outputs.
    """
    def __init__(self) -> None:
        """
        No state is required: the loss is a pure function of its inputs.
        """
        pass
    @accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
    def map_data(self, y_true, y_pred) -> np.ndarray:
        """
        Return the element-wise binary cross-entropy between y_true and y_pred.
        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
        Returns:
            output : np.ndarray : An n dimensional numpy array of cross-entropy values.
        """
        # The 1.0e-8 offset keeps np.log away from log(0).
        true_term = y_true*np.log(y_pred+1.0e-8)
        false_term = (1-y_true)*np.log(1-y_pred+1.0e-8)
        return -(true_term + false_term)
    @accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
    def calculate_gradients(self, y_true, y_pred) -> np.ndarray:
        """
        Derivative of the binary cross-entropy with respect to y_pred.
        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
        Returns:
            output : np.ndarray : An n dimensional numpy array of gradients.
        """
        # division_check presumably guards against division by ~0 -- TODO confirm.
        return division_check(y_true,y_pred) - division_check(1-y_true, 1-y_pred)
class Crossentropy(Base_Loss):
    """
    Categorical cross-entropy: measures the performance of a classification
    model whose output is a probability in [0, 1] and which has 3 or more outputs.
    """
    def __init__(self) -> None:
        """
        No state is required: the loss is a pure function of its inputs.
        """
        pass
    @accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
    def map_data(self, y_true, y_pred) -> np.ndarray:
        """
        Return the element-wise cross-entropy between y_true and y_pred.
        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
        Returns:
            output : np.ndarray : An n dimensional numpy array of cross-entropy values.
        """
        # The 1.0e-8 offset keeps np.log away from log(0).
        return -(y_true*np.log(y_pred+1.0e-8))
    @accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
    def calculate_gradients(self, y_true, y_pred) -> np.ndarray:
        """
        Derivative of the cross-entropy with respect to y_pred.
        Bug fix: the parameter order was (y_pred, y_true), the reverse of
        Base_Loss and of every sibling loss class, so positional callers
        following the base-class signature received silently swapped
        arguments. The order now matches the interface, and the @accepts
        type guard used by the sibling classes has been added.
        Arguments:
            y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
            y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
        Returns:
            output : np.ndarray : An n dimensional numpy array of gradients.
        """
        return division_check(y_true, y_pred)
def get(loss) -> Base_Loss:
    """
    Finds and returns the correct loss function.
    Arguments:
        loss : Base_Loss/str : The loss function the user wants to use,
               either an instance or a (case-insensitive) name/alias.
    Returns:
        loss : Base_Loss : The correct loss function instance.
    Raises:
        ValueError : if `loss` is neither a string nor a Base_Loss instance.
    """
    if isinstance(loss, str):
        name = loss.lower()
        if name in ("mse", "mean_squared_error"):
            return Mean_Squared_Error()
        elif name in ("bc", "bce", "binary_crossentropy"):
            return Binary_Crossentropy()
        elif name in ("ce", "crossentropy"):
            return Crossentropy()
        else:
            # Unknown name: warn and fall back to MSE.
            print("At losses.get(): '%s' is not an available loss function. Has been set to 'Mean_squared_error' by default" % loss)
            # Bug fix: this used to be ``Mean_squared_error()`` (wrong
            # capitalisation), which raised NameError instead of falling back.
            return Mean_Squared_Error()
    elif isinstance(loss, Base_Loss):
        return loss
    else:
        # "recieved" typo fixed in the error message.
        raise ValueError("At losses.get(): Expected 'class inheriting from Base_Loss' or 'str' for the argument 'loss', received '%s'" % type(loss))
| [
"numpy.log"
] | [((3570, 3592), 'numpy.log', 'np.log', (['(y_pred + 1e-08)'], {}), '(y_pred + 1e-08)\n', (3576, 3592), True, 'import numpy as np\n'), ((3668, 3694), 'numpy.log', 'np.log', (['(1 - y_pred + 1e-08)'], {}), '(1 - y_pred + 1e-08)\n', (3674, 3694), True, 'import numpy as np\n'), ((5178, 5200), 'numpy.log', 'np.log', (['(y_pred + 1e-08)'], {}), '(y_pred + 1e-08)\n', (5184, 5200), True, 'import numpy as np\n')] |
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): <NAME>
Copyright (C) 2020 Inria, Copyright (C) 2020 FUjitsu Laboratories Ltd.
Modification(s):
- YYYY/MM Author: Description of the modification
"""
from gudhi.dtm_rips_complex import DTMRipsComplex
from gudhi import RipsComplex
import numpy as np
from math import sqrt
import pytest
def test_dtm_rips_complex():
    # Build a DTM-Rips complex on three 2-D points with k = 2 neighbours
    # and check the dimension-0 persistence intervals against known values.
    points = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
    dtm_rips = DTMRipsComplex(points=points, k=2)
    simplex_tree = dtm_rips.create_simplex_tree(max_dimension=2)
    simplex_tree.persistence()
    intervals_dim0 = simplex_tree.persistence_intervals_in_dimension(0)
    expected = np.array(
        [[3.16227766, 5.39834564],
         [3.16227766, 5.39834564],
         [3.16227766, float("inf")]])
    assert intervals_dim0 == pytest.approx(expected)
def test_compatibility_with_rips():
    # On the same distance matrix, DTMRipsComplex (with no DTM smoothing
    # parameters) must produce exactly the same filtration as RipsComplex.
    s2 = sqrt(2)
    distance_matrix = np.array(
        [[0, 1, 1, s2],
         [1, 0, s2, 1],
         [1, s2, 0, 1],
         [s2, 1, 1, 0]])
    st_dtm = DTMRipsComplex(distance_matrix=distance_matrix,
                            max_filtration=42).create_simplex_tree(max_dimension=1)
    st_rips = RipsComplex(distance_matrix=distance_matrix,
                            max_edge_length=42).create_simplex_tree(max_dimension=1)
    assert list(st_dtm.get_filtration()) == list(st_rips.get_filtration())
| [
"gudhi.dtm_rips_complex.DTMRipsComplex",
"numpy.array",
"gudhi.RipsComplex",
"math.sqrt"
] | [((552, 598), 'numpy.array', 'np.array', (['[[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]]'], {}), '([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])\n', (560, 598), True, 'import numpy as np\n'), ((614, 645), 'gudhi.dtm_rips_complex.DTMRipsComplex', 'DTMRipsComplex', ([], {'points': 'pts', 'k': '(2)'}), '(points=pts, k=2)\n', (628, 645), False, 'from gudhi.dtm_rips_complex import DTMRipsComplex\n'), ((1103, 1169), 'gudhi.dtm_rips_complex.DTMRipsComplex', 'DTMRipsComplex', ([], {'distance_matrix': 'distance_matrix', 'max_filtration': '(42)'}), '(distance_matrix=distance_matrix, max_filtration=42)\n', (1117, 1169), False, 'from gudhi.dtm_rips_complex import DTMRipsComplex\n'), ((1244, 1308), 'gudhi.RipsComplex', 'RipsComplex', ([], {'distance_matrix': 'distance_matrix', 'max_edge_length': '(42)'}), '(distance_matrix=distance_matrix, max_edge_length=42)\n', (1255, 1308), False, 'from gudhi import RipsComplex\n'), ((1017, 1024), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1021, 1024), False, 'from math import sqrt\n'), ((1034, 1041), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1038, 1041), False, 'from math import sqrt\n'), ((1051, 1058), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1055, 1058), False, 'from math import sqrt\n'), ((1068, 1075), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1072, 1075), False, 'from math import sqrt\n')] |
from unittest import TestCase
import numpy as np
from IPython import embed
from teafacto.examples.dummy import Dummy
class TestExternalValidators(TestCase):
    # Integration-style test for teafacto's external-validator hook:
    # trains a Dummy embedding model and plugs a custom accuracy function
    # into the validation pipeline via .extvalid().
    def test_external_validator(self):
        """Train Dummy for 10 epochs with an external accuracy validator and
        check consistency between the built-in and external metrics."""
        vocabsize = 1000
        m = Dummy(indim=vocabsize, dim=10, outdim=2000)
        numbats = 20
        lr = 0.8
        # One training sample per vocabulary index, with random class targets.
        data = np.arange(0, vocabsize).astype("int32")
        gdata = np.random.randint(0, 2000, (vocabsize,))
        mpredf = m.predict
        def extacc(*sampleinp):
            # External validator: the last element of sampleinp is the gold
            # labels, the rest are the model inputs.  Returns top-1 accuracy
            # as a one-element list (teafacto expects a list of scores).
            pred = mpredf(*sampleinp[:-1])
            ret = np.sum(np.argmax(pred, axis=1) == sampleinp[-1])
            return [ret * 1. / sampleinp[-1].shape[0]]
        # Fluent training chain: adadelta optimiser, cross-entropy training
        # loss, auto-validation with cross-entropy + accuracy + the external
        # accuracy validator; returnerrors exposes per-epoch error arrays.
        _, err, verr, _, _ = \
            m.train([data], gdata).adadelta(lr=lr).cross_entropy() \
            .autovalidate().cross_entropy().accuracy().extvalid(extacc) \
            .train(numbats=numbats, epochs=10, returnerrors=True)
        verr = np.asarray(verr)
        # Columns 1 and 2 of verr should sum to 1 per epoch -- presumably the
        # built-in accuracy-error and the external accuracy; confirm against
        # teafacto's returnerrors column layout.
        verr = verr[:, 1] + verr[:, 2]
        self.assertTrue(np.allclose(verr, np.ones_like(verr)))
| [
"numpy.ones_like",
"numpy.asarray",
"numpy.argmax",
"numpy.random.randint",
"teafacto.examples.dummy.Dummy",
"numpy.arange"
] | [((235, 278), 'teafacto.examples.dummy.Dummy', 'Dummy', ([], {'indim': 'vocabsize', 'dim': '(10)', 'outdim': '(2000)'}), '(indim=vocabsize, dim=10, outdim=2000)\n', (240, 278), False, 'from teafacto.examples.dummy import Dummy\n'), ((388, 428), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2000)', '(vocabsize,)'], {}), '(0, 2000, (vocabsize,))\n', (405, 428), True, 'import numpy as np\n'), ((913, 929), 'numpy.asarray', 'np.asarray', (['verr'], {}), '(verr)\n', (923, 929), True, 'import numpy as np\n'), ((332, 355), 'numpy.arange', 'np.arange', (['(0)', 'vocabsize'], {}), '(0, vocabsize)\n', (341, 355), True, 'import numpy as np\n'), ((1011, 1029), 'numpy.ones_like', 'np.ones_like', (['verr'], {}), '(verr)\n', (1023, 1029), True, 'import numpy as np\n'), ((558, 581), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (567, 581), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write SCAN training tasks to TFRecord dataset."""
import os
import random
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from latent_programmer.tasks.scan import sample_random
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_work_units', 1, 'Total number of work units.')
flags.DEFINE_integer('seed', None, 'Fixed random seed.')
flags.DEFINE_integer('num_tasks', 100000, 'Number of tasks to write.')
flags.DEFINE_string('save_dir', '/tmp/decomposition/scan',
'Directory to save results to.')
flags.DEFINE_boolean('output_separators', True,
'Whether to add separators between parts of the output.')
flags.DEFINE_enum('split', None, ['train', 'valid', 'test', 'finetune'],
'Which split of the dataset to generate.')
flags.DEFINE_enum('experiment', 'NONE',
[e.name for e in sample_random.ScanExperiment],
'Kind of experiment (see ScanExperiment for descriptions).')
def main(_):
  """Entry point: optionally seed all RNGs, then write SCAN tasks to a TFRecord."""
  if FLAGS.seed is not None:
    # Seed every RNG source used downstream for reproducibility.
    tf.random.set_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    random.seed(FLAGS.seed)

  if not gfile.isdir(FLAGS.save_dir):
    gfile.makedirs(FLAGS.save_dir)

  # Single-shard output file, named after the dataset split.
  output_path = os.path.join(
      FLAGS.save_dir,
      f'program_tasks_{FLAGS.split}.tf_records-00000-of-00001')
  sample_random.write_examples(
      filename=output_path,
      num_tasks=FLAGS.num_tasks,
      experiment=FLAGS.experiment,
      split=FLAGS.split,
      output_separators=FLAGS.output_separators)
if __name__ == '__main__':
app.run(main)
| [
"tensorflow.random.set_seed",
"absl.flags.DEFINE_integer",
"latent_programmer.tasks.scan.sample_random.write_examples",
"absl.flags.DEFINE_boolean",
"absl.app.run",
"random.seed",
"numpy.random.seed",
"absl.flags.DEFINE_enum",
"absl.flags.DEFINE_string"
] | [((874, 946), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_work_units"""', '(1)', '"""Total number of work units."""'], {}), "('num_work_units', 1, 'Total number of work units.')\n", (894, 946), False, 'from absl import flags\n'), ((947, 1003), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', 'None', '"""Fixed random seed."""'], {}), "('seed', None, 'Fixed random seed.')\n", (967, 1003), False, 'from absl import flags\n'), ((1004, 1074), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_tasks"""', '(100000)', '"""Number of tasks to write."""'], {}), "('num_tasks', 100000, 'Number of tasks to write.')\n", (1024, 1074), False, 'from absl import flags\n'), ((1075, 1170), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_dir"""', '"""/tmp/decomposition/scan"""', '"""Directory to save results to."""'], {}), "('save_dir', '/tmp/decomposition/scan',\n 'Directory to save results to.')\n", (1094, 1170), False, 'from absl import flags\n'), ((1187, 1296), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""output_separators"""', '(True)', '"""Whether to add separators between parts of the output."""'], {}), "('output_separators', True,\n 'Whether to add separators between parts of the output.')\n", (1207, 1296), False, 'from absl import flags\n'), ((1314, 1433), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""split"""', 'None', "['train', 'valid', 'test', 'finetune']", '"""Which split of the dataset to generate."""'], {}), "('split', None, ['train', 'valid', 'test', 'finetune'],\n 'Which split of the dataset to generate.')\n", (1331, 1433), False, 'from absl import flags\n'), ((1448, 1605), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""experiment"""', '"""NONE"""', '[e.name for e in sample_random.ScanExperiment]', '"""Kind of experiment (see ScanExperiment for descriptions)."""'], {}), "('experiment', 'NONE', [e.name for e in sample_random.\n ScanExperiment],\n 'Kind of experiment (see 
ScanExperiment for descriptions).')\n", (1465, 1605), False, 'from absl import flags\n'), ((1975, 2153), 'latent_programmer.tasks.scan.sample_random.write_examples', 'sample_random.write_examples', ([], {'filename': 'worker_fname', 'num_tasks': 'FLAGS.num_tasks', 'experiment': 'FLAGS.experiment', 'split': 'FLAGS.split', 'output_separators': 'FLAGS.output_separators'}), '(filename=worker_fname, num_tasks=FLAGS.\n num_tasks, experiment=FLAGS.experiment, split=FLAGS.split,\n output_separators=FLAGS.output_separators)\n', (2003, 2153), False, 'from latent_programmer.tasks.scan import sample_random\n'), ((2207, 2220), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2214, 2220), False, 'from absl import app\n'), ((1682, 1712), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (1700, 1712), True, 'import tensorflow as tf\n'), ((1717, 1743), 'numpy.random.seed', 'np.random.seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (1731, 1743), True, 'import numpy as np\n'), ((1748, 1771), 'random.seed', 'random.seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (1759, 1771), False, 'import random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 17:09:02 2020
@author: minjie
"""
import globalvar as gl
import os.path as osp
from config import cfg
from tqdm import tqdm
import torchvision
import torch
import numpy as np
from collections import Counter
from data.transforms.build import get_transform
from pathlib import Path
from modeling import build_model
#cfg.merge_from_file('./configs/resnet50_reid_bird_ave_centerloss.yaml')
#cfg.MISC.OUT_DIR = '../checkpoint/resnet50_reid_flower_ave_centern'
#cfg.merge_from_file('./configs/resnet50_reid_flower_ave_centerloss.yaml')
#cfg.MISC.OUT_DIR = '../checkpoint/resnet50_reid_flower_ave_centern1_lbs'
# Load the experiment config and point it at the trained checkpoint directory.
cfg.merge_from_file('./configs/resnet50_reid_flower_ave_centerloss.yaml')
cfg.MISC.OUT_DIR = '../checkpoint/resnet50_reid_flower_ave_centern1'
cfg.DATASETS.K_FOLD = 1
# Publish the config through the global-variable helper so that other
# modules (model/transform builders) can read it.
gl._init()
gl.set_value('cfg',cfg)
#%% hawaii dataset and transform
fd_hawaiiflower = '../data/Hawaii_flowers'
flower_names = Path(fd_hawaiiflower).glob('*')  # NOTE(review): generator is never consumed
#
# Validation-time transform at 448x448 -- presumably the training resolution.
tfms = get_transform((448,448), phase='valid')
ds = torchvision.datasets.ImageFolder(root=fd_hawaiiflower, transform=tfms)
labels = ds.classes
n_class = len(labels)
#%% build model
model = build_model(cfg)
model.eval()  # inference mode: freeze dropout / batch-norm statistics
best_model_fn = osp.join(cfg.MISC.OUT_DIR, f"{cfg.MODEL.NAME}-best.pth")
model.load_state_dict(torch.load(best_model_fn))
#%% inference get feature
# Run every image through the model once and cache its embedding and label.
cls_feats = list()
lbs = list()
for data in tqdm(ds):
    img,target = data
    img = img.cuda()
    with torch.no_grad():
        feat_out =model(img[None,...],target)
    # assumes the model returns (logits, features) -- TODO confirm
    cls_feats.append(feat_out[1][0].cpu().numpy())
    lbs.append(target)
#%% calc 1-center for one flower during querying (feat_m) this is not used, only for comparsion
countDict = Counter(lbs)
cls_feats = np.array(cls_feats)
lbs = np.array(lbs)
feat_m = list()
for cid in range(n_class):
feat = cls_feats[lbs ==cid]
feat = feat/np.linalg.norm(feat,axis = 1,keepdims = True)
feat_m.append(feat.mean(axis = 0))
cos_vals = np.zeros((cls_feats.shape[0],n_class))
for cid in range(n_class):
for imgid in range(cls_feats.shape[0]):
feat = cls_feats[imgid]/np.linalg.norm(cls_feats[imgid],axis = 0,keepdims = True)
cos_vals[imgid,cid] = np.dot(feat_m[cid],feat)
#%% 1-center acc and cm
from sklearn.metrics import confusion_matrix
np.set_printoptions(precision=4,suppress = False)
pred_label = np.argmax(cos_vals,axis = 1)
cm = confusion_matrix(lbs.astype('int64'), pred_label.astype('int64'))
(np.argmax(cos_vals,axis = 1) == lbs).sum()/cls_feats.shape[0]
print(cm)
print('acc top1 w/o th and center=1 for one class= {:03.03%}'.format(cm.diagonal().sum()/cm.sum()))
#%% test in-class dist
for cid in range(n_class):
feat = cls_feats[lbs ==cid]
feat = feat/np.linalg.norm(feat,axis = 1,keepdims = True)
featm = feat.mean(axis = 0,keepdims = True)
dists = np.dot(feat,featm.T)
print(cid)
print(dists.mean(),dists.min())
#%% now, calc k-center for one flower during querying (feat_m) this is for debug
from sklearn.cluster import KMeans
def calc_corr_diff_k(feat,max_k = 5):
    """For k = 1..max_k, cluster the row-normalised features into k groups
    with KMeans, build one unit-norm centroid per cluster, and record the
    minimum and mean cosine similarity between each sample and its best
    matching centroid.

    Arguments:
        feat  : (n_samples, dim) feature matrix for one class.
        max_k : largest number of clusters to evaluate.

    Returns:
        min_corr    : np.ndarray of length max_k -- worst-case cosine per k.
        mean_corr   : np.ndarray of length max_k -- average cosine per k.
        featm_k_out : list of the unit-norm centroids for k == max_k.
    """
    # L2-normalise each row so the dot products below are cosine similarities.
    feat = feat/np.linalg.norm(feat,axis = 1,keepdims = True)
    min_corr,mean_corr = list(),list()
    for nk in range(max_k):
        if nk >0:
            y_pred = KMeans(n_clusters=nk+1).fit_predict(feat)
        else:
            # k == 1: everything belongs to a single cluster, no fit needed.
            y_pred = np.zeros(feat.shape[0],dtype = 'int32')
        featm_k = list()
        for k in range(nk+1):
            # Unit-norm centroid of cluster k.
            featm = feat[y_pred ==k].mean(axis = 0,keepdims = True)
            featm = featm/np.linalg.norm(featm,axis = 1,keepdims = True)
            featm_k.append(featm)
        if nk==max_k-1:
            # Keep the centroids of the largest k for the caller.
            featm_k_out = featm_k
        for k in range(nk+1):
            # Best (maximum) cosine similarity of each sample over all centroids.
            if k==0:
                cos_v = np.dot(feat,featm_k[k].T)
            else:
                cos_v = np.maximum(cos_v,np.dot(feat,featm_k[k].T))
        min_corr.append(cos_v.min())
        mean_corr.append(cos_v.mean())
    min_corr = np.array(min_corr)
    mean_corr = np.array(mean_corr)
    return min_corr,mean_corr,featm_k_out
min_corr_all,mean_corr_all = list(),list()
for cid in range(n_class):
feat = cls_feats[lbs ==cid]
min_corr,mean_corr,_ = calc_corr_diff_k(feat,max_k = 5)
min_corr_all.append(min_corr)
mean_corr_all.append(mean_corr)
#%% calc k-center (k=3) features for query, this is used
featm_k_all = list()
nk = 3
th_eval = 0.5
for cid in range(n_class):
feat = cls_feats[lbs ==cid]
#feat = feat/np.linalg.norm(feat,axis = 1,keepdims = True)
#featm = feat.mean(axis = 0,keepdims = True)
_,_,featm_k = calc_corr_diff_k(feat,max_k = nk)
featm_k_all.append(featm_k)
cos_vals = -1.0*np.ones((cls_feats.shape[0],n_class))
for cid in range(n_class):
for imgid in range(cls_feats.shape[0]):
feat = cls_feats[imgid]/np.linalg.norm(cls_feats[imgid],axis = 0,keepdims = True)
for k in range(nk):
corr_val = np.dot(featm_k_all[cid][k],feat)
if corr_val>cos_vals[imgid,cid]:
cos_vals[imgid,cid] = corr_val
pred_label = np.argmax(cos_vals,axis = 1)
cm = confusion_matrix(lbs.astype('int64'), pred_label.astype('int64'))
# this is the case when non-flower not considered
(np.argmax(cos_vals,axis = 1) == lbs).sum()/cls_feats.shape[0]
print(cm)
#print(cm.diagonal().sum()/cm.sum())
print('acc top1 w/o th = {:03.03%}'.format(cm.diagonal().sum()/cm.sum()))
# this is the case when non-flower considered
cos_vals_p = np.hstack((np.ones((cos_vals.shape[0],1))*th_eval,cos_vals))
import torch.nn.functional as F
probs = F.softmax(torch.from_numpy(cos_vals_p)*25,dim=1) # THIS IS THE PROBS of all the images, the first is non-flower
pred_label_withnobj = probs.argmax(dim=1).numpy()
acc_th = (pred_label_withnobj==lbs+1).sum()/len(lbs)
print('acc top1 with th = {:03.03%}'.format(acc_th))
#prob
#
#
#torch.topk(torch.from_numpy(cos_vals),2,dim=1)
#%% a test of coco set val17 if it is returned as fp
#flist = list(Path('../data/coco/val2017').glob('*.jpg'))
cls_feats_noobj = list()
ds_noobj = torchvision.datasets.ImageFolder(root='../data/coco', transform=tfms)
for data in tqdm(ds_noobj):
img,target = data
img = img.cuda()
with torch.no_grad():
feat_out =model(img[None,...],target)
cls_feats_noobj.append(feat_out[1][0].cpu().numpy())
#%% Here, we tested how many coco val images return top-1 with given flower
cls_feats_noobj = np.array(cls_feats_noobj)
cos_vals_noobj = -1.0*np.ones((cls_feats_noobj.shape[0],n_class))
for cid in range(n_class):
for imgid in range(cos_vals_noobj.shape[0]):
feat = cls_feats_noobj[imgid]/np.linalg.norm(cls_feats_noobj[imgid],axis = 0,keepdims = True)
for k in range(nk):
corr_val = np.dot(featm_k_all[cid][k],feat)
if corr_val>cos_vals_noobj[imgid,cid]:
cos_vals_noobj[imgid,cid] = corr_val
cos_vals_p_nonobj = np.hstack((np.ones((cos_vals_noobj.shape[0],1))*th_eval,cos_vals_noobj))
probs = F.softmax(torch.from_numpy(cos_vals_p_nonobj)*25,dim=1) # THIS IS THE PROBS of all the images, the first is non-flower
pred_label_withnobj_fp = probs.argmax(dim=1).numpy()
fp_prob = (pred_label_withnobj_fp!=0).sum()/cls_feats_noobj.shape[0]
print('fp for coco with th = {:03.03%}'.format(fp_prob))
fns_fp =np.array(ds_noobj.imgs)[cos_vals_noobj.max(axis = 1)>th_eval]
fns_fp[:,1] = cos_vals_noobj.argmax(axis = 1)[cos_vals_noobj.max(axis = 1)>th_eval]
| [
"globalvar.set_value",
"torch.from_numpy",
"numpy.array",
"numpy.linalg.norm",
"config.cfg.merge_from_file",
"pathlib.Path",
"torchvision.datasets.ImageFolder",
"numpy.dot",
"numpy.ones",
"numpy.argmax",
"numpy.set_printoptions",
"sklearn.cluster.KMeans",
"data.transforms.build.get_transform... | [((690, 763), 'config.cfg.merge_from_file', 'cfg.merge_from_file', (['"""./configs/resnet50_reid_flower_ave_centerloss.yaml"""'], {}), "('./configs/resnet50_reid_flower_ave_centerloss.yaml')\n", (709, 763), False, 'from config import cfg\n'), ((862, 872), 'globalvar._init', 'gl._init', ([], {}), '()\n', (870, 872), True, 'import globalvar as gl\n'), ((873, 897), 'globalvar.set_value', 'gl.set_value', (['"""cfg"""', 'cfg'], {}), "('cfg', cfg)\n", (885, 897), True, 'import globalvar as gl\n'), ((1038, 1078), 'data.transforms.build.get_transform', 'get_transform', (['(448, 448)'], {'phase': '"""valid"""'}), "((448, 448), phase='valid')\n", (1051, 1078), False, 'from data.transforms.build import get_transform\n'), ((1083, 1153), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': 'fd_hawaiiflower', 'transform': 'tfms'}), '(root=fd_hawaiiflower, transform=tfms)\n', (1115, 1153), False, 'import torchvision\n'), ((1223, 1239), 'modeling.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (1234, 1239), False, 'from modeling import build_model\n'), ((1270, 1326), 'os.path.join', 'osp.join', (['cfg.MISC.OUT_DIR', 'f"""{cfg.MODEL.NAME}-best.pth"""'], {}), "(cfg.MISC.OUT_DIR, f'{cfg.MODEL.NAME}-best.pth')\n", (1278, 1326), True, 'import os.path as osp\n'), ((1455, 1463), 'tqdm.tqdm', 'tqdm', (['ds'], {}), '(ds)\n', (1459, 1463), False, 'from tqdm import tqdm\n'), ((1771, 1783), 'collections.Counter', 'Counter', (['lbs'], {}), '(lbs)\n', (1778, 1783), False, 'from collections import Counter\n'), ((1796, 1815), 'numpy.array', 'np.array', (['cls_feats'], {}), '(cls_feats)\n', (1804, 1815), True, 'import numpy as np\n'), ((1822, 1835), 'numpy.array', 'np.array', (['lbs'], {}), '(lbs)\n', (1830, 1835), True, 'import numpy as np\n'), ((2027, 2066), 'numpy.zeros', 'np.zeros', (['(cls_feats.shape[0], n_class)'], {}), '((cls_feats.shape[0], n_class))\n', (2035, 2066), True, 'import numpy as np\n'), ((2366, 
2414), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(False)'}), '(precision=4, suppress=False)\n', (2385, 2414), True, 'import numpy as np\n'), ((2430, 2457), 'numpy.argmax', 'np.argmax', (['cos_vals'], {'axis': '(1)'}), '(cos_vals, axis=1)\n', (2439, 2457), True, 'import numpy as np\n'), ((5219, 5246), 'numpy.argmax', 'np.argmax', (['cos_vals'], {'axis': '(1)'}), '(cos_vals, axis=1)\n', (5228, 5246), True, 'import numpy as np\n'), ((6208, 6277), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""../data/coco"""', 'transform': 'tfms'}), "(root='../data/coco', transform=tfms)\n", (6240, 6277), False, 'import torchvision\n'), ((6290, 6304), 'tqdm.tqdm', 'tqdm', (['ds_noobj'], {}), '(ds_noobj)\n', (6294, 6304), False, 'from tqdm import tqdm\n'), ((6574, 6599), 'numpy.array', 'np.array', (['cls_feats_noobj'], {}), '(cls_feats_noobj)\n', (6582, 6599), True, 'import numpy as np\n'), ((1349, 1374), 'torch.load', 'torch.load', (['best_model_fn'], {}), '(best_model_fn)\n', (1359, 1374), False, 'import torch\n'), ((2919, 2940), 'numpy.dot', 'np.dot', (['feat', 'featm.T'], {}), '(feat, featm.T)\n', (2925, 2940), True, 'import numpy as np\n'), ((4082, 4100), 'numpy.array', 'np.array', (['min_corr'], {}), '(min_corr)\n', (4090, 4100), True, 'import numpy as np\n'), ((4121, 4140), 'numpy.array', 'np.array', (['mean_corr'], {}), '(mean_corr)\n', (4129, 4140), True, 'import numpy as np\n'), ((4819, 4857), 'numpy.ones', 'np.ones', (['(cls_feats.shape[0], n_class)'], {}), '((cls_feats.shape[0], n_class))\n', (4826, 4857), True, 'import numpy as np\n'), ((6622, 6666), 'numpy.ones', 'np.ones', (['(cls_feats_noobj.shape[0], n_class)'], {}), '((cls_feats_noobj.shape[0], n_class))\n', (6629, 6666), True, 'import numpy as np\n'), ((7469, 7492), 'numpy.array', 'np.array', (['ds_noobj.imgs'], {}), '(ds_noobj.imgs)\n', (7477, 7492), True, 'import numpy as np\n'), ((992, 1013), 'pathlib.Path', 'Path', 
(['fd_hawaiiflower'], {}), '(fd_hawaiiflower)\n', (996, 1013), False, 'from pathlib import Path\n'), ((1517, 1532), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1530, 1532), False, 'import torch\n'), ((1928, 1971), 'numpy.linalg.norm', 'np.linalg.norm', (['feat'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat, axis=1, keepdims=True)\n', (1942, 1971), True, 'import numpy as np\n'), ((2266, 2291), 'numpy.dot', 'np.dot', (['feat_m[cid]', 'feat'], {}), '(feat_m[cid], feat)\n', (2272, 2291), True, 'import numpy as np\n'), ((2813, 2856), 'numpy.linalg.norm', 'np.linalg.norm', (['feat'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat, axis=1, keepdims=True)\n', (2827, 2856), True, 'import numpy as np\n'), ((3165, 3208), 'numpy.linalg.norm', 'np.linalg.norm', (['feat'], {'axis': '(1)', 'keepdims': '(True)'}), '(feat, axis=1, keepdims=True)\n', (3179, 3208), True, 'import numpy as np\n'), ((5735, 5763), 'torch.from_numpy', 'torch.from_numpy', (['cos_vals_p'], {}), '(cos_vals_p)\n', (5751, 5763), False, 'import torch\n'), ((6358, 6373), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6371, 6373), False, 'import torch\n'), ((7170, 7205), 'torch.from_numpy', 'torch.from_numpy', (['cos_vals_p_nonobj'], {}), '(cos_vals_p_nonobj)\n', (7186, 7205), False, 'import torch\n'), ((2178, 2233), 'numpy.linalg.norm', 'np.linalg.norm', (['cls_feats[imgid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(cls_feats[imgid], axis=0, keepdims=True)\n', (2192, 2233), True, 'import numpy as np\n'), ((3409, 3447), 'numpy.zeros', 'np.zeros', (['feat.shape[0]'], {'dtype': '"""int32"""'}), "(feat.shape[0], dtype='int32')\n", (3417, 3447), True, 'import numpy as np\n'), ((4969, 5024), 'numpy.linalg.norm', 'np.linalg.norm', (['cls_feats[imgid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(cls_feats[imgid], axis=0, keepdims=True)\n', (4983, 5024), True, 'import numpy as np\n'), ((5079, 5112), 'numpy.dot', 'np.dot', (['featm_k_all[cid][k]', 'feat'], {}), '(featm_k_all[cid][k], feat)\n', (5085, 
5112), True, 'import numpy as np\n'), ((5635, 5666), 'numpy.ones', 'np.ones', (['(cos_vals.shape[0], 1)'], {}), '((cos_vals.shape[0], 1))\n', (5642, 5666), True, 'import numpy as np\n'), ((6789, 6850), 'numpy.linalg.norm', 'np.linalg.norm', (['cls_feats_noobj[imgid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(cls_feats_noobj[imgid], axis=0, keepdims=True)\n', (6803, 6850), True, 'import numpy as np\n'), ((6905, 6938), 'numpy.dot', 'np.dot', (['featm_k_all[cid][k]', 'feat'], {}), '(featm_k_all[cid][k], feat)\n', (6911, 6938), True, 'import numpy as np\n'), ((7090, 7127), 'numpy.ones', 'np.ones', (['(cos_vals_noobj.shape[0], 1)'], {}), '((cos_vals_noobj.shape[0], 1))\n', (7097, 7127), True, 'import numpy as np\n'), ((2534, 2561), 'numpy.argmax', 'np.argmax', (['cos_vals'], {'axis': '(1)'}), '(cos_vals, axis=1)\n', (2543, 2561), True, 'import numpy as np\n'), ((3629, 3673), 'numpy.linalg.norm', 'np.linalg.norm', (['featm'], {'axis': '(1)', 'keepdims': '(True)'}), '(featm, axis=1, keepdims=True)\n', (3643, 3673), True, 'import numpy as np\n'), ((3870, 3896), 'numpy.dot', 'np.dot', (['feat', 'featm_k[k].T'], {}), '(feat, featm_k[k].T)\n', (3876, 3896), True, 'import numpy as np\n'), ((5376, 5403), 'numpy.argmax', 'np.argmax', (['cos_vals'], {'axis': '(1)'}), '(cos_vals, axis=1)\n', (5385, 5403), True, 'import numpy as np\n'), ((3332, 3357), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(nk + 1)'}), '(n_clusters=nk + 1)\n', (3338, 3357), False, 'from sklearn.cluster import KMeans\n'), ((3955, 3981), 'numpy.dot', 'np.dot', (['feat', 'featm_k[k].T'], {}), '(feat, featm_k[k].T)\n', (3961, 3981), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import time
# https://github.com/UNSW-CEEM/Bill_Calculator
# Prepared by <NAME> (<EMAIL>)
# You can learn how to use this function by running the Tariff Calculation Example notebook in this repository
# Inputs: Tariff and Load profile (30 min interval, one year,
# timestamps are the end of time period: 12:30 is consumption from 12 to 12:30)
# If tariff rates include gst the result will be gst inclusive
# if discount applies to any rate, it should be considered before calling the function
def bill_calculator(load_profile, tariff, network_load=None, fit=True):
    """Calculate an annual electricity bill per customer under a given tariff.

    Parameters:
        load_profile: pandas DataFrame of 30-min kWh readings, one column per
            customer; timestamps mark the END of each interval (12:30 covers
            12:00-12:30). Positive values are imports, negative are exports.
        tariff: dict with 'ProviderType' and 'Parameters' holding components
            such as Daily, FlatRate, BlockAnnual/BlockQuarterly/BlockMonthly/
            BlockDaily, TOU, Demand, FiT, FiT_TOU. If rates include GST the
            result is GST-inclusive; discounts must be applied beforehand.
        network_load: optional DataFrame with a 'NetworkLoad' column; only
            needed for demand charges based on the network peak.
        fit: if True, export (feed-in) energy is tallied and rebated.

    Returns:
        dict mapping 'LoadInfo' and each tariff component name to a DataFrame
        of per-customer figures; component frames carry 'Charge_*' columns and
        a summed 'Bill' column.

    Raises:
        ValueError: if the tariff has zero or more than one energy component.
    """
    # Treating load profile
    load_profile = load_profile.fillna(0)
    def time_select(load_profile_s, par):
        # Filter load_profile_s down to the time-of-use window described by
        # `par`: its TimeIntervals, Weekday/Weekend flags and Month list.
        load_profile_s_t_a = pd.DataFrame()
        for k2_1, v2_1, in par['TimeIntervals'].items():
            # "24:xx" is not a valid pandas time-of-day; map it onto "00:xx".
            if v2_1[0][0:2] == '24':
                v2_1[0] = v2_1[1].replace("24", "00")
            if v2_1[1][0:2] == '24':
                v2_1[1] = v2_1[1].replace("24", "00")
            if v2_1[0] != v2_1[1]:
                load_profile_s_t = load_profile_s.between_time(start_time=v2_1[0], end_time=v2_1[1], include_start=False,
                                                               include_end=True)
            else:
                # Identical start and end means the window is the whole day.
                load_profile_s_t = load_profile_s.copy()
            if not par['Weekday']:
                load_profile_s_t = load_profile_s_t.loc[load_profile_s_t.index.weekday >= 5].copy()
            if not par['Weekend']:
                load_profile_s_t = load_profile_s_t.loc[load_profile_s_t.index.weekday < 5].copy()
            load_profile_s_t = load_profile_s_t.loc[load_profile_s_t.index.month.isin(par['Month']), :].copy()
            load_profile_s_t_a = pd.concat([load_profile_s_t_a, load_profile_s_t])
        return load_profile_s_t_a
    # Calculate imports and exports
    results = {}
    # NOTE(review): .values may be a view, so zeroing Temp_imp in place can
    # also mutate load_profile itself; Temp_exp is copied first so exports
    # are preserved — confirm this is intended.
    Temp_imp = load_profile.values
    Temp_exp = Temp_imp.copy()
    Temp_imp[Temp_imp < 0] = 0
    Temp_exp[Temp_exp > 0] = 0
    load_profile_import = pd.DataFrame(Temp_imp, columns=load_profile.columns, index=load_profile.index)
    load_profile_export = pd.DataFrame(Temp_exp, columns=load_profile.columns, index=load_profile.index)
    results['LoadInfo'] = pd.DataFrame(index=[col for col in load_profile.columns],
                                       data=np.sum(load_profile_import.values, axis=0), columns=['Annual_kWh'])
    if fit:
        results['LoadInfo']['Annual_kWh_exp'] = -1 * np.sum(load_profile_export.values, axis=0)
    # If it is retailer put retailer as a component to make it similar to network tariffs
    if tariff['ProviderType'] == 'Retailer':
        tariff_temp = tariff.copy()
        del tariff_temp['Parameters']
        tariff_temp['Parameters'] = {'Retailer': tariff['Parameters']}
        tariff = tariff_temp.copy()
    for TarComp, TarCompVal in tariff['Parameters'].items():
        results[TarComp] = pd.DataFrame(index=results['LoadInfo'].index)
    # Calculate the FiT
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'FiT' in TarCompVal.keys():
            results[TarComp]['Charge_FiT_Rebate'] = -1 * results['LoadInfo']['Annual_kWh_exp'] * TarCompVal['FiT']['Value']
        elif 'FiT_TOU' in TarCompVal.keys():
            # Time-of-use feed-in: rebate each export window at its own rate.
            load_profile_ti_exp = pd.DataFrame()
            load_profile_ti_exp_charge = pd.DataFrame()
            for k, v in TarCompVal['FiT_TOU'].items():
                this_part = v.copy()
                # Missing window fields default to "always applies".
                if 'Weekday' not in this_part:
                    this_part['Weekday'] = True
                    this_part['Weekend'] = True
                if 'TimeIntervals' not in this_part:
                    this_part['TimeIntervals'] = {'T1': ['00:00', '00:00']}
                if 'Month' not in this_part:
                    this_part['Month'] = list(range(1, 13))
                load_profile_t_a = time_select(load_profile_export, this_part)
                load_profile_ti_exp[k] = load_profile_t_a.sum()
                results[TarComp]['kWh_Exp' + k] = load_profile_ti_exp[k].copy()
                load_profile_ti_exp_charge[k] = this_part['Value'] * load_profile_ti_exp[k]
                results[TarComp]['FiT_C_TOU' + k] = load_profile_ti_exp_charge[k].copy()
            results[TarComp]['Charge_FiT_Rebate'] = load_profile_ti_exp_charge.sum(axis=1)
    # Check if daily exists and calculate the charge
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'Daily' in TarCompVal.keys():
            num_days = (len(load_profile.index.normalize().unique()) - 1)
            break
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'Daily' in TarCompVal.keys():
            results[TarComp]['Charge_Daily'] = num_days * TarCompVal['Daily']['Value']
    # Energy
    # Flat Rate:
    # Check if flat rate charge exists and calculate the charge
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'FlatRate' in TarCompVal.keys():
            results[TarComp]['Charge_FlatRate'] = results['LoadInfo']['Annual_kWh'] * TarCompVal['FlatRate']['Value']
    # Block Annual:
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'BlockAnnual' in TarCompVal.keys():
            block_use = results['LoadInfo'][['Annual_kWh']].copy()
            block_use_charge = block_use.copy()
            # separating the blocks of usage
            lim = 0
            for k, v in TarCompVal['BlockAnnual'].items():
                block_use[k] = block_use['Annual_kWh']
                block_use[k][block_use[k] > float(v['HighBound'])] = float(v['HighBound'])
                block_use[k] = block_use[k] - lim
                block_use[k][block_use[k] < 0] = 0
                lim = float(v['HighBound'])
                block_use_charge[k] = block_use[k] * v['Value']
            del block_use['Annual_kWh']
            del block_use_charge['Annual_kWh']
            results[TarComp]['Charge_BlockAnnual'] = block_use_charge.sum(axis=1)
    # Block Quarterly:
    # check if it has quarterly and if yes calculate the quarterly energy
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'BlockQuarterly' in TarCompVal.keys():
            for Q in range(1, 5):
                load_profile_q = load_profile_import.loc[
                    load_profile_import.index.month.isin(list(range((Q - 1) * 3 + 1, Q * 3 + 1))), :]
                results['LoadInfo']['kWh_Q' + str(Q)] = [
                    np.nansum(load_profile_q[col].values[load_profile_q[col].values > 0])
                    for col in load_profile_q.columns]
            break
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'BlockQuarterly' in TarCompVal.keys():
            for Q in range(1, 5):
                block_use = results['LoadInfo'][['kWh_Q' + str(Q)]].copy()
                block_use_charge = block_use.copy()
                lim = 0
                for k, v in TarCompVal['BlockQuarterly'].items():
                    block_use[k] = block_use['kWh_Q' + str(Q)]
                    block_use[k][block_use[k] > float(v['HighBound'])] = float(v['HighBound'])
                    block_use[k] = block_use[k] - lim
                    block_use[k][block_use[k] < 0] = 0
                    lim = float(v['HighBound'])
                    block_use_charge[k] = block_use[k] * v['Value']
                del block_use['kWh_Q' + str(Q)]
                del block_use_charge['kWh_Q' + str(Q)]
                results[TarComp]['C_Q' + str(Q)] = block_use_charge.sum(axis=1)
            results[TarComp]['Charge_BlockQuarterly'] = results[TarComp][
                ['C_Q' + str(Q) for Q in range(1, 5)]].sum(axis=1)
    # Block Monthly:
    # check if it has Monthly and if yes calculate the Monthly energy
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'BlockMonthly' in TarCompVal.keys():
            for m in range(1, 13):
                load_profile_m = load_profile_import.loc[load_profile_import.index.month == m, :]
                results['LoadInfo']['kWh_m' + str(m)] = [
                    np.nansum(load_profile_m[col].values[load_profile_m[col].values > 0])
                    for col in load_profile_m.columns]
            break
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'BlockMonthly' in TarCompVal.keys():
            for Q in range(1, 13):
                block_use = results['LoadInfo'][['kWh_m' + str(Q)]].copy()
                block_use_charge = block_use.copy()
                lim = 0
                for k, v in TarCompVal['BlockMonthly'].items():
                    block_use[k] = block_use['kWh_m' + str(Q)]
                    block_use[k][block_use[k] > float(v['HighBound'])] = float(v['HighBound'])
                    block_use[k] = block_use[k] - lim
                    block_use[k][block_use[k] < 0] = 0
                    lim = float(v['HighBound'])
                    block_use_charge[k] = block_use[k] * v['Value']
                del block_use['kWh_m' + str(Q)]
                del block_use_charge['kWh_m' + str(Q)]
                results[TarComp]['C_m' + str(Q)] = block_use_charge.sum(axis=1)
            results[TarComp]['Charge_BlockMonthly'] = results[TarComp][['C_m' + str(Q) for Q in range(1, 13)]].sum(
                axis=1)
    # Block Daily:
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'BlockDaily' in TarCompVal.keys():
            DailykWh = load_profile_import.resample('D').sum()
            block_use_temp_charge = DailykWh.copy()
            block_use_temp_charge.iloc[:, :] = 0
            lim = 0
            for k, v in TarCompVal['BlockDaily'].items():
                block_use_temp = DailykWh.copy()
                block_use_temp[block_use_temp > float(v['HighBound'])] = float(v['HighBound'])
                block_use_temp = block_use_temp - lim
                block_use_temp[block_use_temp < 0] = 0
                lim = float(v['HighBound'])
                block_use_temp_charge = block_use_temp_charge + block_use_temp * v['Value']
            results[TarComp]['Charge_BlockDaily'] = block_use_temp_charge.sum(axis=0)
    # TOU energy
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'TOU' in TarCompVal.keys():
            load_profile_ti = pd.DataFrame()
            load_profile_ti_charge = pd.DataFrame()
            for k, v in TarCompVal['TOU'].items():
                this_part = v.copy()
                # Missing window fields default to "always applies".
                if 'Weekday' not in this_part:
                    this_part['Weekday'] = True
                    this_part['Weekend'] = True
                if 'TimeIntervals' not in this_part:
                    this_part['TimeIntervals'] = {'T1': ['00:00', '00:00']}
                if 'Month' not in this_part:
                    this_part['Month'] = list(range(1, 13))
                load_profile_t_a = time_select(load_profile_import, this_part)
                load_profile_ti[k] = load_profile_t_a.sum()
                results[TarComp]['kWh_' + k] = load_profile_ti[k].copy()
                load_profile_ti_charge[k] = this_part['Value'] * load_profile_ti[k]
                results[TarComp]['C_' + k] = load_profile_ti_charge[k].copy()
            results[TarComp]['Charge_TOU'] = load_profile_ti_charge.sum(axis=1)
    # Demand charge:
    for TarComp, TarCompVal in tariff['Parameters'].items():
        if 'Demand' in TarCompVal.keys():
            for DemCharComp, DemCharCompVal in TarCompVal['Demand'].items():
                ts_num = DemCharCompVal['Demand Window Length'] # number of timestamp
                num_of_peaks = DemCharCompVal['Number of Peaks']
                # Rolling mean turns instantaneous readings into window demand.
                if ts_num > 1:
                    load_profile_r = load_profile_import.rolling(ts_num, min_periods=1).mean()
                else:
                    load_profile_r = load_profile_import.copy()
                load_profile_f = time_select(load_profile_r, DemCharCompVal)
                # if capacity charge is applied meaning the charge only applies when you exceed the capacity for
                # a certain number of times
                if 'Capacity' in DemCharCompVal:
                    # please note the capacity charge only works with user's demand peak (not coincident peak)
                    # Customers can exceed their capacity level on x separate days per month during each interval
                    # (day or night). If they exceed more than x times, they will be charged for the highest
                    # exceedance of their capacity the capacity charge (if they don't exceed) is already included
                    # in the fixed charge so they only pay for the difference
                    capacity = DemCharCompVal['Capacity']['Value']
                    if 'Capacity Exceeded No' in DemCharCompVal:
                        cap_exc_no = DemCharCompVal['Capacity Exceeded No']
                    else:
                        cap_exc_no = 0
                    # capacity / 2 converts kW capacity to kWh per 30-min slot.
                    load_profile_f = load_profile_f - (capacity / 2)
                    load_profile_f = load_profile_f.clip(lower=0)
                    load_profile_f_g = load_profile_f.groupby(load_profile_f.index.normalize()).max()
                    for m in range(1, 13):
                        arr = load_profile_f_g.loc[load_profile_f_g.index.month == m, :].copy().values
                        cap_exc_no_val = np.sum(arr > 0, axis=0)
                        load_profile_f.loc[load_profile_f.index.month == m, cap_exc_no_val <= cap_exc_no] = 0
                    load_profile_f2 = load_profile_f.copy()
                else:
                    load_profile_f2 = load_profile_f.copy()
                based_on_network_peak = False
                if 'Based on Network Peak' in DemCharCompVal:
                    if DemCharCompVal['Based on Network Peak']:
                        based_on_network_peak = True
                # minimum demand or demand charge
                min_dem1 = 0
                min_dem2 = 0
                if 'Min Demand (kW)' in DemCharCompVal:
                    min_dem1 = DemCharCompVal['Min Demand (kW)']
                if 'Min Demand Charge ($)' in DemCharCompVal:
                    if DemCharCompVal['Value'] > 0:
                        min_dem2 = DemCharCompVal['Min Demand Charge ($)'] / DemCharCompVal['Value']
                min_dem = min(min_dem1, min_dem2)
                if based_on_network_peak:
                    # Peaks are taken at the timestamps of the network's top
                    # `num_of_peaks` loads per month (coincident demand).
                    new_load = pd.merge(load_profile_f2, network_load, left_index=True, right_index=True)
                    average_peaks_all = np.empty((0, new_load.shape[1] - 1), dtype=float)
                    for m in DemCharCompVal['Month']:
                        new_load2 = new_load.loc[new_load.index.month == m, :].copy()
                        new_load2.sort_values(by='NetworkLoad', inplace=True, ascending=False)
                        average_peaks_all = np.append(average_peaks_all,
                                                      [2 * new_load2.iloc[:num_of_peaks, :-1].values.mean(axis=0)],
                                                      axis=0)
                    average_peaks_all = np.clip(average_peaks_all, a_min=min_dem, a_max=None)
                    average_peaks_all_sum = average_peaks_all.sum(axis=0)
                else:
                    # Peaks are the user's own top `num_of_peaks` demands per
                    # month; the factor 2 converts kWh/30min back to kW.
                    average_peaks_all = np.empty((0, load_profile_f.shape[1]), dtype=float)
                    for m in DemCharCompVal['Month']:
                        arr = load_profile_f.loc[load_profile_f.index.month == m, :].copy().values
                        arr.sort(axis=0)
                        arr = arr[::-1]
                        average_peaks_all = np.append(average_peaks_all, [2 * arr[:num_of_peaks, :].mean(axis=0)],
                                                      axis=0)
                    average_peaks_all = np.clip(average_peaks_all, a_min=min_dem, a_max=None)
                    average_peaks_all_sum = average_peaks_all.sum(axis=0)
                results[TarComp]['Avg_kW_' + DemCharComp] = average_peaks_all_sum / len(DemCharCompVal['Month'])
                results[TarComp]['C_' + DemCharComp] = average_peaks_all_sum * DemCharCompVal['Value']
            results[TarComp]['Charge_Demand'] = results[TarComp][
                [col for col in results[TarComp] if col.startswith('C_')]].sum(axis=1)
    # Sum every 'Charge_*' column of each component into its 'Bill' column.
    for k, v in results.items():
        if k != 'LoadInfo':
            results[k]['Bill'] = results[k][[col for col in results[k].columns if col.startswith('Charge')]].sum(axis=1)
    # Sanity check: a valid tariff has exactly one energy charge component.
    energy_comp_list = ['BlockAnnual', 'BlockQuarterly', 'BlockMonthly', 'BlockDaily', 'FlatRate', 'TOU']
    tariff_comp_list = []
    for TarComp, TarCompVal in tariff['Parameters'].items():
        for TarComp2, TarCompVal2 in tariff['Parameters'][TarComp].items():
            tariff_comp_list.append(TarComp2)
    tariff_comp_list = list(set(tariff_comp_list))
    energy_lst = [value for value in tariff_comp_list if value in energy_comp_list]
    if len(energy_lst) < 1:
        raise ValueError("There is no energy charge component. Please fix the tariff and try again!")
    elif len(energy_lst) > 1:
        raise ValueError( "There are more than one energy charge component. Please fix the tariff and try again!")
    else:
        return results
| [
"numpy.clip",
"pandas.merge",
"numpy.sum",
"numpy.empty",
"pandas.DataFrame",
"numpy.nansum",
"pandas.concat"
] | [((2024, 2102), 'pandas.DataFrame', 'pd.DataFrame', (['Temp_imp'], {'columns': 'load_profile.columns', 'index': 'load_profile.index'}), '(Temp_imp, columns=load_profile.columns, index=load_profile.index)\n', (2036, 2102), True, 'import pandas as pd\n'), ((2129, 2207), 'pandas.DataFrame', 'pd.DataFrame', (['Temp_exp'], {'columns': 'load_profile.columns', 'index': 'load_profile.index'}), '(Temp_exp, columns=load_profile.columns, index=load_profile.index)\n', (2141, 2207), True, 'import pandas as pd\n'), ((751, 765), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (763, 765), True, 'import pandas as pd\n'), ((2921, 2966), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "results['LoadInfo'].index"}), "(index=results['LoadInfo'].index)\n", (2933, 2966), True, 'import pandas as pd\n'), ((1731, 1780), 'pandas.concat', 'pd.concat', (['[load_profile_s_t_a, load_profile_s_t]'], {}), '([load_profile_s_t_a, load_profile_s_t])\n', (1740, 1780), True, 'import pandas as pd\n'), ((2337, 2379), 'numpy.sum', 'np.sum', (['load_profile_import.values'], {'axis': '(0)'}), '(load_profile_import.values, axis=0)\n', (2343, 2379), True, 'import numpy as np\n'), ((2471, 2513), 'numpy.sum', 'np.sum', (['load_profile_export.values'], {'axis': '(0)'}), '(load_profile_export.values, axis=0)\n', (2477, 2513), True, 'import numpy as np\n'), ((10299, 10313), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10311, 10313), True, 'import pandas as pd\n'), ((10351, 10365), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10363, 10365), True, 'import pandas as pd\n'), ((3295, 3309), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3307, 3309), True, 'import pandas as pd\n'), ((3351, 3365), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3363, 3365), True, 'import pandas as pd\n'), ((6472, 6541), 'numpy.nansum', 'np.nansum', (['load_profile_q[col].values[load_profile_q[col].values > 0]'], {}), '(load_profile_q[col].values[load_profile_q[col].values > 0])\n', (6481, 
6541), True, 'import numpy as np\n'), ((8097, 8166), 'numpy.nansum', 'np.nansum', (['load_profile_m[col].values[load_profile_m[col].values > 0]'], {}), '(load_profile_m[col].values[load_profile_m[col].values > 0])\n', (8106, 8166), True, 'import numpy as np\n'), ((14427, 14501), 'pandas.merge', 'pd.merge', (['load_profile_f2', 'network_load'], {'left_index': '(True)', 'right_index': '(True)'}), '(load_profile_f2, network_load, left_index=True, right_index=True)\n', (14435, 14501), True, 'import pandas as pd\n'), ((14542, 14591), 'numpy.empty', 'np.empty', (['(0, new_load.shape[1] - 1)'], {'dtype': 'float'}), '((0, new_load.shape[1] - 1), dtype=float)\n', (14550, 14591), True, 'import numpy as np\n'), ((15118, 15171), 'numpy.clip', 'np.clip', (['average_peaks_all'], {'a_min': 'min_dem', 'a_max': 'None'}), '(average_peaks_all, a_min=min_dem, a_max=None)\n', (15125, 15171), True, 'import numpy as np\n'), ((15308, 15359), 'numpy.empty', 'np.empty', (['(0, load_profile_f.shape[1])'], {'dtype': 'float'}), '((0, load_profile_f.shape[1]), dtype=float)\n', (15316, 15359), True, 'import numpy as np\n'), ((15811, 15864), 'numpy.clip', 'np.clip', (['average_peaks_all'], {'a_min': 'min_dem', 'a_max': 'None'}), '(average_peaks_all, a_min=min_dem, a_max=None)\n', (15818, 15864), True, 'import numpy as np\n'), ((13359, 13382), 'numpy.sum', 'np.sum', (['(arr > 0)'], {'axis': '(0)'}), '(arr > 0, axis=0)\n', (13365, 13382), True, 'import numpy as np\n')] |
# coding=utf-8
#
# Copyright 2020 <NAME> Duesseldorf
#
# Part of this code is based on the source code of BERT-DST
# (arXiv:1907.03040)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import sys
import numpy as np
import re
def load_dataset_config(dataset_config):
    """Read a dataset config JSON file and return (class_types, slots, label_maps)."""
    with open(dataset_config, "r", encoding="utf-8") as f:
        cfg = json.load(f)
    return tuple(cfg[key] for key in ("class_types", "slots", "label_maps"))
def tokenize(text):
    """Normalize a (possibly GPT-2 BPE-marked) string into space-separated tokens.

    If the text contains the BPE space marker "\u0120" (Ġ), literal spaces are
    subword-merge artifacts and are removed before the markers are turned back
    into real spaces. The result is re-split on non-word characters so that
    punctuation becomes its own token.
    """
    if "\u0120" in text:
        # Plain string replacement; no regex needed for literal characters.
        text = text.replace(" ", "")
        text = text.replace("\u0120", " ")
        text = text.strip()
    # Raw string avoids the invalid-escape warning of "(\W+)".
    return " ".join(tok for tok in map(str.strip, re.split(r"(\W+)", text)) if len(tok) > 0)
def is_in_list(tok, value):
    """Return True if the token sequence of `value` occurs as a contiguous
    subsequence of the token sequence of `tok`.

    Both strings are split on non-word characters (raw-string pattern fixes
    the invalid "\\W" escape of the original) and empty fragments dropped.
    """
    tok_list = [item for item in map(str.strip, re.split(r"(\W+)", tok)) if len(item) > 0]
    value_list = [item for item in map(str.strip, re.split(r"(\W+)", value)) if len(item) > 0]
    value_len = len(value_list)
    # Slide a window of value_len tokens across tok_list.
    return any(
        tok_list[i:i + value_len] == value_list
        for i in range(len(tok_list) + 1 - value_len)
    )
def check_slot_inform(value_label, inform_label, label_maps):
    """Resolve an informed value against the ground-truth value label.

    Returns ``value_label`` whenever ``inform_label`` matches it directly, by
    token containment in either direction, or via any label variant listed in
    ``label_maps`` (checked for the inform side first, then the value side,
    mirroring the original elif ordering). Otherwise returns ``inform_label``.
    """
    def _matches(a, b):
        # Equality, or token-level containment in either direction.
        return a == b or is_in_list(a, b) or is_in_list(b, a)

    if _matches(value_label, inform_label):
        return value_label
    if inform_label in label_maps:
        for variant in label_maps[inform_label]:
            if _matches(value_label, variant):
                return value_label
    elif value_label in label_maps:
        for variant in label_maps[value_label]:
            if _matches(variant, inform_label):
                return value_label
    return inform_label
def get_joint_slot_correctness(
    fp,
    class_types,
    label_maps,
    key_class_label_id="class_label_id",
    key_class_prediction="class_prediction",
    key_start_pos="start_pos",
    key_start_prediction="start_prediction",
    key_end_pos="end_pos",
    key_end_prediction="end_prediction",
    key_refer_id="refer_id",
    key_refer_prediction="refer_prediction",
    key_slot_groundtruth="slot_groundtruth",
    key_slot_prediction="slot_prediction",
):
    """Score one slot's predictions from a JSON prediction dump ``fp``.

    Tracks per-turn class accuracy, span-position accuracy, referral accuracy
    and the *joint* (dialog-state) value accuracy: the predicted slot value is
    carried over between turns and only updated when the predicted class type
    says so. Ground-truth variants listed in ``label_maps`` count as correct.

    Returns numpy arrays (total, value, class, position, referral correctness,
    confusion matrix) followed by the per-class TP/TN/FP/FN count dicts.
    """
    with open(fp) as f:
        preds = json.load(f)
    # One accuracy list per class type, plus one overall list at index -1.
    class_correctness = [[] for cl in range(len(class_types) + 1)]
    confusion_matrix = [
        [[] for cl_b in range(len(class_types))] for cl_a in range(len(class_types))
    ]
    pos_correctness = []
    refer_correctness = []
    val_correctness = []
    total_correctness = []
    c_tp = {ct: 0 for ct in range(len(class_types))}
    c_tn = {ct: 0 for ct in range(len(class_types))}
    c_fp = {ct: 0 for ct in range(len(class_types))}
    c_fn = {ct: 0 for ct in range(len(class_types))}
    for pred in preds:
        guid = pred["guid"]  # List: set_type, dialogue_idx, turn_idx
        turn_gt_class = pred[key_class_label_id]
        turn_pd_class = pred[key_class_prediction]
        gt_start_pos = pred[key_start_pos]
        pd_start_pos = pred[key_start_prediction]
        gt_end_pos = pred[key_end_pos]
        pd_end_pos = pred[key_end_prediction]
        gt_refer = pred[key_refer_id]
        pd_refer = pred[key_refer_prediction]
        gt_slot = pred[key_slot_groundtruth]
        pd_slot = pred[key_slot_prediction]
        gt_slot = tokenize(gt_slot)
        pd_slot = tokenize(pd_slot)
        # Make sure the true turn labels are contained in the prediction json file!
        joint_gt_slot = gt_slot
        # NOTE(review): joint_pd_slot persists across loop iterations for
        # turn_idx > 0 — this is the dialog-state carry-over and assumes
        # preds are ordered by dialogue and turn.
        if guid[-1] == "0":  # First turn, reset the slots
            joint_pd_slot = "none"
        # If turn_pd_class or a value to be copied is "none", do not update the dialog state.
        if turn_pd_class == class_types.index("none"):
            pass
        elif turn_pd_class == class_types.index("dontcare"):
            joint_pd_slot = "dontcare"
        elif turn_pd_class == class_types.index("copy_value"):
            joint_pd_slot = pd_slot
        elif "true" in class_types and turn_pd_class == class_types.index("true"):
            joint_pd_slot = "true"
        elif "false" in class_types and turn_pd_class == class_types.index("false"):
            joint_pd_slot = "false"
        elif "refer" in class_types and turn_pd_class == class_types.index("refer"):
            # "§§" marks a referred/informed value prefix in the dump.
            if pd_slot[0:3] == "§§ ":
                if pd_slot[3:] != "none":
                    joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
            elif pd_slot[0:2] == "§§":
                if pd_slot[2:] != "none":
                    joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
            elif pd_slot != "none":
                joint_pd_slot = pd_slot
        elif "inform" in class_types and turn_pd_class == class_types.index("inform"):
            if pd_slot[0:3] == "§§ ":
                if pd_slot[3:] != "none":
                    joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
            elif pd_slot[0:2] == "§§":
                if pd_slot[2:] != "none":
                    joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
            else:
                print("ERROR: Unexpected slot value format. Aborting.")
                exit()
        else:
            print("ERROR: Unexpected class_type. Aborting.")
            exit()
        total_correct = True
        # Check the per turn correctness of the class_type prediction
        if turn_gt_class == turn_pd_class:
            class_correctness[turn_gt_class].append(1.0)
            class_correctness[-1].append(1.0)
            c_tp[turn_gt_class] += 1
            for cc in range(len(class_types)):
                if cc != turn_gt_class:
                    c_tn[cc] += 1
            # Only where there is a span, we check its per turn correctness
            if turn_gt_class == class_types.index("copy_value"):
                if gt_start_pos == pd_start_pos and gt_end_pos == pd_end_pos:
                    pos_correctness.append(1.0)
                else:
                    pos_correctness.append(0.0)
            # Only where there is a referral, we check its per turn correctness
            if "refer" in class_types and turn_gt_class == class_types.index("refer"):
                if gt_refer == pd_refer:
                    refer_correctness.append(1.0)
                    print("  [%s] Correct referral: %s | %s" % (guid, gt_refer, pd_refer))
                else:
                    refer_correctness.append(0.0)
                    print("  [%s] Incorrect referral: %s | %s" % (guid, gt_refer, pd_refer))
        else:
            if turn_gt_class == class_types.index("copy_value"):
                pos_correctness.append(0.0)
            if "refer" in class_types and turn_gt_class == class_types.index("refer"):
                refer_correctness.append(0.0)
            class_correctness[turn_gt_class].append(0.0)
            class_correctness[-1].append(0.0)
            confusion_matrix[turn_gt_class][turn_pd_class].append(1.0)
            c_fn[turn_gt_class] += 1
            c_fp[turn_pd_class] += 1
        # Check the joint slot correctness.
        # If the value label is not none, then we need to have a value prediction.
        # Even if the class_type is 'none', there can still be a value label,
        # it might just not be pointable in the current turn. It might however
        # be referrable and thus predicted correctly.
        if joint_gt_slot == joint_pd_slot:
            val_correctness.append(1.0)
        elif (
            joint_gt_slot != "none"
            and joint_gt_slot != "dontcare"
            and joint_gt_slot != "true"
            and joint_gt_slot != "false"
            and joint_gt_slot in label_maps
        ):
            # Fall back to the ground truth's label-map variants.
            no_match = True
            for variant in label_maps[joint_gt_slot]:
                if variant == joint_pd_slot:
                    no_match = False
                    break
            if no_match:
                val_correctness.append(0.0)
                total_correct = False
                print(
                    "  [%s] Incorrect value (variant): %s (turn class: %s) | %s (turn class: %s)"
                    % (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class)
                )
            else:
                val_correctness.append(1.0)
        else:
            val_correctness.append(0.0)
            total_correct = False
            print(
                "  [%s] Incorrect value: %s (turn class: %s) | %s (turn class: %s)"
                % (guid, joint_gt_slot, turn_gt_class, joint_pd_slot, turn_pd_class)
            )
        total_correctness.append(1.0 if total_correct else 0.0)
    # Account for empty lists (due to no instances of spans or referrals being seen)
    if pos_correctness == []:
        pos_correctness.append(1.0)
    if refer_correctness == []:
        refer_correctness.append(1.0)
    # Per-class precision / recall / F1 / accuracy report.
    for ct in range(len(class_types)):
        if c_tp[ct] + c_fp[ct] > 0:
            precision = c_tp[ct] / (c_tp[ct] + c_fp[ct])
        else:
            precision = 1.0
        if c_tp[ct] + c_fn[ct] > 0:
            recall = c_tp[ct] / (c_tp[ct] + c_fn[ct])
        else:
            recall = 1.0
        if precision + recall > 0:
            f1 = 2 * ((precision * recall) / (precision + recall))
        else:
            f1 = 1.0
        if c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct] > 0:
            acc = (c_tp[ct] + c_tn[ct]) / (c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct])
        else:
            acc = 1.0
        print(
            "Performance for class '%s' (%s): Recall: %.2f (%d of %d), Precision: %.2f, F1: %.2f, Accuracy: %.2f (TP/TN/FP/FN: %d/%d/%d/%d)"
            % (
                class_types[ct],
                ct,
                recall,
                np.sum(class_correctness[ct]),
                len(class_correctness[ct]),
                precision,
                f1,
                acc,
                c_tp[ct],
                c_tn[ct],
                c_fp[ct],
                c_fn[ct],
            )
        )
    print("Confusion matrix:")
    for cl in range(len(class_types)):
        print("    %s" % (cl), end="")
    print("")
    for cl_a in range(len(class_types)):
        print("%s " % (cl_a), end="")
        for cl_b in range(len(class_types)):
            if len(class_correctness[cl_a]) > 0:
                print(
                    "%.2f "
                    % (np.sum(confusion_matrix[cl_a][cl_b]) / len(class_correctness[cl_a])),
                    end="",
                )
            else:
                print("---- ", end="")
        print("")
    return (
        np.asarray(total_correctness),
        np.asarray(val_correctness),
        np.asarray(class_correctness),
        np.asarray(pos_correctness),
        np.asarray(refer_correctness),
        np.asarray(confusion_matrix),
        c_tp,
        c_tn,
        c_fp,
        c_fn,
    )
if __name__ == "__main__":
    # CLI: python metric.py <dataset> <dataset_config.json> "<pred_glob>"
    # Aggregates per-slot correctness across every prediction file matched by
    # the glob and reports joint goal accuracy per file.
    acc_list = []
    acc_list_v = []
    # Per-slot key templates used to pull this slot's fields out of a record.
    key_class_label_id = "class_label_id_%s"
    key_class_prediction = "class_prediction_%s"
    key_start_pos = "start_pos_%s"
    key_start_prediction = "start_prediction_%s"
    key_end_pos = "end_pos_%s"
    key_end_prediction = "end_prediction_%s"
    key_refer_id = "refer_id_%s"
    key_refer_prediction = "refer_prediction_%s"
    key_slot_groundtruth = "slot_groundtruth_%s"
    key_slot_prediction = "slot_prediction_%s"
    dataset = sys.argv[1].lower()
    dataset_config = sys.argv[2].lower()
    if dataset not in ["woz2", "sim-m", "sim-r", "multiwoz21"]:
        raise ValueError("Task not found: %s" % (dataset))
    class_types, slots, label_maps = load_dataset_config(dataset_config)
    # Prepare label_maps
    label_maps_tmp = {}
    for v in label_maps:
        label_maps_tmp[tokenize(v)] = [tokenize(nv) for nv in label_maps[v]]
    label_maps = label_maps_tmp
    for fp in sorted(glob.glob(sys.argv[3])):
        print(fp)
        goal_correctness = 1.0
        cls_acc = [[] for cl in range(len(class_types))]
        cls_conf = [[[] for cl_b in range(len(class_types))] for cl_a in range(len(class_types))]
        c_tp = {ct: 0 for ct in range(len(class_types))}
        c_tn = {ct: 0 for ct in range(len(class_types))}
        c_fp = {ct: 0 for ct in range(len(class_types))}
        c_fn = {ct: 0 for ct in range(len(class_types))}
        for slot in slots:
            (
                tot_cor,
                joint_val_cor,
                cls_cor,
                pos_cor,
                ref_cor,
                conf_mat,
                ctp,
                ctn,
                cfp,
                cfn,
            ) = get_joint_slot_correctness(
                fp,
                class_types,
                label_maps,
                key_class_label_id=(key_class_label_id % slot),
                key_class_prediction=(key_class_prediction % slot),
                key_start_pos=(key_start_pos % slot),
                key_start_prediction=(key_start_prediction % slot),
                key_end_pos=(key_end_pos % slot),
                key_end_prediction=(key_end_prediction % slot),
                key_refer_id=(key_refer_id % slot),
                key_refer_prediction=(key_refer_prediction % slot),
                key_slot_groundtruth=(key_slot_groundtruth % slot),
                key_slot_prediction=(key_slot_prediction % slot),
            )
            print(
                "%s: joint slot acc: %g, joint value acc: %g, turn class acc: %g, turn position acc: %g, turn referral acc: %g"
                % (
                    slot,
                    np.mean(tot_cor),
                    np.mean(joint_val_cor),
                    np.mean(cls_cor[-1]),
                    np.mean(pos_cor),
                    np.mean(ref_cor),
                )
            )
            # Joint goal requires EVERY slot to be correct in a turn, hence
            # element-wise multiplication of the per-slot correctness arrays.
            goal_correctness *= tot_cor
            # Accumulate per-class statistics across slots.
            for cl_a in range(len(class_types)):
                cls_acc[cl_a] += cls_cor[cl_a]
                for cl_b in range(len(class_types)):
                    cls_conf[cl_a][cl_b] += list(conf_mat[cl_a][cl_b])
                c_tp[cl_a] += ctp[cl_a]
                c_tn[cl_a] += ctn[cl_a]
                c_fp[cl_a] += cfp[cl_a]
                c_fn[cl_a] += cfn[cl_a]
        # Per-class precision / recall / F1 / accuracy over all slots.
        for ct in range(len(class_types)):
            if c_tp[ct] + c_fp[ct] > 0:
                precision = c_tp[ct] / (c_tp[ct] + c_fp[ct])
            else:
                precision = 1.0
            if c_tp[ct] + c_fn[ct] > 0:
                recall = c_tp[ct] / (c_tp[ct] + c_fn[ct])
            else:
                recall = 1.0
            if precision + recall > 0:
                f1 = 2 * ((precision * recall) / (precision + recall))
            else:
                f1 = 1.0
            if c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct] > 0:
                acc = (c_tp[ct] + c_tn[ct]) / (c_tp[ct] + c_tn[ct] + c_fp[ct] + c_fn[ct])
            else:
                acc = 1.0
            print(
                "Performance for class '%s' (%s): Recall: %.2f (%d of %d), Precision: %.2f, F1: %.2f, Accuracy: %.2f (TP/TN/FP/FN: %d/%d/%d/%d)"
                % (
                    class_types[ct],
                    ct,
                    recall,
                    np.sum(cls_acc[ct]),
                    len(cls_acc[ct]),
                    precision,
                    f1,
                    acc,
                    c_tp[ct],
                    c_tn[ct],
                    c_fp[ct],
                    c_fn[ct],
                )
            )
        print("Confusion matrix:")
        for cl in range(len(class_types)):
            print("    %s" % (cl), end="")
        print("")
        for cl_a in range(len(class_types)):
            print("%s " % (cl_a), end="")
            for cl_b in range(len(class_types)):
                if len(cls_acc[cl_a]) > 0:
                    print("%.2f " % (np.sum(cls_conf[cl_a][cl_b]) / len(cls_acc[cl_a])), end="")
                else:
                    print("---- ", end="")
            print("")
        acc = np.mean(goal_correctness)
        acc_list.append((fp, acc))
    # Report files best-first by joint goal accuracy.
    acc_list_s = sorted(acc_list, key=lambda tup: tup[1], reverse=True)
    for (fp, acc) in acc_list_s:
        # import pdb; pdb.set_trace()
        print("Joint goal acc: %g, %s" % (acc, fp))
| [
"numpy.mean",
"re.split",
"numpy.asarray",
"numpy.sum",
"json.load",
"re.sub",
"glob.glob"
] | [((870, 882), 'json.load', 'json.load', (['f'], {}), '(f)\n', (879, 882), False, 'import json\n'), ((1029, 1050), 're.sub', 're.sub', (['""" """', '""""""', 'text'], {}), "(' ', '', text)\n", (1035, 1050), False, 'import re\n'), ((1066, 1088), 're.sub', 're.sub', (['"""Ġ"""', '""" """', 'text'], {}), "('Ġ', ' ', text)\n", (1072, 1088), False, 'import re\n'), ((3402, 3414), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3411, 3414), False, 'import json\n'), ((13878, 13900), 'glob.glob', 'glob.glob', (['sys.argv[3]'], {}), '(sys.argv[3])\n', (13887, 13900), False, 'import glob\n'), ((18010, 18035), 'numpy.mean', 'np.mean', (['goal_correctness'], {}), '(goal_correctness)\n', (18017, 18035), True, 'import numpy as np\n'), ((12576, 12605), 'numpy.asarray', 'np.asarray', (['total_correctness'], {}), '(total_correctness)\n', (12586, 12605), True, 'import numpy as np\n'), ((12619, 12646), 'numpy.asarray', 'np.asarray', (['val_correctness'], {}), '(val_correctness)\n', (12629, 12646), True, 'import numpy as np\n'), ((12660, 12689), 'numpy.asarray', 'np.asarray', (['class_correctness'], {}), '(class_correctness)\n', (12670, 12689), True, 'import numpy as np\n'), ((12703, 12730), 'numpy.asarray', 'np.asarray', (['pos_correctness'], {}), '(pos_correctness)\n', (12713, 12730), True, 'import numpy as np\n'), ((12744, 12773), 'numpy.asarray', 'np.asarray', (['refer_correctness'], {}), '(refer_correctness)\n', (12754, 12773), True, 'import numpy as np\n'), ((12787, 12815), 'numpy.asarray', 'np.asarray', (['confusion_matrix'], {}), '(confusion_matrix)\n', (12797, 12815), True, 'import numpy as np\n'), ((1312, 1335), 're.split', 're.split', (['"""(\\\\W+)"""', 'tok'], {}), "('(\\\\W+)', tok)\n", (1320, 1335), False, 'import re\n'), ((1404, 1429), 're.split', 're.split', (['"""(\\\\W+)"""', 'value'], {}), "('(\\\\W+)', value)\n", (1412, 1429), False, 'import re\n'), ((1173, 1197), 're.split', 're.split', (['"""(\\\\W+)"""', 'text'], {}), "('(\\\\W+)', text)\n", (1181, 1197), 
False, 'import re\n'), ((11609, 11638), 'numpy.sum', 'np.sum', (['class_correctness[ct]'], {}), '(class_correctness[ct])\n', (11615, 11638), True, 'import numpy as np\n'), ((15587, 15603), 'numpy.mean', 'np.mean', (['tot_cor'], {}), '(tot_cor)\n', (15594, 15603), True, 'import numpy as np\n'), ((15625, 15647), 'numpy.mean', 'np.mean', (['joint_val_cor'], {}), '(joint_val_cor)\n', (15632, 15647), True, 'import numpy as np\n'), ((15669, 15689), 'numpy.mean', 'np.mean', (['cls_cor[-1]'], {}), '(cls_cor[-1])\n', (15676, 15689), True, 'import numpy as np\n'), ((15711, 15727), 'numpy.mean', 'np.mean', (['pos_cor'], {}), '(pos_cor)\n', (15718, 15727), True, 'import numpy as np\n'), ((15749, 15765), 'numpy.mean', 'np.mean', (['ref_cor'], {}), '(ref_cor)\n', (15756, 15765), True, 'import numpy as np\n'), ((17201, 17220), 'numpy.sum', 'np.sum', (['cls_acc[ct]'], {}), '(cls_acc[ct])\n', (17207, 17220), True, 'import numpy as np\n'), ((12335, 12371), 'numpy.sum', 'np.sum', (['confusion_matrix[cl_a][cl_b]'], {}), '(confusion_matrix[cl_a][cl_b])\n', (12341, 12371), True, 'import numpy as np\n'), ((17848, 17876), 'numpy.sum', 'np.sum', (['cls_conf[cl_a][cl_b]'], {}), '(cls_conf[cl_a][cl_b])\n', (17854, 17876), True, 'import numpy as np\n')] |
import time
import numpy as np
def force_feedback(dev_obj, env):
    """Map the latest measured wrench onto a haptic pulse duration on the
    control device -- currently only designed to work with Thing Ros envs
    and vr.

    Reads ``env.latest_ft_raw`` (6 values: force xyz then torque xyz) and
    writes ``dev_obj.force_feedback_dur`` (0 when below both thresholds,
    otherwise an int in (0, 3999)).
    """
    FORCE_LIMIT = 10   # N, forces below this produce no feedback
    TORQUE_LIMIT = 1   # Nm, torques below this produce no feedback
    wrench = env.latest_ft_raw
    force_mag = np.linalg.norm(wrench[:3])
    torque_mag = np.linalg.norm(wrench[3:])
    # zero out readings under their thresholds
    if force_mag < FORCE_LIMIT:
        force_mag = 0
    if torque_mag < TORQUE_LIMIT:
        torque_mag = 0
    if not force_mag and not torque_mag:
        dev_obj.force_feedback_dur = 0
        return
    # nonlinear (exponential) response, capped at 1.0; the stronger of the
    # scaled force / torque signals drives the pulse duration
    strongest = max(
        min(np.exp(force_mag / FORCE_LIMIT - 4), 1.0),
        min(np.exp(torque_mag / TORQUE_LIMIT - 4), 1.0),
    )
    dev_obj.force_feedback_dur = int(strongest * 3999)
# user controlled booleans for recording state
class Button():
    """Edge and hold detector for a single boolean input.

    Tracks the current and previous raw states so that each update reports
    rising/falling edges, and rising/falling edges of the "held" condition
    (input continuously true for longer than ``hold_time_length`` seconds).
    """
    def __init__(self, hold_time_length):
        # current / previous raw state and hold state
        self.state = False
        self.last_state = False
        self.hold_state = False
        self.hold_time_start = time.time()
        self.last_hold_state = False
        self.hold_time_length = hold_time_length
        # latched edges for get_and_store_update / reset_state
        self.stored_state = dict(re=False, fe=False, rhe=False, fhe=False)

    def get_update(self, raw_state, cur_time):
        """
        Update the button state and hold state and return the rising and
        falling edges.

        :param raw_state: raw state of the button from its source.
        :param cur_time: current time in seconds.
        :return: (rising edge, falling edge, hold rising edge,
            hold falling edge) booleans.
        """
        self.last_hold_state = self.hold_state
        self.last_state = self.state
        self.state = raw_state
        rising_edge = bool(self.state and not self.last_state)
        if rising_edge:
            # a fresh press restarts the hold timer
            self.hold_time_start = cur_time
        falling_edge = bool(not self.state and self.last_state)
        # held = pressed continuously for longer than the hold window
        held = cur_time - self.hold_time_start > self.hold_time_length
        self.hold_state = bool(held and self.state)
        hold_rising_edge = bool(self.hold_state and not self.last_hold_state)
        hold_falling_edge = bool(not self.hold_state and self.last_hold_state)
        return rising_edge, falling_edge, hold_rising_edge, hold_falling_edge

    def get_and_store_update(self, raw_state, cur_time):
        """ Only allows changing False to True, stores between calls to reset_state. """
        edges = self.get_update(raw_state, cur_time)
        for key, edge in zip(('re', 'fe', 'rhe', 'fhe'), edges):
            self.stored_state[key] = edge or self.stored_state[key]

    def reset_state(self):
        # clear all latched edges
        for key in self.stored_state:
            self.stored_state[key] = False
class CollectDevice:
    """Wrap a physical input device (keyboard, gamepad, or VR controller)
    for teleoperated demonstration collection.

    Tracks recording state and translates raw device input into high-level
    collection commands (start/save/cancel/reset/delete/feedback) and into
    end-effector velocity actions.
    """
    # logical collection buttons, in a fixed order
    BUTTONS = ['start_save_cancel', 'delete', 'reset_save', 'success_fb_fail']
    # physical control mapped to each logical button, per device type
    # NOTE(review): keyboard/gamepad rows have only 3 entries, so zip() below
    # never reaches 'success_fb_fail' on those devices -- confirm intended
    BUTTONS_TABLE = dict(
        keyboard=['enter', 'backspace', 'r_shift'],
        gamepad=['B', 'Y', 'X'],
        vr=['trackpad_right_click', 'trackpad_up_click', 'trackpad_left_click', 'trackpad_down_click']
    )

    def __init__(self, device, valid_t_dof=(1, 1, 1), valid_r_dof=(1, 1, 1), output_grip=True,
                 action_multiplier=1.0, des_forward_axis=(0, 0, 1), des_up_axis=(0, -1, 0)):
        """
        :param device: one of 'keyboard', 'gamepad', 'vr'.
        :param valid_t_dof: mask selecting translational dofs kept in actions.
        :param valid_r_dof: mask selecting rotational dofs kept in actions.
        :param output_grip: if True, append a grip dimension to actions.
        :param action_multiplier: scale applied to keyboard/gamepad actions.
        :param des_forward_axis: forward axis passed to the vr backend.
        :param des_up_axis: up axis passed to the vr backend.
        """
        self.dev_type = device
        self.dev = self.initialize_device(device, des_forward_axis, des_up_axis)
        self.recording = False
        # one edge/hold detector per logical button (VR reports edges itself,
        # so these are only consulted for keyboard/gamepad)
        self.buttons = dict()
        for b in CollectDevice.BUTTONS:
            self.buttons[b] = Button(hold_time_length=2)
        self.valid_t_dof = np.array(valid_t_dof)
        self.valid_r_dof = np.array(valid_r_dof)
        self.output_grip = output_grip
        self.action_multiplier = action_multiplier

    def initialize_device(self, device, des_forward_axis, des_up_axis):
        """Import and construct the steering backend for the chosen device.

        Imports are local so only the selected device's dependencies are
        required at runtime.
        """
        # NOTE(review): an unrecognized device name leaves `dev` unbound and
        # raises UnboundLocalError on the return -- confirm acceptable
        if device == 'keyboard':
            from manipulator_learning.learning.imitation.devices.keyboard_control import KeyboardSteer
            dev = KeyboardSteer()
        elif device == 'gamepad':
            from manipulator_learning.learning.imitation.devices.gamepad_control import GamepadSteer
            dev = GamepadSteer()
        elif device == 'vr':
            from manipulator_learning.learning.imitation.devices.vr_control import VRSteer
            dev = VRSteer(des_forward_axis=des_forward_axis, des_up_axis=des_up_axis)
        return dev

    def update_and_get_state(self):
        """Poll the device and convert button edges into collection commands.

        :return: tuple of booleans (cancel, save, start, reset, delete,
            success_fb_suc, success_fb_fail) for this polling step.
        """
        cur_time = time.time()
        cancel, save, start, reset, delete, success_fb_suc, success_fb_fail = False, False, False, False, False, False, False
        self.dev.process_events()
        if self.dev_type == 'vr':
            # VR backend provides pre-computed edge/hold dictionaries
            button_edges_dict = self.dev.get_latest_button_state()
        for but, actual in zip(CollectDevice.BUTTONS, CollectDevice.BUTTONS_TABLE[self.dev_type]):
            if self.dev_type == 'vr':
                re = button_edges_dict[actual]['re']
                fe = button_edges_dict[actual]['fe']
                rhe = button_edges_dict[actual]['rhe']
                fhe = button_edges_dict[actual]['fhe']
            else:
                # keyboard/gamepad: run raw state through our edge detector
                re, fe, rhe, fhe = self.buttons[but].get_update(self.dev.btn_state[actual], cur_time)
            if but == 'start_save_cancel':
                # long press while recording cancels; short press starts
                if fhe:
                    if self.recording:
                        if self.dev_type == 'vr':
                            self.dev.trigger_haptic()
                        cancel = True
                        self.recording = False
                elif fe:
                    if not self.recording:
                        if self.dev_type == 'vr':
                            self.dev.trigger_haptic()
                        start = True
                        self.recording = True
                        print("-----------------")
                        print("RECORDING START!!")
            elif but == 'reset_save':
                # reset always; also saves if a recording was in progress
                if fe:
                    if self.dev_type == 'vr':
                        self.dev.trigger_haptic()
                    reset = True
                    if self.recording:
                        save = True
                        self.recording = False
            elif but == 'delete':
                # long press deletes; short press doubles as success feedback
                if fhe:
                    if self.dev_type == 'vr':
                        self.dev.trigger_haptic()
                    delete = True
                elif fe:
                    if self.dev_type == 'vr':
                        self.dev.trigger_haptic()
                    success_fb_suc = True
            elif but == 'success_fb_fail':
                if fe:
                    if self.dev_type == 'vr':
                        self.dev.trigger_haptic()
                    success_fb_fail = True
        return cancel, save, start, reset, delete, success_fb_suc, success_fb_fail

    def get_ee_vel_action(self, ee_pose=None, base_pose=None, vr_p_mult=10.0, grip_mag=.05):
        """ poses used for vr actions, given as 7-dim arrays with 3 for pos and 4 for xyzw quat.
        grip_mag should be set to be approximately the same as the mean action from the other
        dimensions, since the grip action will be the same regardless. """
        if self.dev_type == 'gamepad':
            # left stick + triggers -> translation, right stick + bumpers -> rotation
            trans_vel = self.action_multiplier * np.array([-self.dev.normalized_btn_state['LY'],
                                                          -self.dev.normalized_btn_state['LX'],
                                                          self.dev.normalized_btn_state['LT'] - self.dev.normalized_btn_state['RT']])
            rot_vel = self.action_multiplier * np.array([self.dev.normalized_btn_state['RX'],
                                                        -self.dev.normalized_btn_state['RY'],
                                                        self.dev.btn_state['LB'] - self.dev.btn_state['RB']])
            # grip = self.dev.btn_state['A']
            grip = self.dev.btn_state['RT']
        elif self.dev_type == 'keyboard':
            # wasd/qe -> translation, uijklo -> rotation, space -> grip
            trans_vel = self.action_multiplier * np.array([self.dev.btn_state['d'] - self.dev.btn_state['a'],
                                                          self.dev.btn_state['w'] - self.dev.btn_state['s'],
                                                          self.dev.btn_state['e'] - self.dev.btn_state['q']])
            rot_vel = self.action_multiplier * np.array([self.dev.btn_state['u'] - self.dev.btn_state['j'],
                                                        self.dev.btn_state['i'] - self.dev.btn_state['k'],
                                                        self.dev.btn_state['o'] - self.dev.btn_state['l']])
            grip = self.dev.btn_state['space']
        elif self.dev_type == 'vr':
            # since vr needs to output a position, output a position, and use a simple p(id) controller
            # to output a velocity to match the position
            if base_pose is not None:
                trans_vel, rot_vel, grip = self.dev.move_robot(ee_pose[:3], ee_pose[3:],
                    base_pose[:3], base_pose[3:], output_vel=True, output_vel_p=vr_p_mult)
            else:
                trans_vel, rot_vel, grip = self.dev.move_robot(ee_pose[:3], ee_pose[3:], output_vel=True,
                                                               output_vel_p=vr_p_mult)
        # keep only the dofs enabled by the masks
        trans_vel = trans_vel[self.valid_t_dof.nonzero()[0]]
        rot_vel = rot_vel[self.valid_r_dof.nonzero()[0]]
        return_act = np.concatenate((trans_vel, rot_vel))
        if self.output_grip:
            # binary grip mapped to +/- grip_mag as the final action dim
            if grip:
                grip = grip_mag
            else:
                grip = -grip_mag
            return_act = np.concatenate((return_act, np.array((grip,))))
        # return_act = (return_act, int(grip))
        return return_act

    def force_feedback(self, env):
        """ give user some degree of force feedback in control device if it is available --
        currently only designed to work with Thing Ros envs and vr"""
        # delegates to the module-level force_feedback helper
        force_feedback(self.dev, env)

    def get_ee_pos_action(self, cur_pose, cur_base_pose):
        # placeholder: position-based actions are not implemented yet
        raise NotImplementedError()
| [
"manipulator_learning.learning.imitation.devices.gamepad_control.GamepadSteer",
"manipulator_learning.learning.imitation.devices.vr_control.VRSteer",
"numpy.exp",
"numpy.array",
"numpy.concatenate",
"time.time",
"manipulator_learning.learning.imitation.devices.keyboard_control.KeyboardSteer"
] | [((626, 646), 'numpy.exp', 'np.exp', (['(f_scaled - 4)'], {}), '(f_scaled - 4)\n', (632, 646), True, 'import numpy as np\n'), ((672, 692), 'numpy.exp', 'np.exp', (['(t_scaled - 4)'], {}), '(t_scaled - 4)\n', (678, 692), True, 'import numpy as np\n'), ((1221, 1232), 'time.time', 'time.time', ([], {}), '()\n', (1230, 1232), False, 'import time\n'), ((4125, 4146), 'numpy.array', 'np.array', (['valid_t_dof'], {}), '(valid_t_dof)\n', (4133, 4146), True, 'import numpy as np\n'), ((4174, 4195), 'numpy.array', 'np.array', (['valid_r_dof'], {}), '(valid_r_dof)\n', (4182, 4195), True, 'import numpy as np\n'), ((4978, 4989), 'time.time', 'time.time', ([], {}), '()\n', (4987, 4989), False, 'import time\n'), ((9776, 9812), 'numpy.concatenate', 'np.concatenate', (['(trans_vel, rot_vel)'], {}), '((trans_vel, rot_vel))\n', (9790, 9812), True, 'import numpy as np\n'), ((4513, 4528), 'manipulator_learning.learning.imitation.devices.keyboard_control.KeyboardSteer', 'KeyboardSteer', ([], {}), '()\n', (4526, 4528), False, 'from manipulator_learning.learning.imitation.devices.keyboard_control import KeyboardSteer\n'), ((4682, 4696), 'manipulator_learning.learning.imitation.devices.gamepad_control.GamepadSteer', 'GamepadSteer', ([], {}), '()\n', (4694, 4696), False, 'from manipulator_learning.learning.imitation.devices.gamepad_control import GamepadSteer\n'), ((7785, 7956), 'numpy.array', 'np.array', (["[-self.dev.normalized_btn_state['LY'], -self.dev.normalized_btn_state['LX'],\n self.dev.normalized_btn_state['LT'] - self.dev.normalized_btn_state['RT']]"], {}), "([-self.dev.normalized_btn_state['LY'], -self.dev.\n normalized_btn_state['LX'], self.dev.normalized_btn_state['LT'] - self.\n dev.normalized_btn_state['RT']])\n", (7793, 7956), True, 'import numpy as np\n'), ((8044, 8192), 'numpy.array', 'np.array', (["[self.dev.normalized_btn_state['RX'], -self.dev.normalized_btn_state['RY'],\n self.dev.btn_state['LB'] - self.dev.btn_state['RB']]"], {}), 
"([self.dev.normalized_btn_state['RX'], -self.dev.\n normalized_btn_state['RY'], self.dev.btn_state['LB'] - self.dev.\n btn_state['RB']])\n", (8052, 8192), True, 'import numpy as np\n'), ((4835, 4902), 'manipulator_learning.learning.imitation.devices.vr_control.VRSteer', 'VRSteer', ([], {'des_forward_axis': 'des_forward_axis', 'des_up_axis': 'des_up_axis'}), '(des_forward_axis=des_forward_axis, des_up_axis=des_up_axis)\n', (4842, 4902), False, 'from manipulator_learning.learning.imitation.devices.vr_control import VRSteer\n'), ((8427, 8599), 'numpy.array', 'np.array', (["[self.dev.btn_state['d'] - self.dev.btn_state['a'], self.dev.btn_state['w'] -\n self.dev.btn_state['s'], self.dev.btn_state['e'] - self.dev.btn_state['q']]"], {}), "([self.dev.btn_state['d'] - self.dev.btn_state['a'], self.dev.\n btn_state['w'] - self.dev.btn_state['s'], self.dev.btn_state['e'] -\n self.dev.btn_state['q']])\n", (8435, 8599), True, 'import numpy as np\n'), ((8706, 8878), 'numpy.array', 'np.array', (["[self.dev.btn_state['u'] - self.dev.btn_state['j'], self.dev.btn_state['i'] -\n self.dev.btn_state['k'], self.dev.btn_state['o'] - self.dev.btn_state['l']]"], {}), "([self.dev.btn_state['u'] - self.dev.btn_state['j'], self.dev.\n btn_state['i'] - self.dev.btn_state['k'], self.dev.btn_state['o'] -\n self.dev.btn_state['l']])\n", (8714, 8878), True, 'import numpy as np\n'), ((9999, 10016), 'numpy.array', 'np.array', (['(grip,)'], {}), '((grip,))\n', (10007, 10016), True, 'import numpy as np\n')] |
import numpy as np
from auto_editor.audiotsm2.base import AnalysisSynthesisTSM
from auto_editor.audiotsm2.utils.windows import hanning
class WSOLAConverter():
    """
    A Converter implementing the WSOLA (Waveform Similarity-based Overlap-Add)
    time-scale modification procedure.

    The first frame is copied as-is; every following frame is shifted to the
    offset that best matches the "natural progression" (what the next frame
    would have been at normal speed), found by cross-correlation.
    """
    def __init__(self, channels, frame_length, synthesis_hop, tolerance):
        self._channels = channels
        self._frame_length = frame_length
        self._synthesis_hop = synthesis_hop
        self._tolerance = tolerance
        # per-channel output buffer, reused between calls
        self._synthesis_frame = np.empty((channels, frame_length))
        # per-channel expected continuation of the previous output frame
        self._natural_progression = np.empty((channels, frame_length))
        self._first = True

    def clear(self):
        # forget history; the next frame is treated as the first one
        self._first = True

    def convert_frame(self, analysis_frame):
        """Align *analysis_frame* to the stored natural progression and
        return the synthesis frame (a reused internal buffer)."""
        length = self._frame_length
        hop = self._synthesis_hop
        for chan in range(self._channels):
            if self._first:
                offset = 0
            else:
                # best-matching shift via cross-correlation with the
                # natural progression of the previous frame
                scores = np.correlate(
                    analysis_frame[chan, :-hop],
                    self._natural_progression[chan])
                offset = np.argmax(scores)
            # shifted analysis frame becomes the synthesis frame
            self._synthesis_frame[chan] = \
                analysis_frame[chan, offset:offset + length]
            # remember what would follow at normal speed
            start = offset + hop
            self._natural_progression[chan] = \
                analysis_frame[chan, start:start + length]
        self._first = False
        return self._synthesis_frame
def wsola(channels, speed=1., frame_length=1024, analysis_hop=None, synthesis_hop=None,
          tolerance=None):
    """Build a WSOLA time-scale modification object.

    Unspecified hops/tolerance default to half a frame, with the analysis
    hop scaled by the requested speed.
    """
    synthesis_hop = frame_length // 2 if synthesis_hop is None else synthesis_hop
    analysis_hop = int(synthesis_hop * speed) if analysis_hop is None else analysis_hop
    tolerance = frame_length // 2 if tolerance is None else tolerance

    converter = WSOLAConverter(channels, frame_length, synthesis_hop, tolerance)
    # no analysis window; Hanning synthesis window
    return AnalysisSynthesisTSM(
        converter, channels, frame_length, analysis_hop, synthesis_hop,
        None, hanning(frame_length), tolerance,
        tolerance + synthesis_hop)
| [
"numpy.copyto",
"auto_editor.audiotsm2.base.AnalysisSynthesisTSM",
"numpy.argmax",
"auto_editor.audiotsm2.utils.windows.hanning",
"numpy.correlate",
"numpy.empty"
] | [((2084, 2105), 'auto_editor.audiotsm2.utils.windows.hanning', 'hanning', (['frame_length'], {}), '(frame_length)\n', (2091, 2105), False, 'from auto_editor.audiotsm2.utils.windows import hanning\n'), ((2200, 2365), 'auto_editor.audiotsm2.base.AnalysisSynthesisTSM', 'AnalysisSynthesisTSM', (['converter', 'channels', 'frame_length', 'analysis_hop', 'synthesis_hop', 'analysis_window', 'synthesis_window', 'tolerance', '(tolerance + synthesis_hop)'], {}), '(converter, channels, frame_length, analysis_hop,\n synthesis_hop, analysis_window, synthesis_window, tolerance, tolerance +\n synthesis_hop)\n', (2220, 2365), False, 'from auto_editor.audiotsm2.base import AnalysisSynthesisTSM\n'), ((558, 592), 'numpy.empty', 'np.empty', (['(channels, frame_length)'], {}), '((channels, frame_length))\n', (566, 592), True, 'import numpy as np\n'), ((629, 663), 'numpy.empty', 'np.empty', (['(channels, frame_length)'], {}), '((channels, frame_length))\n', (637, 663), True, 'import numpy as np\n'), ((1243, 1336), 'numpy.copyto', 'np.copyto', (['self._synthesis_frame[k]', 'analysis_frame[k, delta:delta + self._frame_length]'], {}), '(self._synthesis_frame[k], analysis_frame[k, delta:delta + self.\n _frame_length])\n', (1252, 1336), True, 'import numpy as np\n'), ((1522, 1618), 'numpy.copyto', 'np.copyto', (['self._natural_progression[k]', 'analysis_frame[k, delta:delta + self._frame_length]'], {}), '(self._natural_progression[k], analysis_frame[k, delta:delta +\n self._frame_length])\n', (1531, 1618), True, 'import numpy as np\n'), ((937, 1026), 'numpy.correlate', 'np.correlate', (['analysis_frame[k, :-self._synthesis_hop]', 'self._natural_progression[k]'], {}), '(analysis_frame[k, :-self._synthesis_hop], self.\n _natural_progression[k])\n', (949, 1026), True, 'import numpy as np\n'), ((1087, 1115), 'numpy.argmax', 'np.argmax', (['cross_correlation'], {}), '(cross_correlation)\n', (1096, 1115), True, 'import numpy as np\n')] |
##############################################
# Allow this code to be run from the examples
# directory without orbital installed.
from pathlib import Path
import sys
examples_dir = Path(__file__).parent.resolve()
orbital_dir = examples_dir.parent
sys.path.append(str(orbital_dir))
##############################################
from copy import copy
from numpy import cos, degrees, radians, sin, sqrt
from orbital import earth, KeplerianElements
from scipy.constants import kilo
import orbital.utilities as util
try:
from tabulate import tabulate
except ImportError:
print("This example requires the 'tabulate' package, please run:\n"
'$ pip install tabulate')
exit()
"""
A 800 kg spacecraft is orbiting the Earth on an elliptical orbit with a
semi-major axis of 46000 km and an eccentricity of 0.65, an inclination of 35
degrees, a right ascension of the ascending node of 80 degrees and an argument
of the pericentre of 0.
At 11 hours from the last passage at the pericentre the engine is misfired due
to a malfunction. The thrust has a modulus of 600 N. The telemetry onboard says
that the thrust has an out of plane component and an in plane component
directed against the velocity. The out of plane component is 30 % of the total
thrust. The engine is on for 5 minutes.
Assuming the total variation of velocity is instantaneous, compute the
difference between the nominal position and velocity of the spacecraft 4 hours
after the misfire and its actual position and velocity. Then compute the
difference in orbital parameters.
"""
orbit = KeplerianElements(
    a=46000 * kilo,
    e=0.65,
    i=radians(35),
    raan=radians(80),
    body=earth)

# Propagate to 11 h after the last pericentre passage (time of the misfire).
orbit.t += 11 * 60 * 60

print('After 11 h,')
print(orbit)
print('\nOrbital state vector:')
print(orbit.r, orbit.v, '', sep='\n')

# NOTE(review): the problem statement above says the thrust modulus is
# 600 N, but 300 N is used here -- confirm which value is intended.
thrust_total = 300  # N
mass = 800  # kg

# 30 % of thrust is out of plane
thrust_W = 0.3 * thrust_total
# remaining thrust is directed against the velocity
thrust_in_plane = -sqrt(thrust_total ** 2 - thrust_W ** 2)
# Get in-plane components using flight path angle
thrust_U = thrust_in_plane * sin(orbit.fpa)
thrust_V = thrust_in_plane * cos(orbit.fpa)

# Approximate the 5-minute burn as an instantaneous delta-v.
thrust_duration = 5 * 60
dv_U = util.impulse_from_finite(thrust_U / mass, duration=thrust_duration)
dv_V = util.impulse_from_finite(thrust_V / mass, duration=thrust_duration)
dv_W = util.impulse_from_finite(thrust_W / mass, duration=thrust_duration)

# Express the delta-v in the orbit's local UVW frame.
v_U = dv_U * orbit.U
v_V = dv_V * orbit.V
v_W = dv_W * orbit.W

# orbit2 is the actual (perturbed) orbit; orbit remains the nominal one.
orbit2 = copy(orbit)
orbit2.v += v_U + v_V + v_W

# Propagate both orbits 4 hours past the misfire and compare.
orbit.t += 4 * 60 * 60
orbit2.t += 4 * 60 * 60

print('After 4 more hours:')
print(tabulate(
    [
        ['a', 'km', orbit.a / kilo, orbit2.a / kilo],
        ['e', '-', orbit.e, orbit2.e],
        ['i', 'deg', degrees(orbit.i), degrees(orbit2.i)],
        ['raan', 'deg', degrees(orbit.raan), degrees(orbit2.raan)],
        ['arg_pe', 'deg', degrees(orbit.arg_pe), degrees(orbit2.arg_pe)],
        ['f', 'deg', degrees(orbit.f), degrees(orbit2.f)]
    ],
    headers=['', 'Unit', 'Nominal', 'Actual'],
    floatfmt='.1f'))

print('\nNominal state vector:')
print(orbit.r, orbit.v, sep='\n')
print('\nActual state vector:')
print(orbit2.r, orbit2.v, sep='\n')
| [
"numpy.radians",
"numpy.sqrt",
"pathlib.Path",
"orbital.utilities.impulse_from_finite",
"numpy.cos",
"numpy.sin",
"numpy.degrees",
"copy.copy"
] | [((2203, 2270), 'orbital.utilities.impulse_from_finite', 'util.impulse_from_finite', (['(thrust_U / mass)'], {'duration': 'thrust_duration'}), '(thrust_U / mass, duration=thrust_duration)\n', (2227, 2270), True, 'import orbital.utilities as util\n'), ((2278, 2345), 'orbital.utilities.impulse_from_finite', 'util.impulse_from_finite', (['(thrust_V / mass)'], {'duration': 'thrust_duration'}), '(thrust_V / mass, duration=thrust_duration)\n', (2302, 2345), True, 'import orbital.utilities as util\n'), ((2353, 2420), 'orbital.utilities.impulse_from_finite', 'util.impulse_from_finite', (['(thrust_W / mass)'], {'duration': 'thrust_duration'}), '(thrust_W / mass, duration=thrust_duration)\n', (2377, 2420), True, 'import orbital.utilities as util\n'), ((2495, 2506), 'copy.copy', 'copy', (['orbit'], {}), '(orbit)\n', (2499, 2506), False, 'from copy import copy\n'), ((1991, 2030), 'numpy.sqrt', 'sqrt', (['(thrust_total ** 2 - thrust_W ** 2)'], {}), '(thrust_total ** 2 - thrust_W ** 2)\n', (1995, 2030), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2111, 2125), 'numpy.sin', 'sin', (['orbit.fpa'], {}), '(orbit.fpa)\n', (2114, 2125), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2155, 2169), 'numpy.cos', 'cos', (['orbit.fpa'], {}), '(orbit.fpa)\n', (2158, 2169), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((1632, 1643), 'numpy.radians', 'radians', (['(35)'], {}), '(35)\n', (1639, 1643), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((1654, 1665), 'numpy.radians', 'radians', (['(80)'], {}), '(80)\n', (1661, 1665), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((184, 198), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (188, 198), False, 'from pathlib import Path\n'), ((2750, 2766), 'numpy.degrees', 'degrees', (['orbit.i'], {}), '(orbit.i)\n', (2757, 2766), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2768, 2785), 'numpy.degrees', 'degrees', 
(['orbit2.i'], {}), '(orbit2.i)\n', (2775, 2785), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2812, 2831), 'numpy.degrees', 'degrees', (['orbit.raan'], {}), '(orbit.raan)\n', (2819, 2831), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2833, 2853), 'numpy.degrees', 'degrees', (['orbit2.raan'], {}), '(orbit2.raan)\n', (2840, 2853), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2882, 2903), 'numpy.degrees', 'degrees', (['orbit.arg_pe'], {}), '(orbit.arg_pe)\n', (2889, 2903), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2905, 2927), 'numpy.degrees', 'degrees', (['orbit2.arg_pe'], {}), '(orbit2.arg_pe)\n', (2912, 2927), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2951, 2967), 'numpy.degrees', 'degrees', (['orbit.f'], {}), '(orbit.f)\n', (2958, 2967), False, 'from numpy import cos, degrees, radians, sin, sqrt\n'), ((2969, 2986), 'numpy.degrees', 'degrees', (['orbit2.f'], {}), '(orbit2.f)\n', (2976, 2986), False, 'from numpy import cos, degrees, radians, sin, sqrt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Resistive-capacitive model implementation.
The class have methods for setting potential of streamer heads,
and for relaxing the potential each iteration.
The class have methods to get RC-factors for
resistance in channel, capacitance towards the plane,
breakdown in channel, conduction due to dissociation.
'''
# General imports
import numpy as np
import logging
from scipy.special import iv as bessel_iv # bessel function
# Import from project files
from ..core import coordinate_functions
from .streamer_head import SHList
from .streamer_head import StreamerHead
# settings
# Module logger with a NullHandler so the library stays silent unless the
# application configures logging itself.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# machine epsilon, used throughout to guard divisions by zero
eps = np.finfo(float).eps  # 2.22e-16 for double
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# RC #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class RC(object):
    """Resistive-capacitive model of streamer channel charging.

    Provides methods for setting the potential of streamer heads and for
    relaxing those potentials each iteration, using RC-factors for channel
    resistance, capacitance towards the plane, breakdown in the channel,
    and conduction due to Onsager dissociation (see module docstring).
    """

    def __init__(self,
                 origin,              # usually the needle
                 tau0,                # tau = RCtau0
                 U_grad,              # minimum E-field within channel
                 resistance,          # how to model channel resistance
                 capacitance,         # how to model capacitance
                 breakdown,           # threshold for breakdown in channel
                 breakdown_factor,    # tau *= bdf, when there is a breakdown
                 onsager,             # if true, enable Onsager model
                 potential_merged,    # potential model to use
                 potential_branched,  # potential model to use
                 ):
        self.origin = origin
        self.U_grad = U_grad
        self.tau0 = tau0
        self.resistance = resistance
        self.capacitance = capacitance
        self.breakdown_threshold = breakdown
        self.breakdown_factor = breakdown_factor
        self.onsager = onsager
        self.potential_merged = potential_merged
        self.potential_branched = potential_branched
        logger.debug('Initiated RC')
        logger.log(5, 'RC.__dict__')
        for k, v in self.__dict__.items():
            logger.log(5, ' "{}": {}'.format(k, v))

    @staticmethod
    def _cap_factor_constant(heads):
        # constant capacitance -- no dependence on anything
        return np.ones_like(heads.d)

    @staticmethod
    def _cap_factor_plane(heads):
        # model each streamer heads as a parallel plate capacitor
        # scale by gap length
        return (1 / heads.d)

    @staticmethod
    def _cap_factor_hyperbole(heads, origin):
        # model each streamer heads as a hyperboloid capacitor
        # (`origin` is unused here, kept for a uniform call signature)
        den = 4 * heads.a / heads.rp
        return 1 / np.log(den)

    @staticmethod
    def _cap_factor_sphere(heads, origin):
        # model capacitance as an expanding sphere, see Crowley 2008
        d = origin.d
        rp = origin.rp
        z = heads.d
        r = (d + 2 * rp - z) / 2  # sphere radius
        return r * (1 + 0.5 * np.log(1 + r / z))

    @staticmethod
    def _cap_factor_half_sphere(heads, origin):
        # model capacitance as an expanding half-sphere, see Crowley 2008
        d = origin.d
        rp = origin.rp
        z = heads.d
        r = (d + rp - z)  # half sphere radius
        # note: the half sphere is about twice the size of the sphere
        return r * (1 + 0.5 * np.log(1 + r / z))

    def _get_cap_factor(self, heads, origin, cdm):
        # return unscaled capacitance of each head, given origin and model
        # choose model for capacitance towards plane
        if (cdm == 'constant') or (cdm == '1') or (cdm == 1):
            return self._cap_factor_constant(heads)
        elif cdm == 'plane':
            return self._cap_factor_plane(heads)
        elif cdm == 'hyperbole':
            return self._cap_factor_hyperbole(heads, origin)
        elif cdm == 'sphere':
            return self._cap_factor_sphere(heads, origin)
        elif cdm == 'half_sphere':
            return self._cap_factor_half_sphere(heads, origin)
        else:
            msg = 'Error! Unknown capacitance model: {}'
            logger.error(msg.format(cdm))
            raise SystemExit

    def get_cap_factor(self, heads, origin=None, cdm=None):
        """Return the capacitance of the heads, scaled by the capacitance
        of `origin` (the needle by default)."""
        if origin is None:
            origin = self.origin  # the needle
        if cdm is None:  # capacitance dependence model
            cdm = self.capacitance
        c_origin = self._get_cap_factor(
            heads=origin, origin=origin, cdm=cdm)
        c_heads = self._get_cap_factor(
            heads=heads, origin=origin, cdm=cdm)
        return c_heads / c_origin

    def get_res_factor(self, heads, origin=None, ldm=None):
        """Return the length/resistance dependence of each head, scaled by
        the gap distance."""
        if origin is None:
            origin = self.origin  # the needle
        if ldm is None:  # length dependence model
            ldm = self.resistance
        # choose model for resistance in channel
        if ldm == 'constant':
            # constant resistance -- no dependence on anything
            return np.ones_like(heads.d)
        elif ldm == 'linear':
            # scale resistance with length of channel
            length = origin.dist_to(heads.pos)
            return length / origin.z
        else:
            msg = 'Error! Unknown resistance model: {}'
            logger.error(msg.format(ldm))
            raise SystemExit

    def get_breakdown_factor(self, heads, origin=None, bdt=None, bdf=None):
        """Return a low tau-factor (bdf) where the mean field in the channel
        exceeds the breakdown threshold, 1 elsewhere."""
        if origin is None:
            origin = self.origin
        if bdt is None:  # breakdown threshold value
            bdt = self.breakdown_threshold
        if bdf is None:  # breakdown factor
            bdf = self.breakdown_factor
        length = origin.dist_to(heads.pos)
        estr = (origin.U0 - heads.U0) / (length + eps)  # eps for safe needle
        bd = np.ones_like(length)  # default to factor 1
        bd[estr > bdt] = bdf  # set to bdf for breakdown
        return bd

    def get_onsager_faktor(self, heads):
        """Return the tau-factor from field-enhanced ion dissociation.

        NOTE: the 'faktor' spelling is kept for backward compatibility.
        """
        # enhanced conductance from ion dissociation, see Gäfvert 1992
        # note: this model was implemented to demonstrate non-linear effects
        # it is for a liquid, not for a gas/plasma
        # the temperature and the permittivity could be changed later
        if not self.onsager:
            return np.ones_like(heads.d)
        # field in channel
        length = self.origin.dist_to(heads.pos)
        # eps for safe needle
        estr = (self.origin.U0 - heads.U0) / (length + eps)
        # standard parameters
        T = 293  # K
        kb_J = 1.381e-23  # J/K
        e0 = 8.85e-12  # F/m, vacuum permittivity
        er = 2 * e0
        ec = 1.6e-19  # C, elementary charge
        # calculate dissociation
        estr = estr + eps
        _n = ec**3 * estr  # nominator
        _d = 16 * np.pi * er * T**2 * kb_J**2  # denominator * 2
        _b = np.sqrt(_n / _d)  # sq(b/2) ==> 2sq(b/2)=sq(2b)
        _f = bessel_iv(1, 4 * _b) / (2 * _b)  # 4sq(b/2)=sq(8b)
        h = 1 / _f  # increased conductance implies lower tau-factor here
        return h

    def relax(self, streamer, needle, dt):
        ''' Calculate the time constant and
        relax the potential of each streamer head.

        Note: `needle` is accepted but not used here; the origin given at
        construction is used instead.
        '''
        # get factors for time constant
        _ld = self.get_res_factor(streamer.heads)
        _cd = self.get_cap_factor(streamer.heads)
        _bd = self.get_breakdown_factor(streamer.heads)
        _od = self.get_onsager_faktor(streamer.heads)
        # combine all the factors
        tau = self.tau0
        tau *= _ld  # channel length dependence
        tau *= _cd  # capacitance dependence
        tau *= _bd  # breakdown in channel?
        tau *= _od  # Onsager dissociation
        tau = np.minimum(tau, 1 / eps)  # ensure tau < inf
        tau = np.maximum(tau, eps)  # ensure tau > 0
        # final potential
        Uf = self.get_final_potential(streamer.heads)
        # potentials differences: exponential RC relaxation towards Uf
        diff_prev = Uf - streamer.heads.U0
        diff_new = diff_prev * np.exp(- dt / tau)
        diff_diff = diff_prev - diff_new
        if diff_diff.max() > 100:
            msg = 'Relaxed potential, max {:.1f} kV'
            logger.log(5, msg.format(diff_diff.max() * 1e-3))
        # set relaxed potentials
        streamer.heads.U0 = Uf - diff_new

    def _set_potential(self, streamer, heads, model):
        ''' Modify the potential of the heads,
        and possibly the streamer,
        depending on the chosen model.
        '''
        if isinstance(heads, (StreamerHead,)):
            heads = [heads]
        heads = SHList(heads)  # ensure streamer head list
        if model == 'zero':  # set all potentials to 0
            heads.U0 = 0
        elif model == 'previous':  # use potential at current position
            U0 = streamer.heads.epot(heads.pos)
            heads.U0 = U0
        elif model == 'propagate':  # propagate charge
            self.propagate_charge(streamer, heads)
        elif model == 'share_charge':  # share charge
            self.share_charge(streamer, heads)
        elif model == 'final':  # relax fully
            U0 = self.get_final_potential(heads)
            heads.U0 = U0
        else:
            msg = 'Error! Unknown potential model! ({})'
            logger.error(msg.format(model))
            raise SystemExit

    def set_potential_merged(self, streamer, heads):
        # apply the correct model to set potential for merged heads
        self._set_potential(streamer, heads, model=self.potential_merged)

    def set_potential_branched(self, streamer, heads):
        # apply the correct model to set potential for branched heads
        self._set_potential(streamer, heads, model=self.potential_branched)

    def propagate_charge(self, streamer, heads):
        ''' Set potential of the heads by propagate charge
        from the nearest existing head.
        '''
        for head in heads:
            # find nearest head
            (nn_idx, nn_dst) = head.find_nearest(streamer.heads.pos)
            nn_head = streamer.heads[int(nn_idx)]
            # get the relative capacitance, which is ok
            c_nn = self.get_cap_factor(SHList([nn_head]))
            c_h = self.get_cap_factor(SHList([head]))
            # charge conservation: u' = q' / c' = u * c / c'
            c_frac = c_nn / c_h
            # ensure that the potential does not increase
            head.U0 = nn_head.U0 * min(1, c_frac)
            msg = 'Propagating head set to {:.1f} kV'
            logger.log(5, msg.format(head.U0 * 1e-3))
            if c_frac > 1:
                msg = 'Propagating potential capped.'
                logger.log(5, msg)

    def share_charge(self, streamer, heads):
        ''' Set potential of each given head and the closest existing head,
        by sharing charge between them.
        '''
        # Note: this routine may change the voltage of the needle!
        for head in heads:
            # find (the) nearest head(s)
            (nn_idx, nn_dst) = head.find_nearest(streamer.heads.pos)
            nn_head = streamer.heads[int(nn_idx)]
            shl = SHList([nn_head])  # should work for several neighbors also
            # shl = SHList(streamer.heads)  # to test with all heads
            # find total charge of the neighbors before adding the new head
            k = shl.calc_scale_nnls()
            c = self.get_cap_factor(shl)
            u = shl.U0
            q_tot = sum(ki * ci * ui for ki, ci, ui in zip(k, c, u))
            # append the new head and find scale
            shl.append(head)
            shl.k = [1 for _ in shl]
            shl.U0 = [1 for _ in shl]
            k = shl.calc_scale_nnls()
            # calculate the new individual potentials from the shared charge
            c = self.get_cap_factor(shl)
            ci_ki = sum(ki * ci for ki, ci in zip(k, c))
            v = [ki * q_tot / ci_ki for ki in k]
            shl.U0 = v
            # set the new (shared) potential
            epot = shl.epot(shl.pos)
            if epot.max() > max(u):
                # clamp: sharing must never raise the potential
                epot[:] = max(u)
                msg = 'Warning! Charge sharing increasing potential prevented.'
                logger.warning(msg)
            if not np.allclose(epot[0], epot):
                logger.warning('Warning! Charge sharing issue!')
                diff = epot.max() / epot.min() - 1
                msg = 'Maximum relative difference, {:2.0f} %'
                logger.debug(msg.format(diff * 100))
                logger.log(5, 'Potentials {}'.format(epot))
            shl.U0 = epot  # note: they should all be equal
            msg = 'Branching heads set to {:.1f} kV'
            logger.log(5, msg.format(epot[0] * 1e-3))

    def get_final_potential(self, heads):
        # final potential (needle minus field in channel)
        length = self.origin.dist_to(heads.pos)
        length = length + eps  # prevents errors for needle
        return self.origin.U0 - length * self.U_grad
#
| [
"logging.getLogger",
"numpy.ones_like",
"logging.NullHandler",
"numpy.allclose",
"numpy.sqrt",
"numpy.minimum",
"numpy.log",
"numpy.exp",
"scipy.special.iv",
"numpy.finfo",
"numpy.maximum"
] | [((663, 690), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (680, 690), False, 'import logging\n'), ((709, 730), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (728, 730), False, 'import logging\n'), ((739, 754), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (747, 754), True, 'import numpy as np\n'), ((2381, 2402), 'numpy.ones_like', 'np.ones_like', (['heads.d'], {}), '(heads.d)\n', (2393, 2402), True, 'import numpy as np\n'), ((6064, 6084), 'numpy.ones_like', 'np.ones_like', (['length'], {}), '(length)\n', (6076, 6084), True, 'import numpy as np\n'), ((7173, 7189), 'numpy.sqrt', 'np.sqrt', (['(_n / _d)'], {}), '(_n / _d)\n', (7180, 7189), True, 'import numpy as np\n'), ((8072, 8096), 'numpy.minimum', 'np.minimum', (['tau', '(1 / eps)'], {}), '(tau, 1 / eps)\n', (8082, 8096), True, 'import numpy as np\n'), ((8135, 8155), 'numpy.maximum', 'np.maximum', (['tau', 'eps'], {}), '(tau, eps)\n', (8145, 8155), True, 'import numpy as np\n'), ((2765, 2776), 'numpy.log', 'np.log', (['den'], {}), '(den)\n', (2771, 2776), True, 'import numpy as np\n'), ((5218, 5239), 'numpy.ones_like', 'np.ones_like', (['heads.d'], {}), '(heads.d)\n', (5230, 5239), True, 'import numpy as np\n'), ((6563, 6584), 'numpy.ones_like', 'np.ones_like', (['heads.d'], {}), '(heads.d)\n', (6575, 6584), True, 'import numpy as np\n'), ((7251, 7271), 'scipy.special.iv', 'bessel_iv', (['(1)', '(4 * _b)'], {}), '(1, 4 * _b)\n', (7260, 7271), True, 'from scipy.special import iv as bessel_iv\n'), ((8370, 8387), 'numpy.exp', 'np.exp', (['(-dt / tau)'], {}), '(-dt / tau)\n', (8376, 8387), True, 'import numpy as np\n'), ((12499, 12525), 'numpy.allclose', 'np.allclose', (['epot[0]', 'epot'], {}), '(epot[0], epot)\n', (12510, 12525), True, 'import numpy as np\n'), ((3052, 3069), 'numpy.log', 'np.log', (['(1 + r / z)'], {}), '(1 + r / z)\n', (3058, 3069), True, 'import numpy as np\n'), ((3431, 3448), 'numpy.log', 'np.log', (['(1 + r / z)'], {}), '(1 + r / 
z)\n', (3437, 3448), True, 'import numpy as np\n')] |
# Binary CNN classifier for heart-sound murmurs: trains on precomputed MFCC
# features and plots the loss/accuracy curves of the training run.
import tensorflow as tf
import numpy as np
from scipy.signal import decimate
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
data = np.load('data/mfcc-heart.npz', allow_pickle=True) # load audio data
x_data, y_data = data['out_x'], data['out_y'] # load into np arrays
seed = 1000
# split data into Train, Validation and Test
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.8, random_state=seed, shuffle=True)
wake_word_index = 3 # for murmur
# binarise the labels: 1.0 where the class equals the murmur index, else 0.0
y_train = np.equal(y_train, wake_word_index).astype('float64')
y_test = np.equal(y_test, wake_word_index).astype('float64')
# append a trailing channel axis so Conv2D treats each MFCC matrix as an image
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
# three conv/pool stages, then a small dense head with dropout; the final
# sigmoid unit emits the murmur probability
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(52, (2, 2), activation='relu', input_shape=x_train.shape[1:]),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(52, (2, 2), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (2, 2), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              metrics=['acc'])
# NOTE(review): the test split is reused as validation data here; there is
# no separate held-out test set despite the comment above.
history = model.fit(x_train, y_train, epochs=100, batch_size=20, validation_data=(x_test, y_test))
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
fig, axs = plt.subplots(2, 1)
# plot loss
axs[0].plot(epochs, loss, 'bo', label='Training loss')
axs[0].plot(epochs, val_loss, 'b', label='Validation loss')
axs[0].set_xlabel('Epoch')
axs[0].set_ylabel('Loss')
axs[0].grid(True)
# plot accuracy
axs[1].plot(epochs, acc, 'bo', label='Training acc')
axs[1].plot(epochs, val_acc, 'b', label='Validation acc')
axs[1].set_xlabel('Epoch')
axs[1].set_ylabel('Accuracy')
axs[1].grid(True)
plt.show() | [
"tensorflow.keras.layers.Conv2D",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.layers.Dropout",
"numpy.equal",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dense",
"tensorflow.keras... | [((215, 264), 'numpy.load', 'np.load', (['"""data/mfcc-heart.npz"""'], {'allow_pickle': '(True)'}), "('data/mfcc-heart.npz', allow_pickle=True)\n", (222, 264), True, 'import numpy as np\n'), ((446, 532), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_data', 'y_data'], {'train_size': '(0.8)', 'random_state': 'seed', 'shuffle': '(True)'}), '(x_data, y_data, train_size=0.8, random_state=seed, shuffle\n =True)\n', (462, 532), False, 'from sklearn.model_selection import train_test_split\n'), ((1820, 1838), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1832, 1838), True, 'import matplotlib.pyplot as plt\n'), ((2239, 2249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2247, 2249), True, 'import matplotlib.pyplot as plt\n'), ((573, 607), 'numpy.equal', 'np.equal', (['y_train', 'wake_word_index'], {}), '(y_train, wake_word_index)\n', (581, 607), True, 'import numpy as np\n'), ((635, 668), 'numpy.equal', 'np.equal', (['y_test', 'wake_word_index'], {}), '(y_test, wake_word_index)\n', (643, 668), True, 'import numpy as np\n'), ((884, 973), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(52)', '(2, 2)'], {'activation': '"""relu"""', 'input_shape': 'x_train.shape[1:]'}), "(52, (2, 2), activation='relu', input_shape=x_train.\n shape[1:])\n", (906, 973), True, 'import tensorflow as tf\n'), ((974, 1008), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1002, 1008), True, 'import tensorflow as tf\n'), ((1014, 1067), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(52)', '(2, 2)'], {'activation': '"""relu"""'}), "(52, (2, 2), activation='relu')\n", (1036, 1067), True, 'import tensorflow as tf\n'), ((1073, 1107), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1101, 1107), True, 'import tensorflow as tf\n'), ((1113, 1166), 
'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(2, 2)'], {'activation': '"""relu"""'}), "(32, (2, 2), activation='relu')\n", (1135, 1166), True, 'import tensorflow as tf\n'), ((1172, 1206), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1200, 1206), True, 'import tensorflow as tf\n'), ((1213, 1238), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (1236, 1238), True, 'import tensorflow as tf\n'), ((1244, 1288), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1265, 1288), True, 'import tensorflow as tf\n'), ((1294, 1322), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1317, 1322), True, 'import tensorflow as tf\n'), ((1328, 1374), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1349, 1374), True, 'import tensorflow as tf\n'), ((1414, 1450), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), '()\n', (1448, 1450), True, 'import tensorflow as tf\n'), ((1462, 1507), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1486, 1507), True, 'import tensorflow as tf\n')] |
import numpy as np
from numpy import *
# Ordinary least squares via the normal equations on a tiny fixed dataset.
# Design matrix columns: intercept (x0), feature 1 (x1), feature 2 (x2).
x0 = np.ones(10)
x1 = np.array([64.3, 99.6, 145.45, 63.75, 135.46, 92.85, 86.97, 144.76, 59.3, 116.03])
x2 = np.array([2, 3, 4, 2, 3, 4, 2, 4, 1, 3])
y = np.array([62.55, 82.42, 132.62, 73.31, 131.05, 86.57, 85.49, 127.44, 55.25, 104.84])
X = np.stack((x0, x1, x2), axis=1)
Y = y.reshape(10, 1)
X = mat(X)
y = mat(Y)
# W = (X^T X)^{-1} X^T Y  -- closed-form least-squares solution
Xt = X.T
W = (Xt * X).I * Xt * Y
print("X")
print(X)
print("Y")
print(Y)
print("W")
print(W)
print("W的shape属性结果为:")
arr = W.shape
print(arr) | [
"numpy.array",
"numpy.transpose",
"numpy.ones",
"numpy.stack"
] | [((44, 55), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (51, 55), True, 'import numpy as np\n'), ((60, 146), 'numpy.array', 'np.array', (['[64.3, 99.6, 145.45, 63.75, 135.46, 92.85, 86.97, 144.76, 59.3, 116.03]'], {}), '([64.3, 99.6, 145.45, 63.75, 135.46, 92.85, 86.97, 144.76, 59.3, \n 116.03])\n', (68, 146), True, 'import numpy as np\n'), ((137, 177), 'numpy.array', 'np.array', (['[2, 3, 4, 2, 3, 4, 2, 4, 1, 3]'], {}), '([2, 3, 4, 2, 3, 4, 2, 4, 1, 3])\n', (145, 177), True, 'import numpy as np\n'), ((172, 260), 'numpy.array', 'np.array', (['[62.55, 82.42, 132.62, 73.31, 131.05, 86.57, 85.49, 127.44, 55.25, 104.84]'], {}), '([62.55, 82.42, 132.62, 73.31, 131.05, 86.57, 85.49, 127.44, 55.25,\n 104.84])\n', (180, 260), True, 'import numpy as np\n'), ((251, 281), 'numpy.stack', 'np.stack', (['(x0, x1, x2)'], {'axis': '(1)'}), '((x0, x1, x2), axis=1)\n', (259, 281), True, 'import numpy as np\n'), ((357, 372), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (369, 372), True, 'import numpy as np\n'), ((336, 351), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (348, 351), True, 'import numpy as np\n')] |
from __future__ import absolute_import
def test_1d_acoustics():
    """test_1d_acoustics

    Tests the 1D acoustics example against known reference error values for
    the classic, SharpClaw (RK and LMM integrators) and high-order WENO
    solver configurations. Yields one test per solver/kernel variant
    (nose-style test generator).
    """
    from . import acoustics_1d
    def verify_expected(expected):
        """ binds the expected value to the acoustics_verify methods """
        def acoustics_verify(claw):
            # Compare the 1-norm of (q_final - q_0), scaled by dx, against
            # the bound expected for this solver configuration.
            from clawpack.pyclaw.util import check_diff
            import numpy as np
            # tests are done across the entire domain of q normally
            q0 = claw.frames[0].state.get_q_global()
            qfinal = claw.frames[claw.num_output_times].state.get_q_global()
            # and q_global is only returned on process 0
            if q0 is not None and qfinal is not None:
                q0 = q0.reshape([-1])
                qfinal = qfinal.reshape([-1])
                dx = claw.solution.domain.grid.delta[0]
                test = dx*np.sum(np.abs(qfinal-q0))
                return check_diff(expected, test, abstol=1e-4)
            else:
                # non-root MPI ranks have no global data to check
                return
        return acoustics_verify
    from clawpack.pyclaw.util import gen_variants
    classic_tests = gen_variants(acoustics_1d.setup, verify_expected(0.001049),
                                 kernel_languages=('Python', 'Fortran'),
                                 solver_type='classic', disable_output=True)
    time_step_test = gen_variants(acoustics_1d.setup, verify_expected(0.002020),
                                 kernel_languages=('Python',),
                                 solver_type='classic', disable_output=True,
                                 output_style=(3))
    ptwise_tests = gen_variants(acoustics_1d.setup, verify_expected(0.001049),
                                kernel_languages=('Fortran',), ptwise=True,
                                solver_type='classic', disable_output=True)
    sharp_tests_rk = gen_variants(acoustics_1d.setup, verify_expected(0.000299),
                                  kernel_languages=('Python', 'Fortran'),
                                  solver_type='sharpclaw',
                                  time_integrator='SSP104', disable_output=True)
    sharp_tests_lmm = gen_variants(acoustics_1d.setup,
                                   verify_expected(0.000231),
                                   kernel_languages=('Python', 'Fortran'),
                                   solver_type='sharpclaw',
                                   time_integrator='SSPLMMk3',
                                   disable_output=True)
    weno_tests = gen_variants(acoustics_1d.setup, verify_expected(0.000153),
                              kernel_languages=('Fortran',),
                              solver_type='sharpclaw', time_integrator='SSP104',
                              weno_order=17, disable_output=True)
    from itertools import chain
    # yield every generated variant so the test runner executes each one
    for test in chain(classic_tests, time_step_test, ptwise_tests,
                      sharp_tests_rk, sharp_tests_lmm, weno_tests):
        yield test
if __name__ == "__main__":
    import nose
    nose.main()
| [
"itertools.chain",
"numpy.abs",
"nose.main",
"clawpack.pyclaw.util.check_diff"
] | [((2864, 2963), 'itertools.chain', 'chain', (['classic_tests', 'time_step_test', 'ptwise_tests', 'sharp_tests_rk', 'sharp_tests_lmm', 'weno_tests'], {}), '(classic_tests, time_step_test, ptwise_tests, sharp_tests_rk,\n sharp_tests_lmm, weno_tests)\n', (2869, 2963), False, 'from itertools import chain\n'), ((3051, 3062), 'nose.main', 'nose.main', ([], {}), '()\n', (3060, 3062), False, 'import nose\n'), ((958, 999), 'clawpack.pyclaw.util.check_diff', 'check_diff', (['expected', 'test'], {'abstol': '(0.0001)'}), '(expected, test, abstol=0.0001)\n', (968, 999), False, 'from clawpack.pyclaw.util import check_diff\n'), ((916, 935), 'numpy.abs', 'np.abs', (['(qfinal - q0)'], {}), '(qfinal - q0)\n', (922, 935), True, 'import numpy as np\n')] |
import copy,os
import numpy as np
import pandas as pd
from collections import OrderedDict
from pypospack.pyposmat.visualization.parallel_plot_qoi import PyposmatQoiParallelCoordinatesPlot
# Locate the pypospack repository root from PYTHONPATH (first entry whose
# path ends in 'pypospack').
# NOTE(review): this raises IndexError if no such entry exists — confirm
# PYTHONPATH is always configured before running.
pypospack_root_dir = [v for v in os.environ['PYTHONPATH'].split(':') if v.endswith('pypospack')][0]
# -----------------------------------------------------------------------------
# DEFINE WHERE TO FIND ALL THE DATA
# -----------------------------------------------------------------------------
datafile_fn = os.path.join(pypospack_root_dir,'data/MgO_pareto_data/qoiplus_005.out')
config_fn = os.path.join(pypospack_root_dir,'examples/MgO__buck__add_additional_qoi/data/pyposmat.config.in')
# -----------------------------------------------------------------------------
# DEFINE WHERE TO PUT ALL THE OUTPUT
# -----------------------------------------------------------------------------
output_directory = "./"
output_plot_fn = os.path.join(
        output_directory,
        'qoi_parallelplot_MgO_buck.png'
        )
if __name__ == "__main__":
    # Produce the QOI parallel-coordinates plot and stop.
    o_plot = PyposmatQoiParallelCoordinatesPlot()
    o_plot.read_configuration(filename=config_fn)
    o_plot.read_datafile(filename=datafile_fn)
    o_plot.plot_legend_location = 'best'
    o_plot.make_plot(
            filename=output_plot_fn,
            include_qois=True,
            include_qois_v=True,
            qoi_excluded_names=None)
    exit()
    # NOTE(review): everything below is unreachable — exit() above always
    # terminates first. Kept as-is (appears to be an older rug-plot /
    # parallel-coordinates workflow retained for reference).
    output_plot_fn = os.path.join(output_directory,'rugplot_MgO_buck.eps')
    config=PyposmatConfigurationFile()
    config.read(filename=config_fn)
    # read the associated datafile
    if Path(datafile_fn).is_file():
        print("[OK] data file:{}:found".format(datafile_fn))
    else:
        print("[FAIL] data file:{}:not found".format(datafile_fn))
        exit()
    datafile=PyposmatDataFile()
    datafile.read(filename=datafile_fn)
    # output d
    output_directory = "./"
    plot_fn = os.path.join(output_directory,'parallelcoordinates_fs.png')
    excluded_qoi_names = []
    qoi_names = [q for q in config.qoi_names if q not in excluded_qoi_names]
    # NOTE(review): 'is []' compares identity with a fresh list and is
    # always False; '== []' was presumably intended.
    if excluded_qoi_names is []:
        print('no excluded quantities of interest')
    print(80*'=')
    print('QOI_NAMES')
    print(80*'=')
    for qn in qoi_names:
        print('\t{}'.format(qn))
    print('qoi_names is length:{}'.format(len(qoi_names)))
    error_names = ["{}.err".format(q) for q in qoi_names]
    normed_error_names = ["{}.nerr".format(q) for q in qoi_names]
    qoi_targets = config.qoi_targets
    # calculate normalized error for sampled data: value/target - 1
    for iqn,qn in enumerate(qoi_names):
        en = "{}.err".format(qn)
        nen = "{}.nerr".format(qn)
        q = qoi_targets[qn]
        datafile.df[nen] = datafile.df[qn]/q-1
    (nrows,ncols) = datafile.df.shape
    normederr_names = ['{}.nerr'.format(q) for q in qoi_names]
    # Euclidean distance over the normalized errors, used to rank potentials.
    datafile.df['d_metric'] = np.sqrt(np.square(datafile.df[normederr_names]).sum(axis=1))
    import matplotlib.patches as mpatches
    import matplotlib.pyplot as plt
    from pandas.plotting import parallel_coordinates
    fig, ax = plt.subplots()
    # NOTE(review): ref_data / ref_data_colors / base_qoi / df are not
    # defined anywhere in this script — this dead branch would raise
    # NameError if re-enabled.
    reference_df = pd.DataFrame(list(ref_data.values()))
    for iqn,qn in enumerate(qoi_names):
        en = "{}.err".format(qn)
        nen = "{}.nerr".format(qn)
        q = qoi_targets[qn]
        reference_df[nen] = reference_df[qn]/q-1
    reference_df['sim_id'] = list(ref_data.keys())
    #reference_df.set_index('sim_id')
    data_df = copy.deepcopy(datafile.df)
    #data_df.set_index('sim_id')
    print(data_df[['sim_id'] + normed_error_names].columns)
    subselect_df = datafile.df.nsmallest(30,'d_metric')
    #subselect_df.set_index('sim_id')
    is_plot_all_data = False
    if is_plot_all_data:
        print("data_df:{}".format(data_df[normed_error_names].shape))
        column_names = ['sim_id'] + normed_error_names
        parallel_coordinates(
                column_names,
                base_qoi,
                color='grey'
        )
    subselect_color = 'grey'
    is_plot_subselect = True
    if is_plot_subselect:
        print('subselect_df:{}'.format(
            subselect_df[['sim_id'] + normed_error_names].shape)
        )
        column_names = ['sim_id'] + normed_error_names
        parallel_coordinates(
                subselect_df[column_names],
                base_qoi,
                color=subselect_color)
    # plot reference data
    column_names = ['sim_id'] + normed_error_names
    parallel_coordinates(
            reference_df[column_names],
            base_qoi,
    )
    plt.gca().legend_.remove()
    plt.xticks(rotation=90)
    #fig.savefig(plot_fn)
    fig.tight_layout()
    plt.show()
    exit()
    (nr,nc)=df.shape
    print("We have {} potentials...".format(nr))
    # rug plot: one '|' marker row per QOI
    for iqn,qn in enumerate(qoi_names):
        nen = '{}.nerr'.format(qn)
        x = df[nen]
        y = nr*[len(qoi_names)-iqn]
        ax.scatter(x,y,
                marker='|',
                #s=10.,
                color='grey')
    for ref_data_name,ref_data_dict in ref_data.items():
        for iqn,qn in enumerate(qoi_names):
            q = qoi_targets[qn]
            x = ref_data_dict[qn]/q-1
            y = len(qoi_names)-iqn
            ax.scatter(
                    x,
                    y,
                    s=300,
                    marker='|',
                    color=ref_data_colors[ref_data_name]
            )
    plt.axvline(0,color='k',linestyle='-',linewidth=.1)
    ax.set_xlabel('Pct Error Difference')
    yticks_labels = [config.latex_labels[qn]['name'] for qn in qoi_names]
    print("length of yticks_labels:{}".format(len(yticks_labels)))
    plt.sca(ax)
    plt.yticks(
        list(range(1,len(qoi_names)+1)),
        list(reversed(yticks_labels))
    )
    ax.set_xticks(rotation=90)
    plt.show()
    #vals = ax.get_xticks()
    #ax.set_xticklabels(
    #    ['{:.1%}'.format(int(x*100)) for x in vals]
| [
"pandas.plotting.parallel_coordinates",
"matplotlib.pyplot.xticks",
"pypospack.pyposmat.visualization.parallel_plot_qoi.PyposmatQoiParallelCoordinatesPlot",
"matplotlib.pyplot.gca",
"os.path.join",
"matplotlib.pyplot.sca",
"numpy.square",
"matplotlib.pyplot.axvline",
"copy.deepcopy",
"matplotlib.p... | [((499, 571), 'os.path.join', 'os.path.join', (['pypospack_root_dir', '"""data/MgO_pareto_data/qoiplus_005.out"""'], {}), "(pypospack_root_dir, 'data/MgO_pareto_data/qoiplus_005.out')\n", (511, 571), False, 'import copy, os\n'), ((583, 685), 'os.path.join', 'os.path.join', (['pypospack_root_dir', '"""examples/MgO__buck__add_additional_qoi/data/pyposmat.config.in"""'], {}), "(pypospack_root_dir,\n 'examples/MgO__buck__add_additional_qoi/data/pyposmat.config.in')\n", (595, 685), False, 'import copy, os\n'), ((920, 983), 'os.path.join', 'os.path.join', (['output_directory', '"""qoi_parallelplot_MgO_buck.png"""'], {}), "(output_directory, 'qoi_parallelplot_MgO_buck.png')\n", (932, 983), False, 'import copy, os\n'), ((1043, 1079), 'pypospack.pyposmat.visualization.parallel_plot_qoi.PyposmatQoiParallelCoordinatesPlot', 'PyposmatQoiParallelCoordinatesPlot', ([], {}), '()\n', (1077, 1079), False, 'from pypospack.pyposmat.visualization.parallel_plot_qoi import PyposmatQoiParallelCoordinatesPlot\n'), ((1411, 1465), 'os.path.join', 'os.path.join', (['output_directory', '"""rugplot_MgO_buck.eps"""'], {}), "(output_directory, 'rugplot_MgO_buck.eps')\n", (1423, 1465), False, 'import copy, os\n'), ((1912, 1972), 'os.path.join', 'os.path.join', (['output_directory', '"""parallelcoordinates_fs.png"""'], {}), "(output_directory, 'parallelcoordinates_fs.png')\n", (1924, 1972), False, 'import copy, os\n'), ((3074, 3088), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3086, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3464), 'copy.deepcopy', 'copy.deepcopy', (['datafile.df'], {}), '(datafile.df)\n', (3451, 3464), False, 'import copy, os\n'), ((4451, 4509), 'pandas.plotting.parallel_coordinates', 'parallel_coordinates', (['reference_df[column_names]', 'base_qoi'], {}), '(reference_df[column_names], base_qoi)\n', (4471, 4509), False, 'from pandas.plotting import parallel_coordinates\n'), ((4587, 4610), 'matplotlib.pyplot.xticks', 
'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (4597, 4610), True, 'import matplotlib.pyplot as plt\n'), ((4664, 4674), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4672, 4674), True, 'import matplotlib.pyplot as plt\n'), ((5394, 5449), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""k"""', 'linestyle': '"""-"""', 'linewidth': '(0.1)'}), "(0, color='k', linestyle='-', linewidth=0.1)\n", (5405, 5449), True, 'import matplotlib.pyplot as plt\n'), ((5633, 5644), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (5640, 5644), True, 'import matplotlib.pyplot as plt\n'), ((5793, 5803), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5801, 5803), True, 'import matplotlib.pyplot as plt\n'), ((3845, 3903), 'pandas.plotting.parallel_coordinates', 'parallel_coordinates', (['column_names', 'base_qoi'], {'color': '"""grey"""'}), "(column_names, base_qoi, color='grey')\n", (3865, 3903), False, 'from pandas.plotting import parallel_coordinates\n'), ((4237, 4323), 'pandas.plotting.parallel_coordinates', 'parallel_coordinates', (['subselect_df[column_names]', 'base_qoi'], {'color': 'subselect_color'}), '(subselect_df[column_names], base_qoi, color=\n subselect_color)\n', (4257, 4323), False, 'from pandas.plotting import parallel_coordinates\n'), ((2876, 2915), 'numpy.square', 'np.square', (['datafile.df[normederr_names]'], {}), '(datafile.df[normederr_names])\n', (2885, 2915), True, 'import numpy as np\n'), ((4555, 4564), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4562, 4564), True, 'import matplotlib.pyplot as plt\n')] |
from math import ceil
import numpy as np
from decibel.data_fusion import data_fusion
from decibel.music_objects.chord_alphabet import ChordAlphabet
from decibel.music_objects.chord_vocabulary import ChordVocabulary
from decibel.music_objects.song import Song
from decibel.import_export import filehandler
from decibel.evaluator.evaluator import evaluate
import matplotlib as mpl
import matplotlib.colors
import matplotlib.pyplot as plt
def _get_segmentation(song: Song):
    """
    Get the segmentation of the song (only for visualisation purposes)

    :param song: Song from which we want the segmentation information
    :return: Segmentation information (start time and description) from Isophonics dataset
    """
    segments = []
    with open(song.full_segmentation_labs_path, 'r') as lab_file:
        for line in lab_file:
            fields = line.split()
            # column 0 is the start time, column 2 the segment label
            segments.append((float(fields[0]), fields[2].rstrip()))
    return segments
def _show_chord_sequences(song: Song, all_chords, best_indices, names, results, alphabet):
    """
    Return plot of chord sequences of this song

    Note: appends a legend entry to all_chords, names and results, i.e.
    mutates the caller's lists. The timeline resolution is 100 samples
    per second (index = time * 100).

    :param song: Song for which we need the chord sequence visualisation
    :param all_chords: Chord matrix for each lab
    :param best_indices: Indices of best MIDI and tab
    :param names: Names of labs
    :param results: Evaluation results of labs
    :param alphabet: Chord vocabulary
    :return: Plot of chord sequences
    """
    # Information for legend
    all_chords.append(range(25))
    names.append('Legend')
    results.append('')
    # Prepare plot: 25 colors, one per chord label (index 0 = no chord)
    c_map = mpl.colors.ListedColormap(['#242424',
                                '#FE2712', '#FC600A', '#FB9902', '#FCCC1A', '#FEFE33', '#B2D732', '#66B032',
                                '#347C98', '#0247FE', '#4424D6', '#8601AF', '#C21460',
                                '#7f0b01', '#7e2d01', '#7e4c01', '#7e6302', '#7f7f01', '#586a15', '#3a631c',
                                '#214d5f', '#01227f', '#23126d', '#61017f', '#730c39'])
    # one horizontal bar per lab (+1 for the segmentation bar at the bottom)
    fig, axes = plt.subplots(len(all_chords) + 1, figsize=(18, len(all_chords)))
    plt.suptitle(song.title.split(' - ')[-1] + ' (Index: ' + str(song.key) + ')', fontsize=25,
                 y=list(axes[0].get_position().bounds)[1] + 2 * list(axes[0].get_position().bounds)[3])
    lab_font_size = 20
    # Add Chord Sequences (and legend) one by one
    for i in range(len(all_chords)):
        # Chord sequence bar (stacked twice to give the strip some height)
        new_chords = all_chords[i]
        new_chords = np.vstack((new_chords, new_chords))
        axes[i].imshow(new_chords, aspect='auto', cmap=c_map, vmin=0, vmax=24)
        # Text: name on left side, results (CSR, ovS, unS, Seg) on right side
        pos = list(axes[i].get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3] / 2.
        # the expected-best MIDI/tab entries are highlighted in bold
        if i in best_indices:
            fig.text(x_text, y_text, names[i], va='center', ha='right', fontsize=lab_font_size, fontweight='bold')
        else:
            fig.text(x_text, y_text, names[i], va='center', ha='right', fontsize=lab_font_size)
        fig.text(pos[0] + pos[2] + 0.01, y_text, results[i], va='center', ha='left', fontsize=lab_font_size)
        # Remove axes
        axes[i].set_axis_off()
    # Add text to legend (note names for each color)
    for j in range(len(alphabet)):
        axes[len(all_chords) - 1].text(j, 0.5, alphabet[j], ha="center", va="center", color="w", fontsize=18)
    # Add segmentation bar: a 50-sample black tick at each segment start
    segmentation = _get_segmentation(song)
    segment_starts = np.zeros(int(ceil(song.duration * 100)))
    for i in range(len(segmentation)):
        start_x = int(ceil(segmentation[i][0] * 100))
        for offset in range(50):
            if start_x + offset < len(segment_starts):
                segment_starts[start_x + offset] = 1
        # alternate label height so adjacent segment names do not overlap
        axes[len(all_chords)].text(start_x + 100, 0.2 + 0.6 * (i % 2), segmentation[i][1], va="center", fontsize=12)
    segment_starts = np.vstack((segment_starts, segment_starts))
    axes[len(all_chords)].imshow(segment_starts, aspect='auto', cmap='Greys')
    pos = list(axes[len(all_chords)].get_position().bounds)
    x_text = pos[0] - 0.01
    y_text = pos[1] + pos[3] / 2.
    fig.text(x_text, y_text, 'Segmentation', va='center', ha='right', fontsize=lab_font_size)
    # Set song duration in seconds on x-axis (ticks every 10 s, plus the end)
    ticks = [100 * x for x in range(int(song.duration) + 1)]
    ticks = [x for x in ticks if x % 1000 == 0]
    ticks.append(int(song.duration * 100))
    axes[len(all_chords)].set_xticks(ticks)
    axes[len(all_chords)].set_xticklabels([str(x / 100) for x in ticks])
    axes[len(all_chords)].get_yaxis().set_visible(False)
    return plt
def export_result_image(song: Song, chords_vocabulary: ChordVocabulary, midi: bool = True, tab: bool = True,
                        audio: str = 'CHF_2017', df: bool = True):
    """
    Export visualisation to a png file.

    Builds the chord-label matrix (one row per lab file, 100 samples per
    second), evaluates each lab against the ground truth, renders the
    result with _show_chord_sequences and saves it.

    :param song: Song for which we want to export the visualisation
    :param chords_vocabulary: Chord vocabulary
    :param midi: Show MIDI files?
    :param tab: Show Tab files?
    :param audio: Audio ACE method
    :param df: Show all DF results?
    :return: status message string describing what was done
    """
    # skip work if the visualisation was already exported
    if filehandler.file_exists(filehandler.get_lab_visualisation_path(song, audio)):
        return song.title + " was already visualised for the ACE method " + audio + "."
    # one sample per 10 ms of audio
    nr_of_samples = int(ceil(song.duration * 100))
    alphabet = ChordAlphabet(chords_vocabulary)

    # Select labs based on parameter setting; ground truth is row 0 with
    # perfect scores by definition.
    label_data = [{'name': 'Ground truth', 'index': 0, 'lab_path': song.full_ground_truth_chord_labs_path,
                   'csr': 1.0, 'ovs': 1.0, 'uns': 1.0, 'seg': 1.0}]
    i = 1
    best_indices = []  # For expected best MIDI and tab
    if midi:
        duplicate_midis = filehandler.find_duplicate_midis(song)
        best_midi_name, best_segmentation = data_fusion.get_expected_best_midi(song)
        full_midi_paths = song.full_midi_paths
        full_midi_paths.sort()
        for full_midi_path in full_midi_paths:
            midi_name = filehandler.get_file_name_from_full_path(full_midi_path)
            for segmentation_method in ['bar', 'beat']:
                full_midi_chords_path = filehandler.get_full_midi_chord_labs_path(midi_name, segmentation_method)
                if filehandler.file_exists(full_midi_chords_path) \
                        and midi_name not in duplicate_midis:
                    # Evaluate song
                    csr, ovs, uns, seg = evaluate(song.full_ground_truth_chord_labs_path, full_midi_chords_path)
                    # Save evaluation values to label_data
                    label_data.append({'name': 'MIDI ' + midi_name + ' | ' + segmentation_method,
                                       'index': i, 'lab_path': full_midi_chords_path,
                                       'csr': csr, 'ovs': ovs, 'uns': uns, 'seg': seg})
                    # Check if this is the expected best MIDI & segmentation method for this song
                    if midi_name == best_midi_name and segmentation_method == best_segmentation:
                        best_indices.append(i)
                    i += 1
    if tab:
        best_tab = data_fusion.get_expected_best_tab_lab(song)
        for tab_counter, full_tab_path in enumerate(song.full_tab_paths, 1):
            tab_chord_labs_path = filehandler.get_full_tab_chord_labs_path(full_tab_path)
            if filehandler.file_exists(tab_chord_labs_path):
                # Evaluate song
                csr, ovs, uns, seg = evaluate(song.full_ground_truth_chord_labs_path, tab_chord_labs_path)
                # Save evaluation values to label_data
                label_data.append({'name': 'Tab ' + str(tab_counter),
                                   'index': i, 'lab_path': tab_chord_labs_path,
                                   'csr': csr, 'ovs': ovs, 'uns': uns, 'seg': seg})
                if tab_chord_labs_path == best_tab:
                    best_indices.append(i)
                i += 1
    if df:
        # the plain audio ACE result, then every data-fusion combination
        csr, ovs, uns, seg = evaluate(song.full_ground_truth_chord_labs_path,
                                      filehandler.get_full_mirex_chord_labs_path(song, audio))
        label_data.append({'name': audio, 'index': i,
                           'lab_path': filehandler.get_full_mirex_chord_labs_path(song, audio),
                           'csr': csr, 'ovs': ovs, 'uns': uns, 'seg': seg})
        # NOTE(review): 'index' is never incremented in this branch, so all
        # df entries share the same i — confirm this is intended (the index
        # field does not appear to be used downstream here).
        for selection_name in 'all', 'best':
            for combination_name in 'rnd', 'mv', 'df':
                df_lab_path = filehandler.get_data_fusion_path(song.key, combination_name, selection_name, audio)
                csr, ovs, uns, seg = evaluate(song.full_ground_truth_chord_labs_path, df_lab_path)
                label_data.append({'name': audio + '-' + combination_name.upper() + '-' + selection_name.upper(),
                                   'index': i, 'lab_path': df_lab_path,
                                   'csr': csr, 'ovs': ovs, 'uns': uns, 'seg': seg})

    # Fill a numpy array with chord labels for each of the lab files
    chord_matrix = np.zeros((len(label_data), nr_of_samples), dtype=int)
    for lab_nr in range(len(label_data)):
        data_fusion.load_lab_file_into_chord_matrix(label_data[lab_nr]['lab_path'], lab_nr, chord_matrix,
                                                    alphabet, nr_of_samples)
    all_chords = [chord_matrix[x] for x in range(len(label_data))]
    # Find names
    names = [label_dict['name'] for label_dict in label_data]
    # Find results (header row, then 'CSR OvS UnS Seg' values per lab)
    results = ['CSR OvS UnS Seg']
    for label_dict in label_data[1:]:
        results.append(' '.join([str(round(label_dict[measure], 2)).ljust(4, '0')
                                 for measure in ['csr', 'ovs', 'uns', 'seg']]))
    # Show result
    plt1 = _show_chord_sequences(song, all_chords, best_indices, names, results, alphabet)
    plt1.savefig(filehandler.get_lab_visualisation_path(song, audio), bbox_inches="tight", pad_inches=0)
    return song.title + " was visualised for the ACE method " + audio + "."
| [
"decibel.import_export.filehandler.get_file_name_from_full_path",
"decibel.import_export.filehandler.find_duplicate_midis",
"decibel.import_export.filehandler.file_exists",
"math.ceil",
"decibel.import_export.filehandler.get_full_midi_chord_labs_path",
"decibel.import_export.filehandler.get_data_fusion_pa... | [((1694, 2012), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['#242424', '#FE2712', '#FC600A', '#FB9902', '#FCCC1A', '#FEFE33',\n '#B2D732', '#66B032', '#347C98', '#0247FE', '#4424D6', '#8601AF',\n '#C21460', '#7f0b01', '#7e2d01', '#7e4c01', '#7e6302', '#7f7f01',\n '#586a15', '#3a631c', '#214d5f', '#01227f', '#23126d', '#61017f', '#730c39'\n ]"], {}), "(['#242424', '#FE2712', '#FC600A', '#FB9902',\n '#FCCC1A', '#FEFE33', '#B2D732', '#66B032', '#347C98', '#0247FE',\n '#4424D6', '#8601AF', '#C21460', '#7f0b01', '#7e2d01', '#7e4c01',\n '#7e6302', '#7f7f01', '#586a15', '#3a631c', '#214d5f', '#01227f',\n '#23126d', '#61017f', '#730c39'])\n", (1719, 2012), True, 'import matplotlib as mpl\n'), ((4065, 4108), 'numpy.vstack', 'np.vstack', (['(segment_starts, segment_starts)'], {}), '((segment_starts, segment_starts))\n', (4074, 4108), True, 'import numpy as np\n'), ((5517, 5549), 'decibel.music_objects.chord_alphabet.ChordAlphabet', 'ChordAlphabet', (['chords_vocabulary'], {}), '(chords_vocabulary)\n', (5530, 5549), False, 'from decibel.music_objects.chord_alphabet import ChordAlphabet\n'), ((2630, 2665), 'numpy.vstack', 'np.vstack', (['(new_chords, new_chords)'], {}), '((new_chords, new_chords))\n', (2639, 2665), True, 'import numpy as np\n'), ((5308, 5359), 'decibel.import_export.filehandler.get_lab_visualisation_path', 'filehandler.get_lab_visualisation_path', (['song', 'audio'], {}), '(song, audio)\n', (5346, 5359), False, 'from decibel.import_export import filehandler\n'), ((5475, 5500), 'math.ceil', 'ceil', (['(song.duration * 100)'], {}), '(song.duration * 100)\n', (5479, 5500), False, 'from math import ceil\n'), ((5875, 5913), 'decibel.import_export.filehandler.find_duplicate_midis', 'filehandler.find_duplicate_midis', (['song'], {}), '(song)\n', (5907, 5913), False, 'from decibel.import_export import filehandler\n'), ((5958, 5998), 
'decibel.data_fusion.data_fusion.get_expected_best_midi', 'data_fusion.get_expected_best_midi', (['song'], {}), '(song)\n', (5992, 5998), False, 'from decibel.data_fusion import data_fusion\n'), ((7286, 7329), 'decibel.data_fusion.data_fusion.get_expected_best_tab_lab', 'data_fusion.get_expected_best_tab_lab', (['song'], {}), '(song)\n', (7323, 7329), False, 'from decibel.data_fusion import data_fusion\n'), ((9291, 9417), 'decibel.data_fusion.data_fusion.load_lab_file_into_chord_matrix', 'data_fusion.load_lab_file_into_chord_matrix', (["label_data[lab_nr]['lab_path']", 'lab_nr', 'chord_matrix', 'alphabet', 'nr_of_samples'], {}), "(label_data[lab_nr]['lab_path'],\n lab_nr, chord_matrix, alphabet, nr_of_samples)\n", (9334, 9417), False, 'from decibel.data_fusion import data_fusion\n'), ((9999, 10050), 'decibel.import_export.filehandler.get_lab_visualisation_path', 'filehandler.get_lab_visualisation_path', (['song', 'audio'], {}), '(song, audio)\n', (10037, 10050), False, 'from decibel.import_export import filehandler\n'), ((3665, 3690), 'math.ceil', 'ceil', (['(song.duration * 100)'], {}), '(song.duration * 100)\n', (3669, 3690), False, 'from math import ceil\n'), ((3754, 3784), 'math.ceil', 'ceil', (['(segmentation[i][0] * 100)'], {}), '(segmentation[i][0] * 100)\n', (3758, 3784), False, 'from math import ceil\n'), ((6148, 6204), 'decibel.import_export.filehandler.get_file_name_from_full_path', 'filehandler.get_file_name_from_full_path', (['full_midi_path'], {}), '(full_midi_path)\n', (6188, 6204), False, 'from decibel.import_export import filehandler\n'), ((7441, 7496), 'decibel.import_export.filehandler.get_full_tab_chord_labs_path', 'filehandler.get_full_tab_chord_labs_path', (['full_tab_path'], {}), '(full_tab_path)\n', (7481, 7496), False, 'from decibel.import_export import filehandler\n'), ((7512, 7556), 'decibel.import_export.filehandler.file_exists', 'filehandler.file_exists', (['tab_chord_labs_path'], {}), '(tab_chord_labs_path)\n', (7535, 7556), False, 
'from decibel.import_export import filehandler\n'), ((8231, 8286), 'decibel.import_export.filehandler.get_full_mirex_chord_labs_path', 'filehandler.get_full_mirex_chord_labs_path', (['song', 'audio'], {}), '(song, audio)\n', (8273, 8286), False, 'from decibel.import_export import filehandler\n'), ((6301, 6374), 'decibel.import_export.filehandler.get_full_midi_chord_labs_path', 'filehandler.get_full_midi_chord_labs_path', (['midi_name', 'segmentation_method'], {}), '(midi_name, segmentation_method)\n', (6342, 6374), False, 'from decibel.import_export import filehandler\n'), ((7627, 7696), 'decibel.evaluator.evaluator.evaluate', 'evaluate', (['song.full_ground_truth_chord_labs_path', 'tab_chord_labs_path'], {}), '(song.full_ground_truth_chord_labs_path, tab_chord_labs_path)\n', (7635, 7696), False, 'from decibel.evaluator.evaluator import evaluate\n'), ((8381, 8436), 'decibel.import_export.filehandler.get_full_mirex_chord_labs_path', 'filehandler.get_full_mirex_chord_labs_path', (['song', 'audio'], {}), '(song, audio)\n', (8423, 8436), False, 'from decibel.import_export import filehandler\n'), ((8645, 8732), 'decibel.import_export.filehandler.get_data_fusion_path', 'filehandler.get_data_fusion_path', (['song.key', 'combination_name', 'selection_name', 'audio'], {}), '(song.key, combination_name, selection_name,\n audio)\n', (8677, 8732), False, 'from decibel.import_export import filehandler\n'), ((8766, 8827), 'decibel.evaluator.evaluator.evaluate', 'evaluate', (['song.full_ground_truth_chord_labs_path', 'df_lab_path'], {}), '(song.full_ground_truth_chord_labs_path, df_lab_path)\n', (8774, 8827), False, 'from decibel.evaluator.evaluator import evaluate\n'), ((6394, 6440), 'decibel.import_export.filehandler.file_exists', 'filehandler.file_exists', (['full_midi_chords_path'], {}), '(full_midi_chords_path)\n', (6417, 6440), False, 'from decibel.import_export import filehandler\n'), ((6582, 6653), 'decibel.evaluator.evaluator.evaluate', 'evaluate', 
(['song.full_ground_truth_chord_labs_path', 'full_midi_chords_path'], {}), '(song.full_ground_truth_chord_labs_path, full_midi_chords_path)\n', (6590, 6653), False, 'from decibel.evaluator.evaluator import evaluate\n')] |
#!/usr/bin/env python
from iri2016 import IRI2016Profile
#
import numpy as np
from matplotlib.pyplot import figure, show
""" Height Profile Example """
lat = -11.95
lon = -76.77
time = "2003-11-21T12"
altlim = [90.0, 200.0]
altstp = 2.0
sim = IRI2016Profile(
altlim=altlim, altstp=altstp, lat=lat, lon=lon, time=time, option="vertical", verbose=False
)
altbins = np.arange(altlim[0], altlim[1] + altstp, altstp)
index = range(altbins.size)
fig = figure(figsize=(16, 6))
axs = fig.subplots(1, 2)
ne = sim.a[0, index]
nO2p = sim.a[7, index] * ne * 1e-2
nNOp = sim.a[8, index] * ne * 1e-2
# nOp = sim.a[5, index] * ne * 1e-2
pn = axs[0]
pn.plot(ne, altbins, label="N$_e$")
pn.plot(nO2p, altbins, label="O$_2$$^+$")
pn.plot(nNOp, altbins, label="NO$^+$")
# pn.plot(nOp, altbins, label='O$^+$')
pn.set_title(sim.title1)
pn.set_xlabel("Density (m$^{-3}$)")
pn.set_ylabel("Altitude (km)")
pn.set_xscale("log")
pn = axs[1]
ti = sim.a[2, index]
te = sim.a[3, index]
pn.plot(ti, altbins, label="T$_i$")
pn.plot(te, altbins, label="T$_e$")
pn.set_title(sim.title2)
pn.set_xlabel(r"Temperature ($^\circ$K)")
pn.set_ylabel("Altitude (km)")
for a in axs:
a.legend(loc="best")
a.grid(True)
show()
| [
"iri2016.IRI2016Profile",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((247, 358), 'iri2016.IRI2016Profile', 'IRI2016Profile', ([], {'altlim': 'altlim', 'altstp': 'altstp', 'lat': 'lat', 'lon': 'lon', 'time': 'time', 'option': '"""vertical"""', 'verbose': '(False)'}), "(altlim=altlim, altstp=altstp, lat=lat, lon=lon, time=time,\n option='vertical', verbose=False)\n", (261, 358), False, 'from iri2016 import IRI2016Profile\n'), ((372, 420), 'numpy.arange', 'np.arange', (['altlim[0]', '(altlim[1] + altstp)', 'altstp'], {}), '(altlim[0], altlim[1] + altstp, altstp)\n', (381, 420), True, 'import numpy as np\n'), ((457, 480), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(16, 6)'}), '(figsize=(16, 6))\n', (463, 480), False, 'from matplotlib.pyplot import figure, show\n'), ((1201, 1207), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (1205, 1207), False, 'from matplotlib.pyplot import figure, show\n')] |
import numpy as np
import gurobipy as gp
from gurobipy import GRB
import time
from scipy.sparse.csgraph import shortest_path
class OptimalFlow:
def __init__(self, network, users):
self.network = network
self.users = users
self.num_edges = network.NumEdges
self.num_users = users.num_users
self.model = None
self.x_eu = None
self.x_e = None
self.define_model(network, users)
def solve(self):
self.model.optimize()
# If infeasible, terminate program
assert self.model.status != GRB.INFEASIBLE
# extract the solution flows
x = np.zeros((self.num_edges, self.num_users))
x_dict = self.model.getAttr('x', self.x_eu)
for e in range(self.num_edges):
for u in range(self.num_users):
x[e, u] = x_dict[e, u]
f = np.sum(x, axis=1)
return x, f
def set_obj(self, users):
# toll_obj = 0
for e in range(self.num_edges):
for u in range(self.num_users):
self.x_eu[e,u].Obj = users.data[u]['vot'] * self.network.edge_latency[e]
def define_model(self, network, users):
num_edges = network.NumEdges
num_users = users.num_users
# Model initialization
m = gp.Model('VoT')
m.setParam('OutputFlag', 0)
# decision variable
x_eu = m.addVars(num_edges, num_users, lb=0.0, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="x_eu")
# introducing edge flows
x_e = m.addVars(num_edges, lb=0.0, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="x_e")
m.addConstrs(x_eu.sum(e, '*') == x_e[e] for e in range(num_edges))
# demand from origin constraint
m.addConstrs(
sum([x_eu[e, u] for e in network.next(node=users.data[u]['orig'])]) == users.data[u]['vol']
for u in range(num_users))
m.addConstrs(
sum([x_eu[e, u] for e in network.prev(node=users.data[u]['orig'])]) == 0
for u in range(num_users))
# demand at destination constraint
m.addConstrs(
sum([x_eu[e, u] for e in network.prev(node=users.data[u]['dest'])]) ==
users.data[u]['vol']
for u in range(num_users))
m.addConstrs(
sum([x_eu[e, u] for e in network.next(node=users.data[u]['dest'])]) == 0
for u in range(num_users))
# flow conservation
for u in range(num_users):
exclude_od_nodes = [n for n in range(network.NumNodes)]
exclude_od_nodes.remove(users.data[u]['orig'])
exclude_od_nodes.remove(users.data[u]['dest'])
m.addConstrs(
sum(x_eu[g, u] for g in network.prev(node=n)) ==
sum(x_eu[g, u] for g in network.next(node=n))
for n in exclude_od_nodes)
# capacity constraints (testing the for loop so that we can extract duals later)
for e in range(num_edges):
m.addConstr(x_e[e] <= network.capacity[e], name='capacity' + str(e))
self.model = m
self.x_eu = x_eu
self.x_e = x_e
return None
| [
"numpy.sum",
"numpy.zeros",
"gurobipy.Model"
] | [((644, 686), 'numpy.zeros', 'np.zeros', (['(self.num_edges, self.num_users)'], {}), '((self.num_edges, self.num_users))\n', (652, 686), True, 'import numpy as np\n'), ((875, 892), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (881, 892), True, 'import numpy as np\n'), ((1303, 1318), 'gurobipy.Model', 'gp.Model', (['"""VoT"""'], {}), "('VoT')\n", (1311, 1318), True, 'import gurobipy as gp\n')] |
from crossSection import sigma_with_masses, PMNS_matrix, neutrino_masses, mfp_gpc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import random
import warnings
import sys
import os
warnings.filterwarnings("ignore")
plt.style.use('ja')
data_dir = '/Users/james/allMyStuff/Neutrinos/Constraints/highenergy/'
if __name__ == '__main__':
Enu_TeV_arr = [290., 3000., 6000.]
labels = ['290 TeV', '3 PeV', '6 PeV']
colors = ['#357DED', '#0D0221', '#0D0221']
lss = ['-', '--', ':']
plt.figure(figsize=(8, 5))
for idx, Enu_TeV in enumerate(Enu_TeV_arr):
min_nu_mass = 0.03 # GeV
hierarchy = 'normal'
nu_masses = neutrino_masses(min_nu_mass, hierarchy)
Ecom_MeV = np.sqrt(0.5*nu_masses*Enu_TeV)
s_arr = 4 * np.power(Ecom_MeV, 2)
ge = 0
gt = 3*np.power(10.0, -1)
gm = np.power(10.0, -2)
# Fix neutrino number density and mass (multiply by 3 in complex case assuming small splitting)
n_nu = 340 # cm^-3
n_eff = (340/6.0) * 3.0
# Conversion factors
cm = 3.240755 * np.power(10.0, -28) # Gpc
MeV = 8065.54429 * np.power(10.0, 6) # cm^-1
# Distance to blazar
D_blazar = 1.3 # Gpc
# PMNS matrix
t12 = 33.63
t23 = 47.2
t13 = 8.54
dcp = 234
pmns = PMNS_matrix(t12, t23, t13, dcp)
mn = np.linspace(0.0, 15.0, 500)
mp = np.linspace(0.0, 15.0, 500)
MN, MP = np.meshgrid(mn, mp)
sigma_cm = sigma_with_masses(s_arr, ge, gm, gt, MP, MN, pmns)*np.power(MeV, -2)
mfp = mfp_gpc(n_eff, sigma_cm, cm)
region = (MN <= MP)
mfp[region] = np.ma.masked
ctr = plt.contour(MP, MN, mfp,
colors=colors[idx],
levels=[D_blazar],
linewidths=0.0)
mp_trace, mn_trace = ctr.allsegs[0][0].T
mask = (mp_trace < 0.9*mn_trace)
plt.plot(mp_trace[mask], mn_trace[mask],
c=colors[idx],
ls=lss[idx],
label=labels[idx])
if idx == 0:
plt.plot([mp_trace[0], mp_trace[0]], [mn_trace[0], mp_trace[0]],
c=colors[idx],
ls=lss[idx],)
plt.fill(np.append(mp_trace, [0, mp_trace[0]]), np.append(mn_trace, [0, mp_trace[0]]),
color=colors[idx],
alpha=0.1)
plt.plot(np.linspace(0.1, 10), np.linspace(0.1, 10),
color='k',
ls='-',
lw=0.3)
plt.text(np.power(10.0, -0.7), np.power(10.0, -0.6), r'$m_N > m_\delta$', rotation=27.0)
plt.xlabel(r'$m_\delta \, \mathrm{[MeV]}$')
plt.ylabel(r'$m_N \, \mathrm{[MeV]}$')
plt.plot([3.9, 3.9], [0.0, 30.0],
label='',
c='k',
ls='-')
plt.xscale('log')
plt.yscale('log')
plt.text(4.3, np.power(10.0, -0.8), r'$\mathrm{N}_{\mathrm{eff}}$', rotation=90.0)
plt.plot([6.74, 6.74], [0.0, 30.0],
label='',
c='k',
ls='-')
plt.text(7.4, np.power(10.0, 0.5), r'$\mathrm{BBN} + \mathrm{Planck} + \mathrm{N}_{\mathrm{eff}} + Y_p$', rotation=90.0)
plt.text(np.power(10.0, -0.9), np.power(10.0, 1.1), r'$g_\mu = 10^{-2}, m_\nu^{\mathrm{min}} = 0.03 \, \mathrm{eV}$')
plt.text(np.power(10.0, -0.9), np.power(10.0, 1.3), r'$\mathrm{Normal}\,\,\mathrm{Hierarchy}$')
#plt.plot(np.linspace(0, 12), np.linspace(0, 12))
plt.legend(loc='lower center', fontsize=14, title='Neutrino Energy', title_fontsize=10)
plt.xlim([np.power(10.0, -1), np.power(10.0, 1)])
plt.ylim([np.power(10.0, -1), 30.0])
ax = plt.gca()
ax.tick_params(which='minor', length=1.5)
plt.savefig('mpmnconstraints_test.pdf')
| [
"crossSection.PMNS_matrix",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.contour",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"crossSection.mfp... | [((215, 248), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (238, 248), False, 'import warnings\n'), ((249, 268), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ja"""'], {}), "('ja')\n", (262, 268), True, 'import matplotlib.pyplot as plt\n'), ((513, 539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (523, 539), True, 'import matplotlib.pyplot as plt\n'), ((2227, 2272), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$m_\\\\delta \\\\, \\\\mathrm{[MeV]}$"""'], {}), "('$m_\\\\delta \\\\, \\\\mathrm{[MeV]}$')\n", (2237, 2272), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$m_N \\\\, \\\\mathrm{[MeV]}$"""'], {}), "('$m_N \\\\, \\\\mathrm{[MeV]}$')\n", (2282, 2311), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2370), 'matplotlib.pyplot.plot', 'plt.plot', (['[3.9, 3.9]', '[0.0, 30.0]'], {'label': '""""""', 'c': '"""k"""', 'ls': '"""-"""'}), "([3.9, 3.9], [0.0, 30.0], label='', c='k', ls='-')\n", (2320, 2370), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2396), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2389, 2396), True, 'import matplotlib.pyplot as plt\n'), ((2398, 2415), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2408, 2415), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2561), 'matplotlib.pyplot.plot', 'plt.plot', (['[6.74, 6.74]', '[0.0, 30.0]'], {'label': '""""""', 'c': '"""k"""', 'ls': '"""-"""'}), "([6.74, 6.74], [0.0, 30.0], label='', c='k', ls='-')\n", (2509, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2959, 3050), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""', 'fontsize': '(14)', 'title': '"""Neutrino Energy"""', 'title_fontsize': '(10)'}), "(loc='lower center', fontsize=14, title='Neutrino Energy',\n title_fontsize=10)\n", (2969, 3050), True, 'import 
matplotlib.pyplot as plt\n'), ((3142, 3151), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3149, 3151), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3235), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mpmnconstraints_test.pdf"""'], {}), "('mpmnconstraints_test.pdf')\n", (3207, 3235), True, 'import matplotlib.pyplot as plt\n'), ((649, 688), 'crossSection.neutrino_masses', 'neutrino_masses', (['min_nu_mass', 'hierarchy'], {}), '(min_nu_mass, hierarchy)\n', (664, 688), False, 'from crossSection import sigma_with_masses, PMNS_matrix, neutrino_masses, mfp_gpc\n'), ((702, 736), 'numpy.sqrt', 'np.sqrt', (['(0.5 * nu_masses * Enu_TeV)'], {}), '(0.5 * nu_masses * Enu_TeV)\n', (709, 736), True, 'import numpy as np\n'), ((813, 831), 'numpy.power', 'np.power', (['(10.0)', '(-2)'], {}), '(10.0, -2)\n', (821, 831), True, 'import numpy as np\n'), ((1218, 1249), 'crossSection.PMNS_matrix', 'PMNS_matrix', (['t12', 't23', 't13', 'dcp'], {}), '(t12, t23, t13, dcp)\n', (1229, 1249), False, 'from crossSection import sigma_with_masses, PMNS_matrix, neutrino_masses, mfp_gpc\n'), ((1258, 1285), 'numpy.linspace', 'np.linspace', (['(0.0)', '(15.0)', '(500)'], {}), '(0.0, 15.0, 500)\n', (1269, 1285), True, 'import numpy as np\n'), ((1293, 1320), 'numpy.linspace', 'np.linspace', (['(0.0)', '(15.0)', '(500)'], {}), '(0.0, 15.0, 500)\n', (1304, 1320), True, 'import numpy as np\n'), ((1333, 1352), 'numpy.meshgrid', 'np.meshgrid', (['mn', 'mp'], {}), '(mn, mp)\n', (1344, 1352), True, 'import numpy as np\n'), ((1444, 1472), 'crossSection.mfp_gpc', 'mfp_gpc', (['n_eff', 'sigma_cm', 'cm'], {}), '(n_eff, sigma_cm, cm)\n', (1451, 1472), False, 'from crossSection import sigma_with_masses, PMNS_matrix, neutrino_masses, mfp_gpc\n'), ((1533, 1612), 'matplotlib.pyplot.contour', 'plt.contour', (['MP', 'MN', 'mfp'], {'colors': 'colors[idx]', 'levels': '[D_blazar]', 'linewidths': '(0.0)'}), '(MP, MN, mfp, colors=colors[idx], levels=[D_blazar], linewidths=0.0)\n', (1544, 1612), True, 
'import matplotlib.pyplot as plt\n'), ((1704, 1796), 'matplotlib.pyplot.plot', 'plt.plot', (['mp_trace[mask]', 'mn_trace[mask]'], {'c': 'colors[idx]', 'ls': 'lss[idx]', 'label': 'labels[idx]'}), '(mp_trace[mask], mn_trace[mask], c=colors[idx], ls=lss[idx], label=\n labels[idx])\n', (1712, 1796), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2079), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)'], {}), '(0.1, 10)\n', (2070, 2079), True, 'import numpy as np\n'), ((2081, 2101), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)'], {}), '(0.1, 10)\n', (2092, 2101), True, 'import numpy as np\n'), ((2146, 2166), 'numpy.power', 'np.power', (['(10.0)', '(-0.7)'], {}), '(10.0, -0.7)\n', (2154, 2166), True, 'import numpy as np\n'), ((2168, 2188), 'numpy.power', 'np.power', (['(10.0)', '(-0.6)'], {}), '(10.0, -0.6)\n', (2176, 2188), True, 'import numpy as np\n'), ((2431, 2451), 'numpy.power', 'np.power', (['(10.0)', '(-0.8)'], {}), '(10.0, -0.8)\n', (2439, 2451), True, 'import numpy as np\n'), ((2584, 2603), 'numpy.power', 'np.power', (['(10.0)', '(0.5)'], {}), '(10.0, 0.5)\n', (2592, 2603), True, 'import numpy as np\n'), ((2701, 2721), 'numpy.power', 'np.power', (['(10.0)', '(-0.9)'], {}), '(10.0, -0.9)\n', (2709, 2721), True, 'import numpy as np\n'), ((2723, 2742), 'numpy.power', 'np.power', (['(10.0)', '(1.1)'], {}), '(10.0, 1.1)\n', (2731, 2742), True, 'import numpy as np\n'), ((2820, 2840), 'numpy.power', 'np.power', (['(10.0)', '(-0.9)'], {}), '(10.0, -0.9)\n', (2828, 2840), True, 'import numpy as np\n'), ((2842, 2861), 'numpy.power', 'np.power', (['(10.0)', '(1.3)'], {}), '(10.0, 1.3)\n', (2850, 2861), True, 'import numpy as np\n'), ((747, 768), 'numpy.power', 'np.power', (['Ecom_MeV', '(2)'], {}), '(Ecom_MeV, 2)\n', (755, 768), True, 'import numpy as np\n'), ((787, 805), 'numpy.power', 'np.power', (['(10.0)', '(-1)'], {}), '(10.0, -1)\n', (795, 805), True, 'import numpy as np\n'), ((1019, 1038), 'numpy.power', 'np.power', (['(10.0)', '(-28)'], {}), '(10.0, 
-28)\n', (1027, 1038), True, 'import numpy as np\n'), ((1066, 1083), 'numpy.power', 'np.power', (['(10.0)', '(6)'], {}), '(10.0, 6)\n', (1074, 1083), True, 'import numpy as np\n'), ((1367, 1417), 'crossSection.sigma_with_masses', 'sigma_with_masses', (['s_arr', 'ge', 'gm', 'gt', 'MP', 'MN', 'pmns'], {}), '(s_arr, ge, gm, gt, MP, MN, pmns)\n', (1384, 1417), False, 'from crossSection import sigma_with_masses, PMNS_matrix, neutrino_masses, mfp_gpc\n'), ((1418, 1435), 'numpy.power', 'np.power', (['MeV', '(-2)'], {}), '(MeV, -2)\n', (1426, 1435), True, 'import numpy as np\n'), ((1820, 1917), 'matplotlib.pyplot.plot', 'plt.plot', (['[mp_trace[0], mp_trace[0]]', '[mn_trace[0], mp_trace[0]]'], {'c': 'colors[idx]', 'ls': 'lss[idx]'}), '([mp_trace[0], mp_trace[0]], [mn_trace[0], mp_trace[0]], c=colors[\n idx], ls=lss[idx])\n', (1828, 1917), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1970), 'numpy.append', 'np.append', (['mp_trace', '[0, mp_trace[0]]'], {}), '(mp_trace, [0, mp_trace[0]])\n', (1942, 1970), True, 'import numpy as np\n'), ((1972, 2009), 'numpy.append', 'np.append', (['mn_trace', '[0, mp_trace[0]]'], {}), '(mn_trace, [0, mp_trace[0]])\n', (1981, 2009), True, 'import numpy as np\n'), ((3058, 3076), 'numpy.power', 'np.power', (['(10.0)', '(-1)'], {}), '(10.0, -1)\n', (3066, 3076), True, 'import numpy as np\n'), ((3078, 3095), 'numpy.power', 'np.power', (['(10.0)', '(1)'], {}), '(10.0, 1)\n', (3086, 3095), True, 'import numpy as np\n'), ((3109, 3127), 'numpy.power', 'np.power', (['(10.0)', '(-1)'], {}), '(10.0, -1)\n', (3117, 3127), True, 'import numpy as np\n')] |
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(1):
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(frame,frame, mask= mask)
kernel = np.ones((15,15),np.float32)/225
smoothed = cv2.filter2D(res,-1,kernel)
cv2.imshow('Original',frame)
cv2.imshow('Averaging',smoothed)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
blur = cv2.GaussianBlur(res,(15,15),0)
cv2.imshow('Gaussian Blurring',blur)
median = cv2.medianBlur(res,15)
cv2.imshow('Median Blur',median)
| [
"numpy.ones",
"cv2.inRange",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.filter2D",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.waitKey"
] | [((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2\n'), ((553, 576), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (574, 576), False, 'import cv2\n'), ((598, 632), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['res', '(15, 15)', '(0)'], {}), '(res, (15, 15), 0)\n', (614, 632), False, 'import cv2\n'), ((630, 667), 'cv2.imshow', 'cv2.imshow', (['"""Gaussian Blurring"""', 'blur'], {}), "('Gaussian Blurring', blur)\n", (640, 667), False, 'import cv2\n'), ((676, 699), 'cv2.medianBlur', 'cv2.medianBlur', (['res', '(15)'], {}), '(res, 15)\n', (690, 699), False, 'import cv2\n'), ((699, 732), 'cv2.imshow', 'cv2.imshow', (['"""Median Blur"""', 'median'], {}), "('Median Blur', median)\n", (709, 732), False, 'import cv2\n'), ((105, 143), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (117, 143), False, 'import cv2\n'), ((165, 188), 'numpy.array', 'np.array', (['[30, 150, 50]'], {}), '([30, 150, 50])\n', (173, 188), True, 'import numpy as np\n'), ((203, 228), 'numpy.array', 'np.array', (['[255, 255, 180]'], {}), '([255, 255, 180])\n', (211, 228), True, 'import numpy as np\n'), ((243, 281), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_red', 'upper_red'], {}), '(hsv, lower_red, upper_red)\n', (254, 281), False, 'import cv2\n'), ((292, 332), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (307, 332), False, 'import cv2\n'), ((393, 422), 'cv2.filter2D', 'cv2.filter2D', (['res', '(-1)', 'kernel'], {}), '(res, -1, kernel)\n', (405, 422), False, 'import cv2\n'), ((425, 454), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'frame'], {}), "('Original', frame)\n", (435, 454), False, 'import cv2\n'), ((458, 491), 'cv2.imshow', 'cv2.imshow', (['"""Averaging"""', 'smoothed'], {}), "('Averaging', smoothed)\n", (468, 491), False, 'import cv2\n'), ((346, 375), 'numpy.ones', 'np.ones', (['(15, 15)', 
'np.float32'], {}), '((15, 15), np.float32)\n', (353, 375), True, 'import numpy as np\n'), ((500, 514), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (511, 514), False, 'import cv2\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
Salary = pd.read_csv("data.csv")
X = Salary['YearsExperience'].values
y = Salary['Salary'].values
X = X.reshape(-1,1)
y = y.reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(X,y,train_size=0.8,test_size=0.2,random_state=100)
print(f"X_train shape {x_train.shape}")
print(f"y_train shape {y_train.shape}")
print(f"X_test shap {x_test.shape}")
print(f"y_test shape {y_test.shape}")
lm = LinearRegression()
lm.fit(x_train,y_train)
y_predict = lm.predict(x_test)
print(f"Train accuracy {round(lm.score(x_train,y_train)*100,2)} %")
print(f"Test accuracy {round(lm.score(x_test,y_test)*100,2)} %")
yoe = np.array([15,1.5,7.3,9.65])
yoe = yoe.reshape(-1,1)
yoe_salary = lm.predict(yoe)
for salary in yoe_salary:
print(f"$ {salary}")
plt.scatter(x_train,y_train,color='red')
plt.xlabel('Years of experience')
plt.ylabel('Salary in $')
plt.title('Training data')
plt.show()
plt.scatter(x_train,y_train,color='red')
plt.plot(x_test,y_predict)
plt.xlabel("Years of Experience")
plt.ylabel("Salary in $")
plt.title("Trained model plot")
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((184, 207), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (195, 207), True, 'import pandas as pd\n'), ((351, 422), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': '(0.8)', 'test_size': '(0.2)', 'random_state': '(100)'}), '(X, y, train_size=0.8, test_size=0.2, random_state=100)\n', (367, 422), False, 'from sklearn.model_selection import train_test_split\n'), ((582, 600), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (598, 600), False, 'from sklearn.linear_model import LinearRegression\n'), ((797, 827), 'numpy.array', 'np.array', (['[15, 1.5, 7.3, 9.65]'], {}), '([15, 1.5, 7.3, 9.65])\n', (805, 827), True, 'import numpy as np\n'), ((935, 977), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train', 'y_train'], {'color': '"""red"""'}), "(x_train, y_train, color='red')\n", (946, 977), True, 'import matplotlib.pyplot as plt\n'), ((976, 1009), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years of experience"""'], {}), "('Years of experience')\n", (986, 1009), True, 'import matplotlib.pyplot as plt\n'), ((1010, 1035), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary in $"""'], {}), "('Salary in $')\n", (1020, 1035), True, 'import matplotlib.pyplot as plt\n'), ((1036, 1062), 'matplotlib.pyplot.title', 'plt.title', (['"""Training data"""'], {}), "('Training data')\n", (1045, 1062), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1071, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1118), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train', 'y_train'], {'color': '"""red"""'}), "(x_train, y_train, color='red')\n", (1087, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1144), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 'y_predict'], {}), '(x_test, y_predict)\n', (1125, 1144), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1177), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years of Experience"""'], {}), "('Years of Experience')\n", (1154, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary in $"""'], {}), "('Salary in $')\n", (1188, 1203), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1235), 'matplotlib.pyplot.title', 'plt.title', (['"""Trained model plot"""'], {}), "('Trained model plot')\n", (1213, 1235), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1244, 1246), True, 'import matplotlib.pyplot as plt\n')] |
'''
A class to generate descriptors of an image or a set of images
Author: <NAME>
Date: 01/03/2020
'''
import os
import numpy as np
import cv2
import sys
from HandSegmentation import Segmentation
from ObjectDetector import ObjectDetector
import time
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
import statistics
class DescriptorGenerator:
    def __init__(self):
        """Create an empty generator; call initialize() before use.

        Model loading is deliberately deferred to initialize() so that
        constructing the object stays cheap.
        """
        pass
def initialize(self):
model = 'TOR_hand_all+TEgO_fcn8_10k_16_1e-5_450x450'
threshold = 0.5
self.input_width = 450
self.input_height = 450
# initialize Localizer and Classifier
debug = False
self.segmentation = Segmentation(model=model,
threshold=threshold,
image_width=self.input_width,
image_height=self.input_height,
debug=debug)
self.object_detector = ObjectDetector()
'''
generate the image descriptor
'''
def getImageDescriptor(self, img_path):
th_hand = 1343 # 0.0034542181
hand = False
hand_area = self.getHandArea(img_path)
if hand_area > th_hand:
hand = True
blurry = False
# th_blur = 29.2
th_blur = 3
blurriness = self.getBlurriness(img_path)
if blurriness < th_blur:
blurry = True
cropped = False
small = False
boxes, img_width, img_height = self.object_detector.detect(img_path)
if len(boxes) == 1:
if self.isCropped(boxes[0], img_width, img_height):
cropped = True
if self.isSmall(boxes[0], img_width, img_height):
small = True
desc_obj = {}
desc_obj['hand_area'] = hand_area
desc_obj['blurriness'] = blurriness
desc_obj['boxes'] = boxes
desc_obj['img_width'] = img_width
desc_obj['img_height'] = img_height
return hand, blurry, cropped, small, desc_obj
# return hand_area, blurriness,
def isSmall(self, box, img_width, img_height):
box_w = box['xmax']-box['xmin']
box_y = box['ymax']-box['ymin']
if box_w*box_y/(img_width+1)*(img_height+1) < 0.125:
return True
return False
def isCropped(self, box, img_width, img_height):
if box['xmin'] < 0.02 * img_width or box['ymin'] < 0.02 * img_height or box['xmax'] > 0.98 * img_width or box['ymax'] > 0.98 * img_height:
return True
return False
def getHandArea(self, img_path):
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
if self.input_width is not None and self.input_height is not None:
new_shape = (int(self.input_width), int(self.input_height))
image = cv2.resize(image, new_shape, cv2.INTER_CUBIC)
# localize an object from the input image
image, pred = self.segmentation.do(image)
hand_area = np.sum(pred)
return hand_area
'''
generate the set descriptor
'''
def getSetDescriptor(self, arinfo_path):
arinfo = self.loadARInfo(arinfo_path)
cam_pos_sd, cam_ori_sd = self.computeBackgroundVariation(arinfo)
side_num = self.computeSideVariation(arinfo)
dist_sd = self.computeDistanceVariation(arinfo)
hand, blurry, cropped, small = self.countImgDescriptors(arinfo)
# bg_var = True if cam_pos_sd > 0.1 or cam_ori_sd > 0.1 else False
# side_var = True if side_num > 1 else False
# dist_var = True if dist_sd > 0.1 else False
#
# return bg_var, side_var, dist_var, hand, blurry, cropped, small
# bg_var = min(max(cam_pos_sd/0.15, cam_ori_sd / 0.15), 1.0) * 100
# side_var = min(side_num/1.5, 1.0) * 100
# dist_var = min(dist_sd/0.15, 1.0) * 100
bg_var = max(cam_pos_sd, cam_ori_sd)
side_var = side_num
if side_var == 0:
side_var = cam_ori_sd
dist_var = dist_sd
if dist_var == 0:
dist_var = cam_pos_sd
print('set descriptors:')
print('bg_var', bg_var, 'side_var', side_var, 'dist_var', dist_var)
print('cam_pos_sd', cam_pos_sd, 'cam_ori_sd', cam_ori_sd, 'side_num', side_num, 'dist_sd', dist_sd)
return bg_var, side_var, dist_var, hand, blurry, cropped, small
'''
compute the background variation using the AR information.
The background variation is the variation of camera orientation and position
'''
def computeBackgroundVariation(self, arinfo):
pos_diff = []
orientation_diff = []
for img_id1, img_info1 in arinfo.items():
for img_id2, img_info2 in arinfo.items():
if img_id1 < img_id2:
cp1 = img_info1['camera_position']
cp2 = img_info2['camera_position']
pd = euclidean_distances([cp1], [cp2])[0][0]
pos_diff.append(pd)
co1 = img_info1['camera_orientation']
co2 = img_info2['camera_orientation']
od = 1-cosine_similarity([co1], [co2])[0][0]
orientation_diff.append(od)
# print(statistics.stdev(pos_diff),statistics.stdev(orientation_diff))
return statistics.stdev(pos_diff),statistics.stdev(orientation_diff)
def computeSideVariation(self, arinfo):
sides = []
for img_id, img_info in arinfo.items():
ar_side = img_info['ar_side']
if not ar_side in sides:
sides.append(ar_side)
# print(sides)
return len(sides) - 1
def computeDistanceVariation(self, arinfo):
dist_diff = []
for img_id1, img_info1 in arinfo.items():
for img_id2, img_info2 in arinfo.items():
dist1 = euclidean_distances([img_info1['obj_cam_position']], [(0, 0, 0)])[0][0]
dist2 = euclidean_distances([img_info2['obj_cam_position']], [(0, 0, 0)])[0][0]
dd = abs(dist1-dist2)
if not dd in dist_diff:
dist_diff.append(dd)
if len(dist_diff) > 1:
# print(statistics.stdev(dist_diff))
return statistics.stdev(dist_diff)
else:
# print('0')
return 0
def countImgDescriptors(self, arinfo):
hand, blurry, cropped, small = 0, 0, 0, 0
for img_id, img_info in arinfo.items():
if img_info['hand'] == 'True':
hand += 1
if img_info['blurry'] == 'True':
blurry += 1
if img_info['cropped'] == 'True':
cropped += 1
if img_info['small'] == 'True':
small += 1
# print(hand, blurry, cropped, small)
return hand, blurry, cropped, small
'''
load AR information from a text file.
format:
var desc_info = "\(count)#\(self.ar_side)#" + //1
"\(self.camera_position.0)#\(self.camera_position.1)#\(self.camera_position.2)," + // 4
"\(self.camera_orientation.0)#\(self.camera_orientation.1)#\(self.camera_orientation.2)" + // 7
"\(self.obj_position.0)#\(self.obj_position.1)#\(self.obj_position.2)," + // 10
"\(self.obj_orientation.0)#\(self.obj_orientation.1)#\(self.obj_orientation.2)" + // 13
"\(self.obj_cam_position.0)#\(self.obj_cam_position.1)#\(self.obj_cam_position.2)" + // 16
"\(cam_mat)#\(obj_mat)#\(cam_obj_mat)" // 19
'''
def loadARInfo(self, arinfo_path):
arinfo = {}
f = open(arinfo_path, "r")
for line in f:
words = line.split('#')
# for i, w in enumerate(words):
# print(i, w)
# print()
img_id = int(words[0])
arinfo[img_id] = {}
arinfo[img_id]['ar_side'] = words[1]
arinfo[img_id]['camera_position'] = (float(words[2]), float(words[3]), float(words[4]))
arinfo[img_id]['camera_orientation'] = (float(words[5]), float(words[6]), float(words[7]))
arinfo[img_id]['obj_cam_position'] = (float(words[14]), float(words[15]), float(words[16]))
arinfo[img_id]['hand'] = words[20]
arinfo[img_id]['blurry'] = words[21]
arinfo[img_id]['cropped'] = words[22]
arinfo[img_id]['small'] = words[23]
return arinfo
# higher value means more blurriness
def getBlurriness(self, img_path):
image = cv2.imread(img_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# compute the Laplacian of the image and then return the focus
# measure, which is simply the variance of the Laplacian
blurriness = cv2.Laplacian(gray, cv2.CV_64F).var()
print('blur:', blurriness)
return blurriness
if __name__ == '__main__':
    dg = DescriptorGenerator()
    dg.initialize()
    # Spot-check the set descriptor on a couple of recorded training sets.
    datasets = [
        ('/home/jhong12/TOR-app-files/photo/TrainFiles/B2803393-73CE-4F25-B9F1-410D2A37D0DE/Spice/Knife',
         '/home/jhong12/TOR-app-files/ARInfo/B2803393-73CE-4F25-B9F1-410D2A37D0DE/Knife/desc_info.txt'),
        ('/home/jhong12/TOR-app-files/photo/TrainFiles/74DBAC2E-79F5-4C39-B281-7719602D54BC/Spice/Mouse',
         '/home/jhong12/TOR-app-files/ARInfo/74DBAC2E-79F5-4C39-B281-7719602D54BC/Mouse/desc_info.txt'),
    ]
    for train_img_dir, arinfo_path in datasets:
        print(dg.getSetDescriptor(train_img_dir, arinfo_path))
| [
"statistics.stdev",
"cv2.Laplacian",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.metrics.pairwise.euclidean_distances",
"ObjectDetector.ObjectDetector",
"numpy.sum",
"cv2.cvtColor",
"HandSegmentation.Segmentation",
"cv2.resize",
"cv2.imread"
] | [((725, 850), 'HandSegmentation.Segmentation', 'Segmentation', ([], {'model': 'model', 'threshold': 'threshold', 'image_width': 'self.input_width', 'image_height': 'self.input_height', 'debug': 'debug'}), '(model=model, threshold=threshold, image_width=self.input_width,\n image_height=self.input_height, debug=debug)\n', (737, 850), False, 'from HandSegmentation import Segmentation\n'), ((908, 924), 'ObjectDetector.ObjectDetector', 'ObjectDetector', ([], {}), '()\n', (922, 924), False, 'from ObjectDetector import ObjectDetector\n'), ((2306, 2344), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_COLOR'], {}), '(img_path, cv2.IMREAD_COLOR)\n', (2316, 2344), False, 'import cv2\n'), ((2637, 2649), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (2643, 2649), True, 'import numpy as np\n'), ((7362, 7382), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7372, 7382), False, 'import cv2\n'), ((7392, 7431), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (7404, 7431), False, 'import cv2\n'), ((2488, 2533), 'cv2.resize', 'cv2.resize', (['image', 'new_shape', 'cv2.INTER_CUBIC'], {}), '(image, new_shape, cv2.INTER_CUBIC)\n', (2498, 2533), False, 'import cv2\n'), ((4642, 4668), 'statistics.stdev', 'statistics.stdev', (['pos_diff'], {}), '(pos_diff)\n', (4658, 4668), False, 'import statistics\n'), ((4669, 4703), 'statistics.stdev', 'statistics.stdev', (['orientation_diff'], {}), '(orientation_diff)\n', (4685, 4703), False, 'import statistics\n'), ((5417, 5444), 'statistics.stdev', 'statistics.stdev', (['dist_diff'], {}), '(dist_diff)\n', (5433, 5444), False, 'import statistics\n'), ((7571, 7602), 'cv2.Laplacian', 'cv2.Laplacian', (['gray', 'cv2.CV_64F'], {}), '(gray, cv2.CV_64F)\n', (7584, 7602), False, 'import cv2\n'), ((5098, 5163), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (["[img_info1['obj_cam_position']]", '[(0, 0, 0)]'], {}), "([img_info1['obj_cam_position']], 
[(0, 0, 0)])\n", (5117, 5163), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((5182, 5247), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (["[img_info2['obj_cam_position']]", '[(0, 0, 0)]'], {}), "([img_info2['obj_cam_position']], [(0, 0, 0)])\n", (5201, 5247), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4317, 4350), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['[cp1]', '[cp2]'], {}), '([cp1], [cp2])\n', (4336, 4350), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4486, 4517), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['[co1]', '[co2]'], {}), '([co1], [co2])\n', (4503, 4517), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')] |
'''
Population functions.
Code from https://github.com/cortex-lab/phylib/blob/master/phylib/stats/ccg.py by <NAME>.
Code for decoding by <NAME>
'''
import numpy as np
import scipy as sp
import types
from itertools import groupby
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score
from sklearn.utils import shuffle as sklearn_shuffle
def _get_spike_counts_in_bins(spike_times, spike_clusters, intervals):
"""
Return the number of spikes in a sequence of time intervals, for each neuron.
Parameters
----------
spike_times : 1D array
spike times (in seconds)
spike_clusters : 1D array
cluster ids corresponding to each event in `spikes`
intervals : 2D array of shape (n_events, 2)
the start and end times of the events
Returns
---------
counts : 2D array of shape (n_neurons, n_events)
the spike counts of all neurons ffrom scipy.stats import sem, tor all events
value (i, j) is the number of spikes of neuron `neurons[i]` in interval #j
cluster_ids : 1D array
list of cluster ids
"""
# Check input
assert intervals.ndim == 2
assert intervals.shape[1] == 2
assert np.all(np.diff(spike_times) >= 0), "Spike times need to be sorted"
intervals_idx = np.searchsorted(spike_times, intervals)
# For each neuron and each interval, the number of spikes in the interval.
cluster_ids = np.unique(spike_clusters)
n_neurons = len(cluster_ids)
n_intervals = intervals.shape[0]
counts = np.zeros((n_neurons, n_intervals), dtype=np.uint32)
for j in range(n_intervals):
t0, t1 = intervals[j, :]
# Count the number of spikes in the window, for each neuron.
x = np.bincount(
spike_clusters[intervals_idx[j, 0]:intervals_idx[j, 1]],
minlength=cluster_ids.max() + 1)
counts[:, j] = x[cluster_ids]
return counts, cluster_ids
def _index_of(arr, lookup):
"""Replace scalars in an array by their indices in a lookup table.
Implicitely assume that:
* All elements of arr and lookup are non-negative integers.
* All elements or arr belong to lookup.
This is not checked for performance reasons.
"""
# Equivalent of np.digitize(arr, lookup) - 1, but much faster.
# TODO: assertions to disable in production for performance reasons.
# TODO: np.searchsorted(lookup, arr) is faster on small arrays with large
# values
lookup = np.asarray(lookup, dtype=np.int32)
m = (lookup.max() if len(lookup) else 0) + 1
tmp = np.zeros(m + 1, dtype=np.int)
# Ensure that -1 values are kept.
tmp[-1] = -1
if len(lookup):
tmp[lookup] = np.arange(len(lookup))
return tmp[arr]
def _increment(arr, indices):
"""Increment some indices in a 1D vector of non-negative integers.
Repeated indices are taken into account."""
bbins = np.bincount(indices)
arr[:len(bbins)] += bbins
return arr
def _diff_shifted(arr, steps=1):
return arr[steps:] - arr[:len(arr) - steps]
def _create_correlograms_array(n_clusters, winsize_bins):
return np.zeros((n_clusters, n_clusters, winsize_bins // 2 + 1), dtype=np.int32)
def _symmetrize_correlograms(correlograms):
"""Return the symmetrized version of the CCG arrays."""
n_clusters, _, n_bins = correlograms.shape
assert n_clusters == _
# We symmetrize c[i, j, 0].
# This is necessary because the algorithm in correlograms()
# is sensitive to the order of identical spikes.
correlograms[..., 0] = np.maximum(
correlograms[..., 0], correlograms[..., 0].T)
sym = correlograms[..., 1:][..., ::-1]
sym = np.transpose(sym, (1, 0, 2))
return np.dstack((sym, correlograms))
def xcorr(spike_times, spike_clusters, bin_size=None, window_size=None):
    """Compute all pairwise cross-correlograms among the clusters appearing in `spike_clusters`.

    Parameters
    ----------
    :param spike_times: Spike times in seconds (must be sorted ascending).
    :type spike_times: array-like
    :param spike_clusters: Spike-cluster mapping.
    :type spike_clusters: array-like
    :param bin_size: Size of the bin, in seconds.
    :type bin_size: float
    :param window_size: Size of the window, in seconds.
    :type window_size: float

    Returns an `(n_clusters, n_clusters, winsize_samples)` array with all pairwise
    cross-correlograms.
    """
    assert np.all(np.diff(spike_times) >= 0), ("The spike times must be increasing.")
    assert spike_times.ndim == 1
    assert spike_times.shape == spike_clusters.shape
    # Clamp bin and window sizes (in seconds) into a sane range.
    bin_size = np.clip(bin_size, 1e-5, 1e5)
    window_size = np.clip(window_size, 1e-5, 1e5)
    winsize_bins = 2 * int(.5 * window_size / bin_size) + 1
    # Take the cluster order into account.
    clusters = np.unique(spike_clusters)
    n_clusters = len(clusters)
    # Like spike_clusters, but with 0..n_clusters-1 indices.
    spike_clusters_i = _index_of(spike_clusters, clusters)
    # Shift between the two copies of the spike trains.
    shift = 1
    # At a given shift, the mask precises which spikes have matching spikes
    # within the correlogram time window.
    # np.bool (an alias of the builtin bool) was removed in NumPy 1.24 and
    # made this line raise AttributeError; the builtin bool is equivalent.
    mask = np.ones_like(spike_times, dtype=bool)
    correlograms = _create_correlograms_array(n_clusters, winsize_bins)
    # The loop continues as long as there is at least one spike with
    # a matching spike.
    while mask[:-shift].any():
        # Interval between spike i and spike i+shift.
        spike_diff = _diff_shifted(spike_times, shift)
        # Binarize the delays between spike i and spike i+shift.
        spike_diff_b = np.round(spike_diff / bin_size).astype(np.int64)
        # Spikes with no matching spikes are masked.
        mask[:-shift][spike_diff_b > (winsize_bins / 2)] = False
        # Cache the masked spike delays.
        m = mask[:-shift].copy()
        d = spike_diff_b[m]
        # Find the indices in the raveled correlograms array that need
        # to be incremented, taking into account the spike clusters.
        indices = np.ravel_multi_index(
            (spike_clusters_i[:-shift][m], spike_clusters_i[+shift:][m], d), correlograms.shape)
        # Increment the matching spikes in the correlograms array.
        _increment(correlograms.ravel(), indices)
        shift += 1
    return _symmetrize_correlograms(correlograms)
def decode(spike_times, spike_clusters, event_times, event_groups, pre_time=0, post_time=0.5,
           classifier='bayes', cross_validation='kfold', num_splits=5, prob_left=None,
           custom_validation=None, n_neurons='all', iterations=1, shuffle=False, phase_rand=False):
    """
    Use decoding to classify groups of trials (e.g. stim left/right). Classification is done using
    the population vector of summed spike counts from the specified time window. Cross-validation
    is achieved using n-fold cross validation or leave-one-out cross validation. Decoders can
    decode any number of groups. When providing the classifier with an imbalanced dataset (not
    the same number of trials in each group) the chance level will not be 1/groups. In that case,
    to compare the classification performance against chance one has to either determine chance
    level by decoding a shuffled dataset or use the 'auroc' metric as readout (this metric is
    robust against imbalanced datasets)
    Parameters
    ----------
    spike_times : 1D array
        spike times (in seconds)
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spikes`
    event_times : 1D array
        times (in seconds) of the events from the two groups
    event_groups : 1D array
        group identities of the events, can be any number of groups, accepts integers and strings
    pre_time : float
        time (in seconds) preceding the event times
    post_time : float
        time (in seconds) following the event times
    classifier : string or sklearn object
        which decoder to use, either input a scikit learn clf object directly or a string.
        When it's a string options are (all classifiers are used with default options):
            'bayes'         Naive Bayes
            'forest'        Random forest
            'regression'    Logistic regression
            'lda'           Linear Discriminant Analysis
    cross_validation : string
        which cross-validation method to use, options are:
            'none'              No cross-validation
            'kfold'             K-fold cross-validation
            'leave-one-out'     Leave out the trial that is being decoded
            'block'             Leave out the block the to-be-decoded trial is in
            'custom'            Any custom cross-validation provided by the user
    num_splits : integer
        ** only for 'kfold' cross-validation **
        Number of splits to use for k-fold cross validation, a value of 5 means that the decoder
        will be trained on 4/5th of the data and used to predict the remaining 1/5th. This process
        is repeated five times so that all data has been used as both training and test set.
    prob_left : 1D array
        ** only for 'block' cross-validation **
        the probability of the stimulus appearing on the left for each trial in event_times
    custom_validation : generator
        ** only for 'custom' cross-validation **
        a generator object with the splits to be used for cross validation using this format:
            (
                (split1_train_idxs, split1_test_idxs),
                (split2_train_idxs, split2_test_idxs),
                (split3_train_idxs, split3_test_idxs),
             ...)
    n_neurons : string or integer
        number of neurons to randomly subselect from the population (default is 'all')
    iterations : int
        number of times to repeat the decoding (especially useful when subselecting neurons)
    shuffle : boolean
        whether to shuffle the trial labels each decoding iteration
    phase_rand : boolean
        whether to use phase randomization of the activity over trials to use as a "chance"
        predictor
    Returns
    -------
    results : dict
        dictionary with decoding results
        accuracy : float
            accuracy of the classifier in percentage correct
        f1 : float
            F1 score of the classifier
        auroc : float
            the area under the ROC curve of the classification performance
        confusion_matrix : 2D array
            normalized confusion matrix
        predictions : 2D array with dimensions iterations x trials
            predicted group label for all trials in every iteration
        probabilities : 2D array with dimensions iterations x trials
            classification probability for all trials in every iteration
    """
    # Check input
    assert classifier in ['bayes', 'forest', 'regression', 'lda']
    assert cross_validation in ['none', 'kfold', 'leave-one-out', 'block', 'custom']
    assert event_times.shape[0] == event_groups.shape[0]
    if cross_validation == 'block':
        assert event_times.shape[0] == prob_left.shape[0]
    if cross_validation == 'custom':
        assert isinstance(custom_validation, types.GeneratorType)
    # Get matrix of all neuronal responses
    # One [pre, post] counting window per event; rows become trials after .T.
    times = np.column_stack(((event_times - pre_time), (event_times + post_time)))
    pop_vector, cluster_ids = _get_spike_counts_in_bins(spike_times, spike_clusters, times)
    pop_vector = pop_vector.T
    # Exclude last trial if the number of trials is even and phase shuffling
    if (phase_rand is True) & (event_groups.shape[0] % 2 == 0):
        event_groups = event_groups[:-1]
        pop_vector = pop_vector[:-1]
    # Initialize classifier
    if type(classifier) == str:
        if classifier == 'forest':
            clf = RandomForestClassifier()
        elif classifier == 'bayes':
            clf = GaussianNB()
        elif classifier == 'regression':
            clf = LogisticRegression()
        elif classifier == 'lda':
            clf = LinearDiscriminantAnalysis()
    else:
        clf = classifier
    # Pre-allocate variables
    acc = np.zeros(iterations)
    f1 = np.zeros(iterations)
    auroc = np.zeros(iterations)
    conf_matrix_norm = np.zeros((np.shape(np.unique(event_groups))[0],
                                 np.shape(np.unique(event_groups))[0],
                                 iterations))
    pred = np.zeros([iterations, pop_vector.shape[0]])
    prob = np.zeros([iterations, pop_vector.shape[0]])
    for i in range(iterations):
        # Pre-allocate variables for this iteration
        y_pred = np.zeros(event_groups.shape)
        y_probs = np.zeros(event_groups.shape)
        # Get neurons to use for this iteration
        if n_neurons == 'all':
            sub_pop_vector = pop_vector
        else:
            use_neurons = np.random.choice(pop_vector.shape[1], n_neurons, replace=False)
            sub_pop_vector = pop_vector[:, use_neurons]
        # Shuffle trial labels if necessary; the metrics below are then
        # computed against the shuffled labels, giving an empirical chance
        # level.
        if shuffle is True:
            event_groups = sklearn_shuffle(event_groups)
        # Perform phase randomization of activity over trials if necessary
        # (randomizes the Fourier phases of each neuron's across-trial
        # activity while keeping the power spectrum).
        if phase_rand is True:
            if i == 0:
                original_pop_vector = sub_pop_vector
            rand_pop_vector = np.empty(original_pop_vector.shape)
            frequencies = int((original_pop_vector.shape[0] - 1) / 2)
            fsignal = sp.fft.fft(original_pop_vector, axis=0)
            power = np.abs(fsignal[1:1 + frequencies])
            # One random phase per retained frequency, shared across neurons.
            phases = 2 * np.pi * np.random.rand(frequencies)
            for k in range(original_pop_vector.shape[1]):
                newfsignal = fsignal[0, k]
                # Rebuild a Hermitian-symmetric spectrum (conjugate phases on
                # the mirrored half) so the inverse FFT is (close to) real.
                newfsignal = np.append(newfsignal, np.exp(1j * phases) * power[:, k])
                newfsignal = np.append(newfsignal, np.flip(np.exp(-1j * phases) * power[:, k]))
                newsignal = sp.fft.ifft(newfsignal)
                rand_pop_vector[:, k] = np.abs(newsignal.real)
            sub_pop_vector = rand_pop_vector
        if cross_validation == 'none':
            # Fit the model on all the data and predict
            clf.fit(sub_pop_vector, event_groups)
            y_pred = clf.predict(sub_pop_vector)
            # Get the probability of the prediction for ROC analysis
            probs = clf.predict_proba(sub_pop_vector)
            y_probs = probs[:, 1]  # keep positive only
        else:
            # Perform cross-validation
            if cross_validation == 'leave-one-out':
                cv = LeaveOneOut().split(sub_pop_vector)
            elif cross_validation == 'kfold':
                cv = KFold(n_splits=num_splits).split(sub_pop_vector)
            elif cross_validation == 'block':
                block_lengths = [sum(1 for i in g) for k, g in groupby(prob_left)]
                blocks = np.repeat(np.arange(len(block_lengths)), block_lengths)
                cv = LeaveOneGroupOut().split(sub_pop_vector, groups=blocks)
            elif cross_validation == 'custom':
                cv = custom_validation
            # Loop over the splits into train and test
            for train_index, test_index in cv:
                # Fit the model to the training data
                clf.fit(sub_pop_vector[train_index], event_groups[train_index])
                # Predict the test data
                y_pred[test_index] = clf.predict(sub_pop_vector[test_index])
                # Get the probability of the prediction for ROC analysis
                probs = clf.predict_proba(sub_pop_vector[test_index])
                # NOTE(review): probs[:, 1] assumes two classes; with more than
                # two groups only class 1's probability feeds the auroc —
                # confirm this is intended.
                y_probs[test_index] = probs[:, 1]  # keep positive only
        # Calculate performance metrics and confusion matrix
        acc[i] = accuracy_score(event_groups, y_pred)
        f1[i] = f1_score(event_groups, y_pred)
        auroc[i] = roc_auc_score(event_groups, y_probs)
        conf_matrix = confusion_matrix(event_groups, y_pred)
        conf_matrix_norm[:, :, i] = conf_matrix / conf_matrix.sum(axis=1)[:, np.newaxis]
        # Add prediction and probability to matrix
        pred[i, :] = y_pred
        prob[i, :] = y_probs
    # Make integers from arrays when there's only one iteration
    if iterations == 1:
        acc = acc[0]
        f1 = f1[0]
        auroc = auroc[0]
    # Add to results dictionary
    if cross_validation == 'kfold':
        results = dict({'accuracy': acc, 'f1': f1, 'auroc': auroc,
                        'predictions': pred, 'probabilities': prob,
                        'confusion_matrix': conf_matrix_norm,
                        'n_groups': np.shape(np.unique(event_groups))[0],
                        'classifier': classifier, 'cross_validation': '%d-fold' % num_splits,
                        'iterations': iterations, 'shuffle': shuffle})
    else:
        results = dict({'accuracy': acc, 'f1': f1, 'auroc': auroc,
                        'predictions': pred, 'probabilities': prob,
                        'confusion_matrix': conf_matrix_norm,
                        'n_groups': np.shape(np.unique(event_groups))[0],
                        'classifier': classifier, 'cross_validation': cross_validation,
                        'iterations': iterations, 'shuffle': shuffle})
    return results
def lda_project(spike_times, spike_clusters, event_times, event_groups, pre_time=0, post_time=0.5,
                cross_validation='kfold', num_splits=5, prob_left=None, custom_validation=None):
    """
    Use linear discriminant analysis to project population vectors to the line that best separates
    the two groups. When cross-validation is used, the LDA projection is fitted on the training
    data after which the test data is projected to this projection.
    spike_times : 1D array
        spike times (in seconds)
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spikes`
    event_times : 1D array
        times (in seconds) of the events from the two groups
    event_groups : 1D array
        group identities of the events, can be any number of groups, accepts integers and strings
    pre_time : float
        time (in seconds) preceding the event times
    post_time : float
        time (in seconds) following the event times
    cross_validation : string
        which cross-validation method to use, options are:
            'none'              No cross-validation
            'kfold'             K-fold cross-validation
            'leave-one-out'     Leave out the trial that is being decoded
            'block'             Leave out the block the to-be-decoded trial is in
            'custom'            Any custom cross-validation provided by the user
    num_splits : integer
        ** only for 'kfold' cross-validation **
        Number of splits to use for k-fold cross validation, a value of 5 means that the decoder
        will be trained on 4/5th of the data and used to predict the remaining 1/5th. This process
        is repeated five times so that all data has been used as both training and test set.
    prob_left : 1D array
        ** only for 'block' cross-validation **
        the probability of the stimulus appearing on the left for each trial in event_times
    custom_validation : generator
        ** only for 'custom' cross-validation **
        a generator object with the splits to be used for cross validation using this format:
            (
                (split1_train_idxs, split1_test_idxs),
                (split2_train_idxs, split2_test_idxs),
                (split3_train_idxs, split3_test_idxs),
             ...)
    n_neurons : int
        Group size of number of neurons to be sub-selected
    Returns
    -------
    lda_projection : 1D array
        the position along the LDA projection axis for the population vector of each trial
    """
    # Check input
    assert cross_validation in ['none', 'kfold', 'leave-one-out', 'block', 'custom']
    assert event_times.shape[0] == event_groups.shape[0]
    if cross_validation == 'block':
        assert event_times.shape[0] == prob_left.shape[0]
    if cross_validation == 'custom':
        assert isinstance(custom_validation, types.GeneratorType)
    # Get matrix of all neuronal responses
    times = np.column_stack(((event_times - pre_time), (event_times + post_time)))
    pop_vector, cluster_ids = _get_spike_counts_in_bins(spike_times, spike_clusters, times)
    # NOTE(review): np.rot90 transposes (neurons, events) to (events, neurons)
    # but also REVERSES the event order, while event_groups is not reversed —
    # decode() uses pop_vector.T here instead. Confirm whether .T was intended.
    pop_vector = np.rot90(pop_vector)
    # Initialize
    lda = LinearDiscriminantAnalysis()
    lda_projection = np.zeros(event_groups.shape)
    if cross_validation == 'none':
        # Find the best LDA projection on all data and transform those data
        lda_projection = lda.fit_transform(pop_vector, event_groups)
    else:
        # Perform cross-validation
        if cross_validation == 'leave-one-out':
            cv = LeaveOneOut().split(pop_vector)
        elif cross_validation == 'kfold':
            cv = KFold(n_splits=num_splits).split(pop_vector)
        elif cross_validation == 'block':
            # One cross-validation group per contiguous run of identical
            # prob_left values (i.e. per block).
            block_lengths = [sum(1 for i in g) for k, g in groupby(prob_left)]
            blocks = np.repeat(np.arange(len(block_lengths)), block_lengths)
            cv = LeaveOneGroupOut().split(pop_vector, groups=blocks)
        elif cross_validation == 'custom':
            cv = custom_validation
        # Loop over the splits into train and test
        for train_index, test_index in cv:
            # Find LDA projection on the training data
            lda.fit(pop_vector[train_index], [event_groups[j] for j in train_index])
            # Project the held-out test data to projection
            lda_projection[test_index] = np.rot90(lda.transform(pop_vector[test_index]))[0]
    return lda_projection
| [
"numpy.clip",
"numpy.random.rand",
"numpy.ravel_multi_index",
"numpy.column_stack",
"sklearn.metrics.roc_auc_score",
"numpy.rot90",
"scipy.fft.fft",
"sklearn.model_selection.KFold",
"numpy.searchsorted",
"numpy.asarray",
"numpy.diff",
"numpy.exp",
"numpy.empty",
"numpy.maximum",
"numpy.r... | [((1639, 1678), 'numpy.searchsorted', 'np.searchsorted', (['spike_times', 'intervals'], {}), '(spike_times, intervals)\n', (1654, 1678), True, 'import numpy as np\n'), ((1780, 1805), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (1789, 1805), True, 'import numpy as np\n'), ((1892, 1943), 'numpy.zeros', 'np.zeros', (['(n_neurons, n_intervals)'], {'dtype': 'np.uint32'}), '((n_neurons, n_intervals), dtype=np.uint32)\n', (1900, 1943), True, 'import numpy as np\n'), ((2856, 2890), 'numpy.asarray', 'np.asarray', (['lookup'], {'dtype': 'np.int32'}), '(lookup, dtype=np.int32)\n', (2866, 2890), True, 'import numpy as np\n'), ((2952, 2981), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {'dtype': 'np.int'}), '(m + 1, dtype=np.int)\n', (2960, 2981), True, 'import numpy as np\n'), ((3296, 3316), 'numpy.bincount', 'np.bincount', (['indices'], {}), '(indices)\n', (3307, 3316), True, 'import numpy as np\n'), ((3526, 3599), 'numpy.zeros', 'np.zeros', (['(n_clusters, n_clusters, winsize_bins // 2 + 1)'], {'dtype': 'np.int32'}), '((n_clusters, n_clusters, winsize_bins // 2 + 1), dtype=np.int32)\n', (3534, 3599), True, 'import numpy as np\n'), ((3970, 4026), 'numpy.maximum', 'np.maximum', (['correlograms[..., 0]', 'correlograms[..., 0].T'], {}), '(correlograms[..., 0], correlograms[..., 0].T)\n', (3980, 4026), True, 'import numpy as np\n'), ((4094, 4122), 'numpy.transpose', 'np.transpose', (['sym', '(1, 0, 2)'], {}), '(sym, (1, 0, 2))\n', (4106, 4122), True, 'import numpy as np\n'), ((4137, 4167), 'numpy.dstack', 'np.dstack', (['(sym, correlograms)'], {}), '((sym, correlograms))\n', (4146, 4167), True, 'import numpy as np\n'), ((5056, 5090), 'numpy.clip', 'np.clip', (['bin_size', '(1e-05)', '(100000.0)'], {}), '(bin_size, 1e-05, 100000.0)\n', (5063, 5090), True, 'import numpy as np\n'), ((5148, 5185), 'numpy.clip', 'np.clip', (['window_size', '(1e-05)', '(100000.0)'], {}), '(window_size, 1e-05, 100000.0)\n', (5155, 5185), True, 'import numpy as 
np\n'), ((5317, 5342), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (5326, 5342), True, 'import numpy as np\n'), ((5707, 5747), 'numpy.ones_like', 'np.ones_like', (['spike_times'], {'dtype': 'np.bool'}), '(spike_times, dtype=np.bool)\n', (5719, 5747), True, 'import numpy as np\n'), ((11929, 11995), 'numpy.column_stack', 'np.column_stack', (['(event_times - pre_time, event_times + post_time)'], {}), '((event_times - pre_time, event_times + post_time))\n', (11944, 11995), True, 'import numpy as np\n'), ((12807, 12827), 'numpy.zeros', 'np.zeros', (['iterations'], {}), '(iterations)\n', (12815, 12827), True, 'import numpy as np\n'), ((12838, 12858), 'numpy.zeros', 'np.zeros', (['iterations'], {}), '(iterations)\n', (12846, 12858), True, 'import numpy as np\n'), ((12872, 12892), 'numpy.zeros', 'np.zeros', (['iterations'], {}), '(iterations)\n', (12880, 12892), True, 'import numpy as np\n'), ((13096, 13139), 'numpy.zeros', 'np.zeros', (['[iterations, pop_vector.shape[0]]'], {}), '([iterations, pop_vector.shape[0]])\n', (13104, 13139), True, 'import numpy as np\n'), ((13152, 13195), 'numpy.zeros', 'np.zeros', (['[iterations, pop_vector.shape[0]]'], {}), '([iterations, pop_vector.shape[0]])\n', (13160, 13195), True, 'import numpy as np\n'), ((21055, 21121), 'numpy.column_stack', 'np.column_stack', (['(event_times - pre_time, event_times + post_time)'], {}), '((event_times - pre_time, event_times + post_time))\n', (21070, 21121), True, 'import numpy as np\n'), ((21237, 21257), 'numpy.rot90', 'np.rot90', (['pop_vector'], {}), '(pop_vector)\n', (21245, 21257), True, 'import numpy as np\n'), ((21289, 21317), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (21315, 21317), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((21340, 21368), 'numpy.zeros', 'np.zeros', (['event_groups.shape'], {}), '(event_groups.shape)\n', (21348, 21368), True, 'import numpy 
as np\n'), ((6596, 6706), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['(spike_clusters_i[:-shift][m], spike_clusters_i[+shift:][m], d)', 'correlograms.shape'], {}), '((spike_clusters_i[:-shift][m], spike_clusters_i[+shift\n :][m], d), correlograms.shape)\n', (6616, 6706), True, 'import numpy as np\n'), ((13304, 13332), 'numpy.zeros', 'np.zeros', (['event_groups.shape'], {}), '(event_groups.shape)\n', (13312, 13332), True, 'import numpy as np\n'), ((13352, 13380), 'numpy.zeros', 'np.zeros', (['event_groups.shape'], {}), '(event_groups.shape)\n', (13360, 13380), True, 'import numpy as np\n'), ((16477, 16513), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['event_groups', 'y_pred'], {}), '(event_groups, y_pred)\n', (16491, 16513), False, 'from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score\n'), ((16531, 16561), 'sklearn.metrics.f1_score', 'f1_score', (['event_groups', 'y_pred'], {}), '(event_groups, y_pred)\n', (16539, 16561), False, 'from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score\n'), ((16582, 16618), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['event_groups', 'y_probs'], {}), '(event_groups, y_probs)\n', (16595, 16618), False, 'from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score\n'), ((16642, 16680), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['event_groups', 'y_pred'], {}), '(event_groups, y_pred)\n', (16658, 16680), False, 'from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, roc_auc_score\n'), ((1556, 1576), 'numpy.diff', 'np.diff', (['spike_times'], {}), '(spike_times)\n', (1563, 1576), True, 'import numpy as np\n'), ((4859, 4879), 'numpy.diff', 'np.diff', (['spike_times'], {}), '(spike_times)\n', (4866, 4879), True, 'import numpy as np\n'), ((12468, 12492), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (12490, 12492), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((13547, 13610), 'numpy.random.choice', 'np.random.choice', (['pop_vector.shape[1]', 'n_neurons'], {'replace': '(False)'}), '(pop_vector.shape[1], n_neurons, replace=False)\n', (13563, 13610), True, 'import numpy as np\n'), ((13772, 13801), 'sklearn.utils.shuffle', 'sklearn_shuffle', (['event_groups'], {}), '(event_groups)\n', (13787, 13801), True, 'from sklearn.utils import shuffle as sklearn_shuffle\n'), ((14021, 14056), 'numpy.empty', 'np.empty', (['original_pop_vector.shape'], {}), '(original_pop_vector.shape)\n', (14029, 14056), True, 'import numpy as np\n'), ((14151, 14190), 'scipy.fft.fft', 'sp.fft.fft', (['original_pop_vector'], {'axis': '(0)'}), '(original_pop_vector, axis=0)\n', (14161, 14190), True, 'import scipy as sp\n'), ((14212, 14246), 'numpy.abs', 'np.abs', (['fsignal[1:1 + frequencies]'], {}), '(fsignal[1:1 + frequencies])\n', (14218, 14246), True, 'import numpy as np\n'), ((6155, 6186), 'numpy.round', 'np.round', (['(spike_diff / bin_size)'], {}), '(spike_diff / bin_size)\n', (6163, 6186), True, 'import numpy as np\n'), ((12549, 12561), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (12559, 12561), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((14281, 14308), 'numpy.random.rand', 'np.random.rand', (['frequencies'], {}), '(frequencies)\n', (14295, 14308), True, 'import numpy as np\n'), ((14625, 14648), 'scipy.fft.ifft', 'sp.fft.ifft', (['newfsignal'], {}), '(newfsignal)\n', (14636, 14648), True, 'import scipy as sp\n'), ((14690, 14712), 'numpy.abs', 'np.abs', (['newsignal.real'], {}), '(newsignal.real)\n', (14696, 14712), True, 'import numpy as np\n'), ((12623, 12643), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (12641, 12643), False, 'from sklearn.linear_model import LogisticRegression\n'), ((12936, 12959), 'numpy.unique', 'np.unique', (['event_groups'], {}), '(event_groups)\n', (12945, 12959), True, 'import numpy as np\n'), ((13008, 13031), 
'numpy.unique', 'np.unique', (['event_groups'], {}), '(event_groups)\n', (13017, 13031), True, 'import numpy as np\n'), ((21670, 21683), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (21681, 21683), False, 'from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut\n'), ((12698, 12726), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (12724, 12726), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((14464, 14485), 'numpy.exp', 'np.exp', (['(1.0j * phases)'], {}), '(1.0j * phases)\n', (14470, 14485), True, 'import numpy as np\n'), ((15278, 15291), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (15289, 15291), False, 'from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut\n'), ((17362, 17385), 'numpy.unique', 'np.unique', (['event_groups'], {}), '(event_groups)\n', (17371, 17385), True, 'import numpy as np\n'), ((17817, 17840), 'numpy.unique', 'np.unique', (['event_groups'], {}), '(event_groups)\n', (17826, 17840), True, 'import numpy as np\n'), ((21763, 21789), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_splits'}), '(n_splits=num_splits)\n', (21768, 21789), False, 'from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut\n'), ((14559, 14581), 'numpy.exp', 'np.exp', (['(-1.0j * phases)'], {}), '(-1.0j * phases)\n', (14565, 14581), True, 'import numpy as np\n'), ((15383, 15409), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_splits'}), '(n_splits=num_splits)\n', (15388, 15409), False, 'from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut\n'), ((21911, 21929), 'itertools.groupby', 'groupby', (['prob_left'], {}), '(prob_left)\n', (21918, 21929), False, 'from itertools import groupby\n'), ((22027, 22045), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (22043, 22045), False, 'from 
sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut\n'), ((15543, 15561), 'itertools.groupby', 'groupby', (['prob_left'], {}), '(prob_left)\n', (15550, 15561), False, 'from itertools import groupby\n'), ((15667, 15685), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (15683, 15685), False, 'from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut\n')] |
import numpy as np
import copy
from sklearn.linear_model import SGDRegressor
from skmultiflow.core import BaseSKMObject, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin
from skmultiflow.utils import check_random_state
class RegressorChain(BaseSKMObject, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin):
    """ Regressor Chains for multi-output learning.

    Parameters
    ----------
    base_estimator: skmultiflow.core.BaseSKMObject or sklearn.BaseEstimator (default=SGDRegressor)
        Each member of the ensemble is an instance of the base estimator.
        If None, a fresh ``SGDRegressor()`` is created per chain instance.

    order : str (default=None)
        `None` to use default order, 'random' for random order.

    random_state: int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by `np.random`.

    Notes
    -----
    Regressor Chains are a modification of Classifier Chains [1]_ for regression.

    References
    ----------
    .. [1] Read, Jesse, <NAME>, <NAME>, and <NAME>. "Classifier chains for multi-label
       classification." In Joint European Conference on Machine Learning and Knowledge Discovery in Databases,
       pp. 254-269. Springer, Berlin, Heidelberg, 2009.

    """
    def __init__(self, base_estimator=None, order=None, random_state=None):
        super().__init__()
        # Bug fix: the previous signature used ``base_estimator=SGDRegressor()``,
        # a mutable default evaluated once at import time, so every instance
        # created without an explicit estimator shared the same object.
        self.base_estimator = SGDRegressor() if base_estimator is None else base_estimator
        self.order = order
        self.random_state = random_state
        self.chain = None        # permutation of target indices defining the chain order
        self.ensemble = None     # one (deep-copied) estimator per target
        self.L = None            # number of targets
        self._random_state = None   # This is the actual random_state object used internally
        self.__configure()

    def __configure(self):
        # Reset the learned state; the RandomState is rebuilt from the seed.
        self.ensemble = None
        self.L = -1
        self._random_state = check_random_state(self.random_state)

    def fit(self, X, y, sample_weight=None):
        """ Fit the model.

        Parameters
        ----------
        X : numpy.ndarray of shape (n_samples, n_features)
            The features to train the model.

        y: numpy.ndarray of shape (n_samples, n_targets)
            An array-like with the target values of all samples in X.

        sample_weight: Not used (default=None)

        Returns
        -------
        self

        """
        N, self.L = y.shape
        L = self.L
        N, D = X.shape

        self.chain = np.arange(L)
        if self.order == 'random':
            self._random_state.shuffle(self.chain)

        # Set the chain order
        y = y[:, self.chain]

        # Train: estimator j sees the features plus the first j targets.
        self.ensemble = [copy.deepcopy(self.base_estimator) for _ in range(L)]
        XY = np.zeros((N, D + L-1))
        XY[:, 0:D] = X
        XY[:, D:] = y[:, 0:L - 1]
        for j in range(self.L):
            self.ensemble[j].fit(XY[:, 0:D + j], y[:, j])
        return self

    def partial_fit(self, X, y, sample_weight=None):
        """ Partially (incrementally) fit the model.

        Parameters
        ----------
        X : numpy.ndarray of shape (n_samples, n_features)
            The features to train the model.

        y: numpy.ndarray of shape (n_samples)
            An array-like with the target values of all samples in X.

        sample_weight: Not used (default=None)

        Returns
        -------
        self

        """
        if self.ensemble is None:
            # This is the first time that the model is fit
            self.fit(X, y)
            return self

        N, self.L = y.shape
        L = self.L
        N, D = X.shape

        # Set the chain order
        y = y[:, self.chain]

        XY = np.zeros((N, D + L-1))
        XY[:, 0:D] = X
        XY[:, D:] = y[:, 0:L - 1]
        for j in range(L):
            self.ensemble[j].partial_fit(XY[:, 0:D + j], y[:, j])

        return self

    def predict(self, X):
        """ Predict target values for the passed data.

        Parameters
        ----------
        X : numpy.ndarray of shape (n_samples, n_features)
            The set of data samples to predict the target values for.

        Returns
        -------
        A numpy.ndarray with all the predictions for the samples in X.

        """
        N, D = X.shape
        Y = np.zeros((N, self.L))
        for j in range(self.L):
            if j > 0:
                # Each estimator consumes the previous estimator's prediction.
                X = np.column_stack([X, Y[:, j-1]])
            Y[:, j] = self.ensemble[j].predict(X)
        # Unset the chain order (back to default)
        return Y[:, np.argsort(self.chain)]

    def reset(self):
        self.__configure()
        return self

    def predict_proba(self, X):
        """ Not implemented for this method.

        """
        raise NotImplementedError

    def _more_tags(self):
        return {'multioutput': True,
                'multioutput_only': True}
| [
"skmultiflow.utils.check_random_state",
"sklearn.linear_model.SGDRegressor",
"numpy.column_stack",
"numpy.argsort",
"numpy.zeros",
"copy.deepcopy",
"numpy.arange"
] | [((1433, 1447), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {}), '()\n', (1445, 1447), False, 'from sklearn.linear_model import SGDRegressor\n'), ((1924, 1961), 'skmultiflow.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (1942, 1961), False, 'from skmultiflow.utils import check_random_state\n'), ((2505, 2517), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (2514, 2517), True, 'import numpy as np\n'), ((2773, 2797), 'numpy.zeros', 'np.zeros', (['(N, D + L - 1)'], {}), '((N, D + L - 1))\n', (2781, 2797), True, 'import numpy as np\n'), ((3726, 3750), 'numpy.zeros', 'np.zeros', (['(N, D + L - 1)'], {}), '((N, D + L - 1))\n', (3734, 3750), True, 'import numpy as np\n'), ((4322, 4343), 'numpy.zeros', 'np.zeros', (['(N, self.L)'], {}), '((N, self.L))\n', (4330, 4343), True, 'import numpy as np\n'), ((2706, 2740), 'copy.deepcopy', 'copy.deepcopy', (['self.base_estimator'], {}), '(self.base_estimator)\n', (2719, 2740), False, 'import copy\n'), ((4417, 4450), 'numpy.column_stack', 'np.column_stack', (['[X, Y[:, j - 1]]'], {}), '([X, Y[:, j - 1]])\n', (4432, 4450), True, 'import numpy as np\n'), ((4570, 4592), 'numpy.argsort', 'np.argsort', (['self.chain'], {}), '(self.chain)\n', (4580, 4592), True, 'import numpy as np\n')] |
import datetime
import numpy as np
import requests
import time
def requests_get(url, header):
    """GET *url* with *header* (SSL verification disabled, 10 s timeout).

    On a connection-pool exhaustion or a read timeout the call backs off for
    60 seconds before re-raising.  Bug fix: the original swallowed every
    exception and implicitly returned ``None``, unlike the companion
    :func:`requests_post` which re-raises; errors now always propagate.
    """
    try:
        return requests.get(url, verify=False, timeout=10, headers=header)
    except Exception as e:
        # Back off on transient network failures before surfacing them.
        if 'Max retries exceeded' in str(e) or 'Read timed out' in str(e):
            time.sleep(60)
        raise
def requests_post(url, header, form_data):
    """POST *form_data* to *url* (SSL verification disabled, 10 s timeout).

    Sleeps 60 seconds on connection-pool exhaustion or read-timeout errors
    before re-raising; every exception propagates to the caller.
    """
    try:
        response = requests.post(url, verify=False, timeout=10,
                                 headers=header, data=form_data)
    except Exception as err:
        message = str(err)
        if 'Max retries exceeded' in message or 'Read timed out' in message:
            time.sleep(60)
        raise
    return response
def get_time():
    """Return the current local time formatted as ``YYYY-mm-dd HH:MM:SS``."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
class Crawler:
    """Date/epoch conversion helpers and simple outlier filtering used by the
    crawler.  All epoch values are milliseconds since 1970-01-01 unless noted.
    """
    def date2days(self, date: str) -> int:
        """Convert a ``YYYY-mm-dd`` string to epoch milliseconds at midnight."""
        # date = '2018-08-03'
        date = datetime.datetime.strptime(date,'%Y-%m-%d').date()
        value = (date - datetime.date(1970,1,1)).days
        # days -> milliseconds
        value = value*60*60*24*1000
        return value
    def days2date(self, day) -> datetime.date:
        """Convert epoch milliseconds to a :class:`datetime.date`."""
        #day = 631497600000
        # 60s = 1min
        # 60min = 1hr
        day = int(day)
        # milliseconds -> whole days
        day = int( day/1000/60/60/24 )
        value = datetime.date(1970,1,1) + datetime.timedelta(days = day)
        return value
    def millisecond2date(self, ms) -> str:
        """Format epoch milliseconds as ``YYYY-mm-dd HH:MM:00`` (seconds zeroed)."""
        # ms = 1559489350000
        ms = int(ms)
        date = str( self.days2date(ms) )
        days = int( ms/1000/60/60/24 )
        # seconds elapsed within the day
        second = ms/1000 - days*60*60*24
        hour = int(second/60/60)
        # NOTE(review): the trailing ``-1`` looks like a deliberate offset
        # (possibly compensating a timezone/rounding quirk) -- confirm intent.
        minute = int( ( second - hour*60*60 )/60 )-1
        # zero-pad the hour; negative values clamp to '00'
        if hour < 0:
            hour = '00'
        elif hour < 10:
            hour = '0' + str(hour)
        else:
            hour = str(hour)
        # zero-pad the minute; negative values clamp to '00'
        if minute < 0:
            minute = '00'
        elif minute < 10:
            minute = '0'+ str(minute)
        else:
            minute = str(minute)
        #second = (second - hour*60*60 - minute*60)#hour
        value = date + ' ' + hour + ':' + minute + ':00'
        return value
    def millisecond2date2(self, ms) -> str:
        """Format epoch milliseconds as ``YYYY-mm-dd HH:MM:SS`` (real seconds,
        no minute offset -- unlike :meth:`millisecond2date`)."""
        # ms = 1566898740000
        ms = int(ms)
        date = str( self.days2date(ms) )
        days = int( ms/1000/60/60/24 )
        second = ms/1000 - days*60*60*24
        hour = int(second/60/60)
        minute = int( ( second - hour*60*60 )/60 )
        second = int( second - hour*60*60 - minute*60 )
        # zero-pad the hour; negative values clamp to '00'
        if hour < 0:
            hour = '00'
        elif hour < 10:
            hour = '0' + str(hour)
        else:
            hour = str(hour)
        # zero-pad the minute; negative values clamp to '00'
        if minute < 0:
            minute = '00'
        elif minute < 10:
            minute = '0'+ str(minute)
        else:
            minute = str(minute)
        #second = (second - hour*60*60 - minute*60)#hour
        if second < 10:
            second = '0{}'.format(second)
        value = '{} {}:{}:{}'.format(date,hour,minute,second)
        return value
    def date2millisecond(self, date: str) -> int:
        """Convert a ``YYYY-mm-dd HH:MM:SS`` string to epoch *seconds*.

        NOTE(review): despite the name, the return value is seconds (the
        ``*1000`` conversion is commented out) and one minute is added to the
        parsed time -- presumably matching :meth:`millisecond2date`'s ``-1``
        minute offset; confirm before changing either.
        """
        #date = '2019-06-02 15:30:00'
        date = datetime.datetime.strptime(date,'%Y-%m-%d %H:%M:%S')
        date = date + datetime.timedelta(minutes = 1)
        second = date - datetime.datetime(1970,1,1,0,0,0)
        second = second.days*24*60*60+second.seconds
        #ms = ms*1000
        return second
    def create_date(self, start: str, today = False):
        """Return the list of ``YYYY-mm-dd`` strings from the day after *start*
        up to yesterday (or up to today when *today* is True)."""
        # start = '2018-07-31'
        start = datetime.datetime.strptime( start,"%Y-%m-%d").date() + datetime.timedelta(days = 1)
        end = datetime.date.today()
        day_len = (end - start).days
        if today : day_len = (end - start).days + 1
        date = [ str( start + datetime.timedelta(days = dat) ) for dat in range(day_len) ]
        return date
    def remove_outlier(self, data, var_name):
        """Drop rows of *data* whose *var_name* value lies outside
        ``(-5*mean, 5*mean)``; no-op when the column's std-dev is below 1.

        NOTE(review): *data* appears to be a pandas DataFrame (boolean-mask
        indexing); the mean-based bounds behave oddly for negative means --
        confirm this matches the intended outlier definition.
        """
        value = list( data[var_name] )
        mean = np.mean(value, axis=0)
        sd = np.std(value, axis=0)
        if sd<1:
            return data
        _bool = []
        for x in value:
            if (5*mean) > x > (-5*mean):
                _bool.append(True)
            else:
                _bool.append(False)
        data = data[_bool]
        return data
def change_chinese_date_us(d):
    """Convert a Minguo (ROC) calendar date string ``'yyy/m/d'`` into a
    Gregorian :class:`datetime.date` (ROC year 1 corresponds to 1912 CE)."""
    roc_year, month, day = (int(part) for part in d.split('/'))
    return datetime.date(roc_year + 1911, month, day)
| [
"datetime.datetime",
"numpy.mean",
"requests.post",
"datetime.datetime.strptime",
"requests.get",
"datetime.timedelta",
"time.sleep",
"datetime.date",
"numpy.std",
"datetime.date.today",
"time.time"
] | [((4302, 4324), 'datetime.date', 'datetime.date', (['y', 'm', 'd'], {}), '(y, m, d)\n', (4315, 4324), False, 'import datetime\n'), ((118, 177), 'requests.get', 'requests.get', (['url'], {'verify': '(False)', 'timeout': '(10)', 'headers': 'header'}), '(url, verify=False, timeout=10, headers=header)\n', (130, 177), False, 'import requests\n'), ((407, 483), 'requests.post', 'requests.post', (['url'], {'verify': '(False)', 'timeout': '(10)', 'headers': 'header', 'data': 'form_data'}), '(url, verify=False, timeout=10, headers=header, data=form_data)\n', (420, 483), False, 'import requests\n'), ((3053, 3106), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(date, '%Y-%m-%d %H:%M:%S')\n", (3079, 3106), False, 'import datetime\n'), ((3513, 3534), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3532, 3534), False, 'import datetime\n'), ((3852, 3874), 'numpy.mean', 'np.mean', (['value'], {'axis': '(0)'}), '(value, axis=0)\n', (3859, 3874), True, 'import numpy as np\n'), ((3888, 3909), 'numpy.std', 'np.std', (['value'], {'axis': '(0)'}), '(value, axis=0)\n', (3894, 3909), True, 'import numpy as np\n'), ((745, 756), 'time.time', 'time.time', ([], {}), '()\n', (754, 756), False, 'import time\n'), ((1205, 1230), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1218, 1230), False, 'import datetime\n'), ((1231, 1259), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'day'}), '(days=day)\n', (1249, 1259), False, 'import datetime\n'), ((3128, 3157), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3146, 3157), False, 'import datetime\n'), ((3184, 3222), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0, 0)\n', (3201, 3222), False, 'import datetime\n'), ((3470, 3496), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3488, 3496), 
False, 'import datetime\n'), ((315, 329), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (325, 329), False, 'import time\n'), ((622, 636), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (632, 636), False, 'import time\n'), ((852, 896), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (878, 896), False, 'import datetime\n'), ((927, 952), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (940, 952), False, 'import datetime\n'), ((3415, 3460), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start', '"""%Y-%m-%d"""'], {}), "(start, '%Y-%m-%d')\n", (3441, 3460), False, 'import datetime\n'), ((3663, 3691), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'dat'}), '(days=dat)\n', (3681, 3691), False, 'import datetime\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Built-in py_transforms_utils functions.
"""
import random
import numpy as np
from ..core.py_util_helpers import is_numpy, ExceptionHandler
def all_numpy(args):
    """Return True when *args* consists entirely of NumPy ndarrays.

    *args* is either a single value or a tuple of values, as produced by
    multi-input lambdas.
    """
    if isinstance(args, tuple):
        return all(is_numpy(item) for item in args)
    return is_numpy(args)
def compose(transforms, *args):
    """
    Compose a list of transforms and apply on the image.

    Args:
        transforms (list): A list of transform Class objects to be composed.
        args: One or more NumPy ndarrays (e.g. an image) fed through the chain.

    Returns:
        img (numpy.ndarray), An augmented image in NumPy ndarray.
    """
    # Reject non-ndarray input up front (guard clause instead of nesting).
    if not all_numpy(args):
        raise TypeError('args should be NumPy ndarray. Got {}.'.format(type(args)))
    for transform in transforms:
        try:
            args = transform(*args)
        except Exception:
            result = ExceptionHandler(where="in map(or batch) worker and execute python function")
            result.reraise()
        # Normalize single outputs to a tuple for the next transform's *args.
        if not isinstance(args, tuple):
            args = (args,)
    if not all_numpy(args):
        raise TypeError('args should be NumPy ndarray. Got {}. Append ToTensor() to transforms.'.format(type(args)))
    return args
def one_hot_encoding(label, num_classes, epsilon):
    """
    Apply label smoothing transformation to the input label, and make label be more smoothing and continuous.

    Args:
        label (numpy.ndarray, int or list): label(s) to be applied label smoothing.
        num_classes (int): Num class of object in dataset, value should over 0.
        epsilon (float): The adjustable Hyper parameter. Default is 0.0.

    Returns:
        img (numpy.ndarray), label after being one hot encoded and done label smoothed.

    Raises:
        ValueError: If any label value is outside ``[0, num_classes)``.
    """
    # Flatten once so scalars, lists and arrays are all handled uniformly
    # (the previous ``label.size`` crashed on plain lists/ints).
    flat = np.asarray(label).ravel()
    # Bug fix: valid indices are 0 .. num_classes-1.  The old check used
    # ``label > num_classes``, letting ``label == num_classes`` index out of
    # range, and raised an ambiguous-truth error for multi-element arrays.
    if flat.size and np.max(flat) >= num_classes:
        raise ValueError('the num_classes is smaller than the category number.')
    one_hot_label = np.zeros((flat.size, num_classes), dtype=int)
    # Vectorized one-hot fill: row i gets a 1 in column flat[i].
    one_hot_label[np.arange(flat.size), flat] = 1
    return (1 - epsilon) * one_hot_label + epsilon / num_classes
def random_order(img, transforms):
    """
    Applies a list of transforms in a random order.

    Args:
        img: Image to be applied transformations in a random order.
        transforms (list): List of the transformations to be applied.

    Returns:
        img, Transformed image.
    """
    # Bug fix: ``random.shuffle`` works in place, so the caller's transform
    # list was silently reordered on every call.  ``random.sample`` of the
    # full length yields an equally-random permutation without mutating it.
    for transform in random.sample(transforms, len(transforms)):
        img = transform(img)
    return img
def random_apply(img, transforms, prob):
    """
    Apply a list of transformation, randomly with a given probability.

    Args:
        img: Image to be randomly applied a list transformations.
        transforms (list): List of transformations to be applied.
        prob (float): The probability to apply the transformation list.

    Returns:
        img, Transformed image.
    """
    draw = random.random()
    # Skip the whole chain when the draw exceeds the requested probability.
    if draw > prob:
        return img
    for transform in transforms:
        img = transform(img)
    return img
def random_choice(img, transforms):
    """
    Random selects one transform from a list of transforms and applies that on the image.

    Args:
        img: Image to be applied transformation.
        transforms (list): List of transformations to be chosen from to apply.

    Returns:
        img, Transformed image.
    """
    chosen = random.choice(transforms)
    return chosen(img)
class FuncWrapper:
    """
    Wrap a python callable with try/except logic so that failures raised inside
    user code surface with map/batch worker context.

    Args:
        transform: Callable python function.

    Returns:
        result, data after apply transformation.
    """

    def __init__(self, transform):
        if not callable(transform):
            # Bug fix: message previously read "only support warping".
            raise ValueError("FuncWrapper only supports wrapping callable python function.")
        self.transform = transform

    def __call__(self, *args):
        result = None
        try:
            result = self.transform(*args)
        except Exception:
            # Capture and re-raise with worker context attached.
            result = ExceptionHandler(where="in map(or batch) worker and execute python function")
            result.reraise()
        return result
| [
"random.random",
"numpy.zeros",
"random.choice",
"random.shuffle"
] | [((2647, 2695), 'numpy.zeros', 'np.zeros', (['(num_elements, num_classes)'], {'dtype': 'int'}), '((num_elements, num_classes), dtype=int)\n', (2655, 2695), True, 'import numpy as np\n'), ((3217, 3243), 'random.shuffle', 'random.shuffle', (['transforms'], {}), '(transforms)\n', (3231, 3243), False, 'import random\n'), ((3726, 3741), 'random.random', 'random.random', ([], {}), '()\n', (3739, 3741), False, 'import random\n'), ((4179, 4204), 'random.choice', 'random.choice', (['transforms'], {}), '(transforms)\n', (4192, 4204), False, 'import random\n')] |
"""Contains an abstract class that makes it easier to implement load word
vectors from text files.
"""
__author__ = '<NAME>'
from typing import List, Dict
from dataclasses import dataclass, field, InitVar
from abc import abstractmethod, ABCMeta
import logging
from pathlib import Path
import pickle
import numpy as np
import h5py
from h5py import Dataset
from zensols.util import time
from zensols.config import Dictable
from zensols.persist import Primeable
from zensols.install import Installer, Resource
from zensols.deepnlp.embed import WordVectorModel, WordEmbedModel
from . import WordEmbedError
logger = logging.getLogger(__name__)
@dataclass
class TextWordModelMetadata(Dictable):
    """Describes a text based :class:`.WordEmbedModel`. This information in this
    class is used to construct paths both text source vector file and all
    generated binary files

    """
    name: str = field()
    """The name of the word vector set (i.e. glove)."""
    desc: str = field()
    """A descriptor about this particular word vector set (i.e. 6B)."""
    dimension: int = field()
    """The dimension of the word vectors."""
    n_vocab: int = field()
    """The number of words in the vocabulary."""
    source_path: Path = field()
    """The path to the text file."""
    sub_directory: InitVar[Path] = field(default=None)
    """The subdirectory to be appended to :obj:`self.bin_dir`, which defaults to
    the directory ``bin/<description>.<dimension>``.

    """
    def __post_init__(self, sub_directory: Path):
        # Derive the binary cache layout next to the raw text vector file; all
        # generated artifacts (vector matrix, vocabulary, index) live under it.
        if sub_directory is None:
            sub_directory = Path('bin', f'{self.desc}.{self.dimension}')
        # directory holding the compiled binary files
        self.bin_dir = self.source_path.parent / sub_directory
        # HDF5 matrix of word vectors
        self.bin_file = self.bin_dir / 'vec.dat'
        # pickled list of vocabulary words
        self.words_file = self.bin_dir / 'words.dat'
        # pickled word -> vector row index mapping
        self.idx_file = self.bin_dir / 'idx.dat'
@dataclass
class TextWordEmbedModel(WordEmbedModel, Primeable, metaclass=ABCMeta):
    """Extensions of this class read a text vectors file and compile, then write a
    binary representation for fast loading.

    """
    DATASET_NAME = 'vec'
    """Name of the dataset in the HD5F file."""

    path: Path = field(default=None)
    """The path to the model file(s)."""

    installer: Installer = field(default=None)
    """The installer used to for the text vector zip file."""

    resource: Resource = field(default=None)
    """The zip resource used to find the path to the model files."""

    @abstractmethod
    def _get_metadata(self) -> TextWordModelMetadata:
        """Create the metadata used to construct paths both text source vector file and
        all generated binary files.

        """
        pass

    def _install(self) -> Path:
        """Install any missing word vector models and return the installed path."""
        self.installer()
        return self.installer[self.resource]

    @property
    def metadata(self):
        """Return the metadata used to construct paths both text source vector file and
        all generated binary files.  The metadata is created lazily on first
        access and cached on the instance.

        :raises WordEmbedError: if neither :obj:`path` nor :obj:`installer` is
                                configured, or an installer is given without a
                                :obj:`resource`

        """
        if self.path is None and self.installer is None:
            # bug fix: message was the double negative 'No path is not set'
            raise WordEmbedError('No path or installer is set')
        if self.installer is not None and self.resource is None:
            # bug fix: message had unbalanced quotes ("not 'resource''")
            raise WordEmbedError("Installer given but no 'resource' set")
        if self.installer is not None:
            self.path = self._install()
        if not hasattr(self, '_metadata'):
            self._metadata = self._get_metadata()
        return self._metadata

    def _get_model_id(self) -> str:
        """Return a string used to uniquely identify this model.

        """
        meta = self.metadata
        return f'{meta.name}: description={meta.desc}, dim={meta.dimension}'

    def _populate_vec_lines(self, words: List[str], word2idx: Dict[str, int],
                            ds: Dataset):
        """Add word vectors to the h5py dataset, vocab and vocab index.

        :param words: the list of vocabulary words (appended to in place)

        :param word2idx: dictionary of word to word vector index (row)

        :param ds: the h5py data structure to add the word vectors

        :raises WordEmbedError: if a line of the source file can not be parsed

        """
        meta = self.metadata
        idx = 0
        lc = 0
        with open(meta.source_path, 'rb') as f:
            for rix, ln in enumerate(f):
                lc += 1
                # each line is: <word> <float> <float> ...
                line = ln.decode().split(' ')
                word = line[0]
                words.append(word)
                word2idx[word] = idx
                idx += 1
                try:
                    ds[rix, :] = line[1:]
                except Exception as e:
                    raise WordEmbedError(
                        f'Could not parse line {lc} (word: {word}): ' +
                        f'{e}; line: {ln}') from e

    def _write_vecs(self) -> None:
        """Compile the text vectors and unconditionally write the h5py binary,
        vocabulary and index files.  Callers gate on file existence (see
        :meth:`_assert_binary_vecs`), so this is only invoked when the binary
        files are missing.  Fixed: the previous docstring claimed the
        existence check happened here, and the return annotation was wrongly
        ``np.ndarray`` for a method that returns nothing.

        """
        meta = self.metadata
        meta.bin_dir.mkdir(parents=True, exist_ok=True)
        words = []
        word2idx = {}
        if logger.isEnabledFor(logging.INFO):
            logger.info(f'writing binary vectors {meta.source_path} ' +
                        f'-> {meta.bin_dir}')
        shape = (meta.n_vocab, meta.dimension)
        if logger.isEnabledFor(logging.INFO):
            logger.info(f'creating h5py binary vec files with shape {shape}:')
            meta.write_to_log(logger, logging.INFO, 1)
        with time(f'wrote h5py to {meta.bin_file}'):
            with h5py.File(meta.bin_file, 'w') as f:
                dset: Dataset = f.create_dataset(
                    self.DATASET_NAME, shape, dtype='float64')
                self._populate_vec_lines(words, word2idx, dset)
        with open(meta.words_file, 'wb') as f:
            pickle.dump(words[:], f)
        with open(meta.idx_file, 'wb') as f:
            pickle.dump(word2idx, f)

    def _assert_binary_vecs(self):
        """Compile the binary files from the text source if they do not exist."""
        meta = self.metadata
        if not meta.bin_file.exists():
            if logger.isEnabledFor(logging.INFO):
                # fixed log message typo: 'wriging' -> 'writing'
                logger.info(f'writing binary vectors to: {meta.bin_file}')
            self._write_vecs()

    def prime(self):
        self._assert_binary_vecs()

    def _create_data(self) -> WordVectorModel:
        """Read the binary h5py, vocabulary and index files from disk, compiling
        them first if necessary.

        """
        self._assert_binary_vecs()
        meta = self.metadata
        if logger.isEnabledFor(logging.INFO):
            logger.info(f'reading binary vector file: {meta.bin_file}')
        with time('loaded {cnt} vectors'):
            with h5py.File(meta.bin_file, 'r') as f:
                ds: Dataset = f[self.DATASET_NAME]
                vectors = ds[:]
            with open(meta.words_file, 'rb') as f:
                words = pickle.load(f)
            with open(meta.idx_file, 'rb') as f:
                word2idx = pickle.load(f)
            cnt = len(word2idx)
        with time('prepared vectors'):
            # append an all-zero vector used for out-of-vocabulary words
            unknown_vec = np.expand_dims(np.zeros(self.dimension), axis=0)
            vectors = np.concatenate((vectors, unknown_vec))
            word2idx[self.UNKNOWN] = len(words)
            words.append(self.UNKNOWN)
            word2vec = {w: vectors[word2idx[w]] for w in words}
        return WordVectorModel(vectors, word2vec, words, word2idx)
| [
"logging.getLogger",
"pickle.dump",
"pathlib.Path",
"zensols.util.time",
"pickle.load",
"zensols.deepnlp.embed.WordVectorModel",
"h5py.File",
"numpy.zeros",
"numpy.concatenate",
"dataclasses.field"
] | [((614, 641), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (631, 641), False, 'import logging\n'), ((902, 909), 'dataclasses.field', 'field', ([], {}), '()\n', (907, 909), False, 'from dataclasses import dataclass, field, InitVar\n'), ((983, 990), 'dataclasses.field', 'field', ([], {}), '()\n', (988, 990), False, 'from dataclasses import dataclass, field, InitVar\n'), ((1085, 1092), 'dataclasses.field', 'field', ([], {}), '()\n', (1090, 1092), False, 'from dataclasses import dataclass, field, InitVar\n'), ((1158, 1165), 'dataclasses.field', 'field', ([], {}), '()\n', (1163, 1165), False, 'from dataclasses import dataclass, field, InitVar\n'), ((1240, 1247), 'dataclasses.field', 'field', ([], {}), '()\n', (1245, 1247), False, 'from dataclasses import dataclass, field, InitVar\n'), ((1321, 1340), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (1326, 1340), False, 'from dataclasses import dataclass, field, InitVar\n'), ((2167, 2186), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (2172, 2186), False, 'from dataclasses import dataclass, field, InitVar\n'), ((2256, 2275), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (2261, 2275), False, 'from dataclasses import dataclass, field, InitVar\n'), ((2364, 2383), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (2369, 2383), False, 'from dataclasses import dataclass, field, InitVar\n'), ((7219, 7270), 'zensols.deepnlp.embed.WordVectorModel', 'WordVectorModel', (['vectors', 'word2vec', 'words', 'word2idx'], {}), '(vectors, word2vec, words, word2idx)\n', (7234, 7270), False, 'from zensols.deepnlp.embed import WordVectorModel, WordEmbedModel\n'), ((1596, 1640), 'pathlib.Path', 'Path', (['"""bin"""', 'f"""{self.desc}.{self.dimension}"""'], {}), "('bin', f'{self.desc}.{self.dimension}')\n", (1600, 1640), False, 'from pathlib import Path\n'), ((5418, 5456), 'zensols.util.time', 
'time', (['f"""wrote h5py to {meta.bin_file}"""'], {}), "(f'wrote h5py to {meta.bin_file}')\n", (5422, 5456), False, 'from zensols.util import time\n'), ((5747, 5771), 'pickle.dump', 'pickle.dump', (['words[:]', 'f'], {}), '(words[:], f)\n', (5758, 5771), False, 'import pickle\n'), ((5829, 5853), 'pickle.dump', 'pickle.dump', (['word2idx', 'f'], {}), '(word2idx, f)\n', (5840, 5853), False, 'import pickle\n'), ((6499, 6527), 'zensols.util.time', 'time', (['"""loaded {cnt} vectors"""'], {}), "('loaded {cnt} vectors')\n", (6503, 6527), False, 'from zensols.util import time\n'), ((6891, 6915), 'zensols.util.time', 'time', (['"""prepared vectors"""'], {}), "('prepared vectors')\n", (6895, 6915), False, 'from zensols.util import time\n'), ((7014, 7052), 'numpy.concatenate', 'np.concatenate', (['(vectors, unknown_vec)'], {}), '((vectors, unknown_vec))\n', (7028, 7052), True, 'import numpy as np\n'), ((5475, 5504), 'h5py.File', 'h5py.File', (['meta.bin_file', '"""w"""'], {}), "(meta.bin_file, 'w')\n", (5484, 5504), False, 'import h5py\n'), ((6546, 6575), 'h5py.File', 'h5py.File', (['meta.bin_file', '"""r"""'], {}), "(meta.bin_file, 'r')\n", (6555, 6575), False, 'import h5py\n'), ((6740, 6754), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6751, 6754), False, 'import pickle\n'), ((6831, 6845), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6842, 6845), False, 'import pickle\n'), ((6958, 6982), 'numpy.zeros', 'np.zeros', (['self.dimension'], {}), '(self.dimension)\n', (6966, 6982), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: sinannasir
"""
import numpy as np
#import matplotlib.pyplot as plt
import project_backend as pb
import tensorflow as tf
import collections
import copy
class DDPG:
    """Deep Deterministic Policy Gradient agent for distributed power control.

    Builds a TF1 (graph-mode) actor/critic pair plus quasi-static copies:
    - critic + critic_target (scopes ``C_*`` / ``Ctarget_*``), target refreshed
      every training step via ``update_class1_critic``;
    - actor + a "broadcast" copy + an "agent" copy (scopes ``A_*`` /
      ``Abroadcast_*`` / ``Aagent_*``): the broadcast copy takes a snapshot of
      the trained actor every ``target_update_count`` slots, and that snapshot
      is handed to the acting network ``time_slot_to_pass_weights`` slots later,
      modelling a delayed weight broadcast to distributed transmitters.

    One instance serves all ``N`` links; experiences from every link share a
    single replay memory.  Actions are scalar transmit-power fractions in
    [0, 1] (``num_actions == 1``).
    """
    def __init__(self, options,options_policy,N,Pmax,noise_var):
        """Build the full TF graph and all bookkeeping state.

        options        -- dict with ``simulation`` (total_samples, R_defined)
                          and ``train_episodes`` (T_train) sub-dicts.
        options_policy -- hyper-parameters: learning-rate schedules for actor
                          and critic, epsilon schedule, hidden-layer sizes,
                          replay/batch sizes, target-update cadence, and the
                          gain-normalization radii scale_R_inner/scale_R_interf.
        N              -- number of transmitter/receiver links (agents).
        Pmax           -- maximum transmit power (used to normalize actions).
        noise_var      -- receiver noise variance (linear scale).
        """
        tf.reset_default_graph()
        self.total_samples = options['simulation']['total_samples']
        self.train_episodes = options['train_episodes']
        R_defined = options['simulation']['R_defined']
        # Hexagonal-cell geometry: circumradius from the defined cell radius.
        self.R = (2.0/np.sqrt(3))*R_defined
        self.N = N
        self.Pmax = Pmax
        self.noise_var = noise_var
        # Per-agent short histories of state-feature fragments; maxlen 4 / 3
        # so only the most recent time slots are retained (see local_state).
        self.tmp_exp_type_1 = []
        self.tmp_exp_type_2 = []
        self.prev_suminterferences = np.zeros(N)
        for i in range(self.N):
            self.tmp_exp_type_1.append(collections.deque([],4))
            self.tmp_exp_type_2.append(collections.deque([],3))
        self.num_output = self.num_actions = 1 # Number of actions (scalar power level)
        self.discount_factor = options_policy['discount_factor']
        self.N_neighbors = options_policy['N_neighbors']
        # State layout: 6 own-link scalars + 7 features per tracked neighbor.
        self.num_input = 6 + 7 * self.N_neighbors
        learning_rate_0 = options_policy['learning_rate_0_critic']
        learning_rate_decay = options_policy['learning_rate_decay_critic']
        learning_rate_min = options_policy['learning_rate_min_critic']
        # Pre-compute the per-slot critic learning rate: exponential decay,
        # floored at learning_rate_min, reset at each training-episode start.
        self.learning_rate_all_critic = [learning_rate_0]
        for i in range(1,self.total_samples):
            if i % self.train_episodes['T_train'] == 0:
                self.learning_rate_all_critic.append(learning_rate_0)
            else:
                self.learning_rate_all_critic.append(max(learning_rate_min,learning_rate_decay*self.learning_rate_all_critic[-1]))
        learning_rate_0 = options_policy['learning_rate_0_actor']
        learning_rate_decay = options_policy['learning_rate_decay_actor']
        learning_rate_min = options_policy['learning_rate_min_actor']
        # Same schedule shape for the actor.
        self.learning_rate_all_actor = [learning_rate_0]
        for i in range(1,self.total_samples):
            if i % self.train_episodes['T_train'] == 0:
                self.learning_rate_all_actor.append(learning_rate_0)
            else:
                self.learning_rate_all_actor.append(max(learning_rate_min,learning_rate_decay*self.learning_rate_all_actor[-1]))
        self.batch_size = options_policy['batch_size']
        memory_per_agent = options_policy['memory_per_agent']
        # epsilon greedy algorithm
        max_epsilon = options_policy['max_epsilon']
        epsilon_decay = options_policy['epsilon_decay']
        min_epsilon = options_policy['min_epsilon']
        # quasi-static target network update
        self.target_update_count = options_policy['target_update_count']
        self.time_slot_to_pass_weights = options_policy['time_slot_to_pass_weights'] # 50 slots needed to pass the weights
        n_hidden_1 = options_policy['n_hiddens'][0]
        n_hidden_2 = options_policy['n_hiddens'][1]
        n_hidden_3 = options_policy['n_hiddens'][2]
        scale_R_inner = options_policy['scale_R_inner']
        scale_R_interf = options_policy['scale_R_interf']
        # Reference path-loss (dB) at distance scale_R_inner*R using the
        # 128.1 + 37.6*log10(d_km) urban macro model; rb is a breakpoint (m).
        scale_g_dB_R = scale_R_inner*self.R
        rb = 200.0
        if(scale_g_dB_R < rb):
            scale_g_dB = - (128.1 + 37.6* np.log10(0.001 * scale_g_dB_R))
        else:
            scale_g_dB = - (128.1 + 37.6* np.log10(scale_g_dB_R/rb) + 37.6* np.log10(0.001*rb))
        self.scale_gain = np.power(10.0,scale_g_dB/10.0)
        # Placeholder value used when a state feature is undefined (log of
        # noise over the reference gain, i.e. a very small interference floor).
        self.input_placer = np.log10(self.noise_var/self.scale_gain)
        scale_g_dB_inter_R = scale_R_interf * self.R
        # NOTE(review): this branch tests scale_g_dB_R, not scale_g_dB_inter_R —
        # possibly a copy-paste slip; confirm against the intended model.
        if(scale_g_dB_R < rb):
            scale_g_dB_interf = - (128.1 + 37.6* np.log10(0.001 * scale_g_dB_inter_R))
        else:
            scale_g_dB_interf = - (128.1 + 37.6* np.log10(scale_g_dB_inter_R/rb) + 37.6* np.log10(0.001*rb))
        self.scale_gain_interf = np.power(10.0,scale_g_dB_interf/10.0)
        # Experience-replay memory size
        self.memory_len = memory_per_agent*N
        # learning rate
        # epsilon greedy algorithm
        # Pre-compute the per-slot exploration rate with the same
        # decay/reset structure as the learning rates above.
        self.epsilon_all=[max_epsilon]
        for i in range(1,self.total_samples):
            if i % self.train_episodes['T_train'] == 0:
#                if int(i/self.train_episodes['T_train']) == (self.total_samples/self.train_episodes['T_train']-1):
#                    self.epsilon_all.append(0.0) # Test scenario
#                else:
                self.epsilon_all.append(max_epsilon)
            else:
                self.epsilon_all.append(max(min_epsilon,epsilon_decay*self.epsilon_all[-1]))
        # Experience replay memory (shared across all N agents); deques drop
        # the oldest samples automatically once full.
        self.memory = {}
        self.memory['s'] = collections.deque([],self.memory_len+self.N)
        self.memory['s_prime'] = collections.deque([],self.memory_len+self.N)
        self.memory['rewards'] = collections.deque([],self.memory_len+self.N)
        self.memory['actions'] = collections.deque([],self.memory_len+self.N)
        self.previous_state = np.zeros((self.N,self.num_input))
        self.previous_action = np.ones(self.N) * self.num_actions
        # required for session to know whether dictionary is train or test
        self.is_train = tf.placeholder("bool")
        ##
        # Critic network
        self.x_s_critic = tf.placeholder("float", [None, self.num_input])
        self.x_a_critic = tf.placeholder("float", [None, self.num_actions])
        self.y_critic = tf.placeholder("float", [None, 1])  # NOTE(review): appears unused; y_critic_target is fed instead
        self.x_s_critic_target = tf.placeholder("float", [None, self.num_input])
        self.x_a_critic_target = tf.placeholder("float", [None, self.num_actions])
        self.y_critic_target = tf.placeholder("float", [None, 1])
        with tf.name_scope("C_weights"):
            self.weights_critic = pb.initial_weights (self.num_input+self.num_actions, n_hidden_1,
                                          n_hidden_2, n_hidden_3, self.num_output)
        with tf.name_scope("Ctarget_weights"):
            self.weights_target_critic = pb.initial_weights (self.num_input+self.num_actions, n_hidden_1,
                                          n_hidden_2, n_hidden_3, self.num_output)
        with tf.name_scope("C_biases"):
            self.biases_critic = pb.initial_biases (n_hidden_1, n_hidden_2, n_hidden_3,
                                          self.num_output)
        with tf.name_scope("Ctarget_biases"):
            self.biases_target_critic = pb.initial_biases (n_hidden_1, n_hidden_2, n_hidden_3,
                                          self.num_output)
        # initialize the neural network for each agent
        self.critic= pb.critic_net(self.x_s_critic,self.x_a_critic, self.weights_critic, self.biases_critic)
        self.critic_target = pb.critic_net(self.x_s_critic_target,self.x_a_critic_target, self.weights_target_critic,
                                     self.biases_target_critic)
        # dQ/da — gradient of the critic output w.r.t. the action input,
        # used below as the deterministic policy gradient signal.
        self.action_grads_v = tf.gradients(self.critic, self.x_a_critic)
        self.action_grads = [self.action_grads_v[0]]#/(tf.to_float(tf.shape(self.action_grads_v[0])[0]))]#*self.batch_size)]
        # l2_regularizer_loss = 0.001*tf.reduce_sum(tf.pow(self.weights_critic['h2'],2))
        self.critic_loss = tf.nn.l2_loss(self.y_critic_target - self.critic) # + l2_regularizer_loss
        self.c_loss = []
        self.c_loss_track = []
        # self.critic_loss = tf.reduce_mean(tf.pow(self.y_critic_target- self.critic,2)) #+ l2_regularizer_loss
        self.critic_learning_rate = (tf.placeholder('float'))
        # self.critic_optimizer = tf.train.AdamOptimizer(self.critic_learning_rate).minimize(self.critic_loss)
        self.critic_optimizer = tf.train.RMSPropOptimizer(self.critic_learning_rate, decay=0.9,
                                epsilon=1e-10).minimize(self.critic_loss)
        # Actor network
        self.x_actor = tf.placeholder("float", [None, self.num_input])
        self.y_actor = tf.placeholder("float", [None, 1])  # NOTE(review): appears unused in this class
        self.x_actor_agent = tf.placeholder("float", [None, self.num_input])
        with tf.name_scope("A_weights"):
            self.weights_actor = pb.initial_weights (self.num_input, n_hidden_1,
                                          n_hidden_2, n_hidden_3, self.num_output)
        with tf.name_scope("Aagent_weights"):
            self.weights_target_actor = pb.initial_weights (self.num_input, n_hidden_1,
                                          n_hidden_2, n_hidden_3, self.num_output)
        with tf.name_scope("Abroadcast_weights"):
            self.weights_tmp_actor = pb.initial_weights (self.num_input, n_hidden_1,
                                          n_hidden_2, n_hidden_3, self.num_output)
        with tf.name_scope("A_biases"):
            self.biases_actor = pb.initial_biases (n_hidden_1, n_hidden_2, n_hidden_3,
                                          self.num_output)
        with tf.name_scope("Aagent_biases"):
            self.biases_target_actor = pb.initial_biases (n_hidden_1, n_hidden_2, n_hidden_3,
                                          self.num_output)
        with tf.name_scope("Abroadcast_biases"):
            self.biases_tmp_actor = pb.initial_biases (n_hidden_1, n_hidden_2, n_hidden_3,
                                          self.num_output)
        # initialize the neural network for each agent
        self.actor= pb.actor_net(self.x_actor, self.weights_actor, self.biases_actor)
        # "agent" actor: the delayed copy actually used to pick actions.
        self.actor_agent = pb.actor_net(self.x_actor_agent, self.weights_target_actor,
                                          self.biases_target_actor)
        self.critic_gradient = tf.placeholder(tf.float32, [None, self.num_output])
        self.actor_params = self.get_params('A_')
        # Negative sign: ascend the critic's Q estimate (policy gradient).
        self.policy_gradients = tf.gradients(self.actor, self.actor_params, -self.critic_gradient)
        self.actor_learning_rate = (tf.placeholder('float'))
        # Adam
        # self.actor_optimizer = tf.train.AdamOptimizer(self.actor_learning_rate).apply_gradients(zip(self.policy_gradients,self.actor_params))
        # RMSprop algorithm used
        self.actor_optimizer = tf.train.RMSPropOptimizer(self.actor_learning_rate, decay=0.9,
                                epsilon=1e-10).apply_gradients(zip(self.policy_gradients,self.actor_params))
        self.init = tf.global_variables_initializer()
        # quasi-static target update simulation counter = 0
        self.saver = tf.train.Saver()
        self.std = tf.placeholder("float")
        # NOTE(review): self.noise is built but never run by this class.
        self.noise = tf.random_uniform(shape = (1, 1), minval=-self.std, maxval=self.std)
    def get_params(self, para_name):
        """Return trainable variables whose names start with ``para_name``.

        ``var.name.find(p) == 0`` iff the name starts with ``p``; ``not 0``
        is True, so the prefix match works (any other index is truthy).
        """
        sets=[]
        for var in tf.trainable_variables():
            if not var.name.find(para_name):
                sets.append(var)
        return sets
    def initialize_critic_updates(self,sess): # Need to run this once before calling the quasi-static update handler.
        """Build and run the ops copying critic weights into the critic target."""
        self.saver = tf.train.Saver(tf.global_variables())
        self.update_class1_critic = []
        for (w,tmp_w) in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='C_weights'),
                         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Ctarget_weights')):
            self.update_class1_critic.append(tf.assign(tmp_w,w))
            sess.run(self.update_class1_critic[-1])
        for (b,tmp_b) in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='C_biases'),
                         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Ctarget_biases')):
            self.update_class1_critic.append(tf.assign(tmp_b,b))
            sess.run(self.update_class1_critic[-1])
        print('first critic update')
    def initialize_actor_updates(self,sess): # Need to run this once before calling the quasi-static update handler.
        """Build/run the two-stage actor copy ops and reset the update counters.

        update_class1: trained actor  -> broadcast copy.
        update_class2: broadcast copy -> agent (acting) copy.
        """
        self.saver = tf.train.Saver(tf.global_variables())
        self.update_class1 = []
        for (w,tmp_w) in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='A_weights'),
                         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Abroadcast_weights')):
            self.update_class1.append(tf.assign(tmp_w,w))
            sess.run(self.update_class1[-1])
        for (b,tmp_b) in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='A_biases'),
                         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Abroadcast_biases')):
            self.update_class1.append(tf.assign(tmp_b,b))
            sess.run(self.update_class1[-1])
        self.update_class2 = []
        for (tmp_w,t_w) in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Abroadcast_weights'),
                         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Aagent_weights')):
            self.update_class2.append(tf.assign(t_w,tmp_w))
            sess.run(self.update_class2[-1])
        for (tmp_b,t_b) in zip(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Abroadcast_biases'),
                         tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='Aagent_biases')):
            self.update_class2.append(tf.assign(t_b,tmp_b))
            sess.run(self.update_class2[-1])
        self.simulation_target_update_counter = self.target_update_count
        self.process_weight_update = False
        self.simulation_target_pass_counter = self.time_slot_to_pass_weights
        print('first update')
    def check_memory_restart(self,sess,sim):
        """Wipe replay memory and previous state/action at episode boundaries."""
        if(sim %self.train_episodes['T_train'] == 0 and sim != 0): # Restart experience replay.
            self.memory = {}
            self.memory['s'] = collections.deque([],self.memory_len+self.N)
            self.memory['s_prime'] = collections.deque([],self.memory_len+self.N)
            self.memory['rewards'] = collections.deque([],self.memory_len+self.N)
            self.memory['actions'] = collections.deque([],self.memory_len+self.N)
            self.previous_state = np.zeros((self.N,self.num_input))
            self.previous_action = np.ones(self.N) * self.num_actions
    def update_handler(self,sess,sim):
        """Run the quasi-static weight-update schedule for one time slot.

        Critic target is synced every call; the actor snapshot is taken every
        ``target_update_count`` slots and delivered to the acting network
        ``time_slot_to_pass_weights`` slots later (broadcast delay model).
        """
        # First check whether target network has to be changed.
        self.simulation_target_update_counter -= 1
        # Update critic all the time after training
        for update_instance in self.update_class1_critic:
            sess.run(update_instance)
        # Actor broadcast
        if (self.simulation_target_update_counter == 0):
            for update_instance in self.update_class1:
                sess.run(update_instance)
            self.simulation_target_update_counter = self.target_update_count
            self.process_weight_update = True
        if self.process_weight_update:
            self.simulation_target_pass_counter -= 1
            if (self.simulation_target_pass_counter <= 0):
                for update_instance in self.update_class2:
                    sess.run(update_instance)
                self.process_weight_update = False
                self.simulation_target_pass_counter = self.time_slot_to_pass_weights
    def act(self,sess,current_local_state,sim,actor_idx):
        """Epsilon-greedy action for one agent; returns a scalar in [0, 1].

        actor_idx is currently unused (single shared acting network).
        """
        # for stability return something random for first 100 time slots.
        if sim<500 and np.random.rand() < 0.25:
            return 0.
        # epsilon greedy algorithm
        if np.random.rand() < self.epsilon_all[sim]:# or sum(self.previous_action>0.95)==self.N or sum(self.previous_action<0.01)==self.N:
            strategy = np.random.rand()
            return strategy
        strategy = sess.run(self.actor_agent, feed_dict={self.x_actor_agent: current_local_state.reshape(1,self.num_input), self.is_train: False})[0][0]
        return strategy
    def act_noepsilon(self,sess,current_local_state,sim):
        """Greedy (no-exploration) action from the acting network — for testing."""
        # Current QNN outputs for all available actions
        return sess.run(self.actor_agent, feed_dict={self.x_actor_agent: current_local_state.reshape(1,self.num_input), self.is_train: False})[0][0]
    def remember(self,agent,current_local_state,current_reward):
        """Append one (s, a, r, s') transition for ``agent`` to replay memory.

        Uses the state/action stored for the PREVIOUS slot as (s, a); the
        caller updates previous_state/previous_action separately.
        """
        self.memory['s'].append(copy.copy(self.previous_state[agent,:]).reshape(self.num_input))
        self.memory['s_prime'].append(copy.copy(current_local_state))
        self.memory['actions'].append(copy.copy(self.previous_action[agent]))
        self.memory['rewards'].append(copy.copy(current_reward))
    def train(self,sess,sim):
        """One DDPG gradient step on a random minibatch (if memory is full enough).

        Critic: regress Q(s,a) to r + gamma * Q'(s', actor_agent(s')).
        Actor: ascend dQ/da via the externally-fed critic_gradient.
        """
        # skip training for 100 time slots.
        # if sim < 100: return
        if len(self.memory['s']) >= self.batch_size+self.N:
            # Minus N ensures that experience samples from previous timeslots been used
            idx = np.random.randint(len(self.memory['rewards'])-self.N,size=self.batch_size)
            s_prime_shaped = np.array(self.memory['s_prime'])[idx, :].reshape(self.batch_size,self.num_input)
            action_t_1_batch = sess.run(self.actor_agent, feed_dict={self.x_actor_agent: s_prime_shaped})
            #Q'(s_i+1,a_i+1)
            q_t_1 = sess.run(self.critic_target, feed_dict={self.x_s_critic_target: s_prime_shaped,
                                                           self.x_a_critic_target: action_t_1_batch,self.is_train: False})
            y_batch = np.array(self.memory['rewards'])[idx].reshape(self.batch_size,1) + self.discount_factor * q_t_1
            s_shaped = np.array(self.memory['s'])[idx, :].reshape(self.batch_size,self.num_input)
            (tmp,tmp_critloss) = sess.run([self.critic_optimizer, self.critic_loss], feed_dict={self.critic_learning_rate:self.learning_rate_all_critic[sim],
                                           self.x_s_critic: s_shaped,
                                           self.x_a_critic: np.array(self.memory['actions'])[idx].reshape(self.batch_size,self.num_actions),
                                           self.y_critic_target: y_batch.reshape(self.batch_size,1), self.is_train: True})
            self.c_loss_track.append(tmp_critloss)
            # Log the mean critic loss every 100 slots.
            if sim%100==0:
                self.c_loss.append(np.mean(self.c_loss_track))
                self.c_loss_track = []
            # if sim%5==0:
            action_for_delQ = sess.run(self.actor, feed_dict={self.x_actor:s_shaped})
            del_Q_a = sess.run(self.action_grads, feed_dict={self.x_s_critic: s_shaped,
                                          self.x_a_critic: action_for_delQ,self.is_train: False})[0]
            tmp = sess.run([self.actor_optimizer], feed_dict={self.actor_learning_rate:self.learning_rate_all_actor[sim],
                                           self.x_actor: s_shaped,
                                           self.critic_gradient: del_Q_a, self.is_train: True})
    def equalize(self,sess):
        """Force both actor copy stages to run, syncing all actor networks now."""
        for update_instance in self.update_class1:
            sess.run(update_instance)
        for update_instance in self.update_class2:
            sess.run(update_instance)
    def save(self,sess,model_destination):
        """Save all graph variables to ``model_destination`` via tf.train.Saver."""
        self.saver = tf.train.Saver(tf.global_variables())
        save_path = self.saver.save(sess, model_destination)
        print("Model saved in path: %s" % save_path)
    def load(self,sess,model_destination):
        """Restore all graph variables from a checkpoint at ``model_destination``."""
        self.saver = tf.train.Saver(tf.global_variables())
        self.saver.restore(sess, model_destination)
        print('Model loaded from: %s' %(model_destination))
    def local_state(self,sim,agent,p_strategy_all,H_all_2,neighbors,neighbors_in,sum_rate_list_distributed_policy,sims_pos_p):
        """Assemble the (6 + 7*N_neighbors)-dim local observation for one agent.

        Layout: [0] own power / Pmax, [1]-[2] own direct gain (log, current and
        previous slot), [3] own scaled rate, [4]-[5] total received interference
        (log, two previous slots), then per-neighbor feature groups built from
        the tmp_exp_type_1 / tmp_exp_type_2 history deques, padded with
        ``input_placer`` (or -1 for one rate group) up to N_neighbors entries.

        Assumes (confirm against caller): H_all_2[t] is the NxN channel-gain
        matrix at slot t, p_strategy_all a history of length-N power vectors,
        neighbors/neighbors_in per-slot lists of interfered / interfering
        links, sims_pos_p the last slot each agent transmitted with p > 0.
        """
        current_experiences = np.zeros(self.num_input)
        if(p_strategy_all[-1][agent]==0):
            current_experiences[0] = 0.0
        else:
            current_experiences[0] = (p_strategy_all[-1][agent])/self.Pmax
        current_experiences[1] = np.log10(H_all_2[sim][agent,:][agent]/self.scale_gain)
        current_experiences[2] = np.log10(H_all_2[sim-1][agent,:][agent]/self.scale_gain)
        current_experiences[3] = 0.5 * sum_rate_list_distributed_policy[-1].diagonal() [agent] # maximum value of sum-rate is around 10, so we wanna slightly reduce for better performance.
        # Interference two slots ago (unless every other link was silent).
        if(len(np.where(np.delete(p_strategy_all[-2],agent)==0)[0])!=self.N-1):
            current_experiences[4] = np.log10((self.noise_var+np.matmul(np.delete(H_all_2[sim-2][agent,:],agent),
                               np.delete(p_strategy_all[-2],agent)))/(self.scale_gain))
        else:
            current_experiences[4] = self.input_placer
        # Interference in the previous slot.
        if(len(np.where(np.delete(p_strategy_all[-1],agent)==0)[0])!=self.N-1):
            current_experiences[5] = np.log10((self.noise_var+np.matmul(np.delete(H_all_2[sim-1][agent,:],agent),
                               np.delete(p_strategy_all[-1],agent)))/(self.scale_gain))
        else:
            current_experiences[5] = self.input_placer
        # Bootstrap the type-1 history (interferer gains*powers + their rates,
        # sorted by descending interference) from slot sim-2 on first use.
        if(len(self.tmp_exp_type_1[agent]) == 0):
            if(len(neighbors_in[-2][agent]) != 0):
                self.tmp_exp_type_1[agent].append(np.log10(np.multiply(H_all_2[sim-2][agent,neighbors_in[-2][agent]],p_strategy_all[-2][neighbors_in[-2][agent]])/(self.scale_gain_interf)))
                tmp_exp_type_1_index = np.argsort(self.tmp_exp_type_1[agent][-1])[::-1]
                self.tmp_exp_type_1[agent][-1] = self.tmp_exp_type_1[agent][-1][tmp_exp_type_1_index]
                self.tmp_exp_type_1[agent].append(0.5 * sum_rate_list_distributed_policy[-2].diagonal()[neighbors_in[-2][agent]][tmp_exp_type_1_index])
            else:
                self.tmp_exp_type_1[agent].append(np.array([]))
                self.tmp_exp_type_1[agent].append(np.array([]))
            # Append negative numbers if needed
            if (len(self.tmp_exp_type_1[agent][-2]) < self.N_neighbors):
                self.tmp_exp_type_1[agent][-2] = np.append(self.tmp_exp_type_1[agent][-2],(self.N_neighbors - len(self.tmp_exp_type_1[agent][-2]))*[self.input_placer])
                self.tmp_exp_type_1[agent][-1] = np.append(self.tmp_exp_type_1[agent][-1],(self.N_neighbors - len(self.tmp_exp_type_1[agent][-1]))*[self.input_placer])
        # Current-slot type-1 features (same structure, slot sim-1).
        if(len(neighbors_in[-1][agent]) != 0):
            self.tmp_exp_type_1[agent].append(np.log10(np.multiply(H_all_2[sim-1][agent,neighbors_in[-1][agent]],p_strategy_all[-1][neighbors_in[-1][agent]])/(self.scale_gain_interf)))
            tmp_exp_type_1_index = np.argsort(self.tmp_exp_type_1[agent][-1])[::-1]
            self.tmp_exp_type_1[agent][-1] = self.tmp_exp_type_1[agent][-1][tmp_exp_type_1_index]
            self.tmp_exp_type_1[agent].append(0.5 * sum_rate_list_distributed_policy[-1].diagonal()[neighbors_in[-1][agent]][tmp_exp_type_1_index])
        else:
            self.tmp_exp_type_1[agent].append(np.array([]))
            self.tmp_exp_type_1[agent].append(np.array([]))
        # Append negative numbers if needed
        if (len(self.tmp_exp_type_1[agent][-2]) < self.N_neighbors):
            self.tmp_exp_type_1[agent][-2] = np.append(self.tmp_exp_type_1[agent][-2],(self.N_neighbors - len(self.tmp_exp_type_1[agent][-2]))*[self.input_placer])
            self.tmp_exp_type_1[agent][-1] = np.append(self.tmp_exp_type_1[agent][-1],(self.N_neighbors - len(self.tmp_exp_type_1[agent][-1]))*[-1])
        # Fill the four type-1 neighbor groups (current + one slot of history).
        current_experiences[(6 + 0 * self.N_neighbors):(6 + 1 * self.N_neighbors)] = self.tmp_exp_type_1[agent][-1][:self.N_neighbors]
        current_experiences[(6 + 1 * self.N_neighbors):(6 + 2 * self.N_neighbors)] = self.tmp_exp_type_1[agent][-2][:self.N_neighbors]
        current_experiences[(6 + 2 * self.N_neighbors):(6 + 3 * self.N_neighbors)] = self.tmp_exp_type_1[agent][-3][:self.N_neighbors]
        current_experiences[(6 + 3 * self.N_neighbors):(6 + 4 * self.N_neighbors)] = self.tmp_exp_type_1[agent][-4][:self.N_neighbors]
        # Default the three type-2 groups to the placeholder value; they are
        # overwritten below when neighbor data is available.
        current_experiences[(6 + 4 * self.N_neighbors):(6 + 5 * self.N_neighbors)] = current_experiences[(6 + 4 * self.N_neighbors):(6 + 5 * self.N_neighbors)] + self.input_placer
        current_experiences[(6 + 5 * self.N_neighbors):(6 + 6 * self.N_neighbors)] = current_experiences[(6 + 5 * self.N_neighbors):(6 + 6 * self.N_neighbors)] + self.input_placer
        current_experiences[(6 + 6 * self.N_neighbors):(6 + 7 * self.N_neighbors)] = current_experiences[(6 + 6 * self.N_neighbors):(6 + 7 * self.N_neighbors)] + self.input_placer
        # Type-2 features: gains toward interfered neighbors (relative to their
        # interference level), their direct gains, and their rates — computed
        # from the last slot this agent actually transmitted.
        if(len(neighbors[-1][agent])>0 and p_strategy_all[-1][agent] != 0):
            self.tmp_exp_type_2[agent].append(np.log10(H_all_2[sim-1][np.array(neighbors[-1][agent]),agent]/self.prev_suminterferences[neighbors[-1][agent]]))
            tmp_exp_type_2_index = np.argsort(self.tmp_exp_type_2[agent][-1])[::-1]
            self.tmp_exp_type_2[agent][-1] = self.tmp_exp_type_2[agent][-1][tmp_exp_type_2_index]
            self.tmp_exp_type_2[agent].append(np.log10((H_all_2[sim-1].diagonal()[np.array(neighbors[-1][agent])])/self.scale_gain))
            self.tmp_exp_type_2[agent][-1] = self.tmp_exp_type_2[agent][-1][tmp_exp_type_2_index]
            self.tmp_exp_type_2[agent].append(0.5 * sum_rate_list_distributed_policy[-1].diagonal()[neighbors[-1][agent]][tmp_exp_type_2_index])
            if (len(self.tmp_exp_type_2[agent][-2]) < self.N_neighbors):
                self.tmp_exp_type_2[agent][-1] = np.append(self.tmp_exp_type_2[agent][-1],(self.N_neighbors - len(self.tmp_exp_type_2[agent][-1]))*[self.input_placer])
                self.tmp_exp_type_2[agent][-2] = np.append(self.tmp_exp_type_2[agent][-2],(self.N_neighbors - len(self.tmp_exp_type_2[agent][-2]))*[self.input_placer])
                self.tmp_exp_type_2[agent][-3] = np.append(self.tmp_exp_type_2[agent][-3],(self.N_neighbors - len(self.tmp_exp_type_2[agent][-3]))*[self.input_placer])
            current_experiences[(6 + 4 * self.N_neighbors):(6 + 5 * self.N_neighbors)] = self.tmp_exp_type_2[agent][-3][:self.N_neighbors]
            current_experiences[(6 + 5 * self.N_neighbors):(6 + 6 * self.N_neighbors)] = self.tmp_exp_type_2[agent][-2][:self.N_neighbors]
            current_experiences[(6 + 6 * self.N_neighbors):(6 + 7 * self.N_neighbors)] = self.tmp_exp_type_2[agent][-1][:self.N_neighbors]
        elif(sims_pos_p[agent]>0):
            sim_pos_p = sims_pos_p[agent]
            self.tmp_exp_type_2[agent].append(np.log10(H_all_2[sim_pos_p-1][np.array(neighbors[-1][agent]),agent]/self.prev_suminterferences[neighbors[-1][agent]]))
            tmp_exp_type_2_index = np.argsort(self.tmp_exp_type_2[agent][-1])[::-1]
            self.tmp_exp_type_2[agent][-1] = self.tmp_exp_type_2[agent][-1][tmp_exp_type_2_index]
            self.tmp_exp_type_2[agent].append(np.log10((H_all_2[sim-1].diagonal()[np.array(neighbors[-1][agent])])/self.scale_gain))
            self.tmp_exp_type_2[agent][-1] = self.tmp_exp_type_2[agent][-1][tmp_exp_type_2_index]
            self.tmp_exp_type_2[agent].append(0.5 * sum_rate_list_distributed_policy[-1].diagonal()[neighbors[-1][agent]][tmp_exp_type_2_index])
            if (len(self.tmp_exp_type_2[agent][-2]) < self.N_neighbors):
                self.tmp_exp_type_2[agent][-1] = np.append(self.tmp_exp_type_2[agent][-1],(self.N_neighbors - len(self.tmp_exp_type_2[agent][-1]))*[self.input_placer])
                self.tmp_exp_type_2[agent][-2] = np.append(self.tmp_exp_type_2[agent][-2],(self.N_neighbors - len(self.tmp_exp_type_2[agent][-2]))*[self.input_placer])
                self.tmp_exp_type_2[agent][-3] = np.append(self.tmp_exp_type_2[agent][-3],(self.N_neighbors - len(self.tmp_exp_type_2[agent][-3]))*[self.input_placer])
            current_experiences[(6 + 4 * self.N_neighbors):(6 + 5 * self.N_neighbors)] = self.tmp_exp_type_2[agent][-3][:self.N_neighbors]
            current_experiences[(6 + 5 * self.N_neighbors):(6 + 6 * self.N_neighbors)] = self.tmp_exp_type_2[agent][-2][:self.N_neighbors]
            current_experiences[(6 + 6 * self.N_neighbors):(6 + 7 * self.N_neighbors)] = self.tmp_exp_type_2[agent][-1][:self.N_neighbors]
        return current_experiences
| [
"numpy.log10",
"numpy.sqrt",
"numpy.random.rand",
"tensorflow.gradients",
"numpy.argsort",
"numpy.array",
"copy.copy",
"project_backend.initial_biases",
"numpy.mean",
"numpy.multiply",
"collections.deque",
"project_backend.critic_net",
"tensorflow.placeholder",
"numpy.delete",
"tensorflo... | [((274, 298), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (296, 298), True, 'import tensorflow as tf\n'), ((704, 715), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (712, 715), True, 'import numpy as np\n'), ((3443, 3476), 'numpy.power', 'np.power', (['(10.0)', '(scale_g_dB / 10.0)'], {}), '(10.0, scale_g_dB / 10.0)\n', (3451, 3476), True, 'import numpy as np\n'), ((3502, 3544), 'numpy.log10', 'np.log10', (['(self.noise_var / self.scale_gain)'], {}), '(self.noise_var / self.scale_gain)\n', (3510, 3544), True, 'import numpy as np\n'), ((3870, 3910), 'numpy.power', 'np.power', (['(10.0)', '(scale_g_dB_interf / 10.0)'], {}), '(10.0, scale_g_dB_interf / 10.0)\n', (3878, 3910), True, 'import numpy as np\n'), ((4679, 4726), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (4696, 4726), False, 'import collections\n'), ((4757, 4804), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (4774, 4804), False, 'import collections\n'), ((4835, 4882), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (4852, 4882), False, 'import collections\n'), ((4913, 4960), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (4930, 4960), False, 'import collections\n'), ((4997, 5031), 'numpy.zeros', 'np.zeros', (['(self.N, self.num_input)'], {}), '((self.N, self.num_input))\n', (5005, 5031), True, 'import numpy as np\n'), ((5204, 5226), 'tensorflow.placeholder', 'tf.placeholder', (['"""bool"""'], {}), "('bool')\n", (5218, 5226), True, 'import tensorflow as tf\n'), ((5290, 5337), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.num_input]'], {}), "('float', [None, self.num_input])\n", (5304, 5337), True, 'import tensorflow as tf\n'), 
((5364, 5413), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.num_actions]'], {}), "('float', [None, self.num_actions])\n", (5378, 5413), True, 'import tensorflow as tf\n'), ((5438, 5472), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 1]'], {}), "('float', [None, 1])\n", (5452, 5472), True, 'import tensorflow as tf\n'), ((5506, 5553), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.num_input]'], {}), "('float', [None, self.num_input])\n", (5520, 5553), True, 'import tensorflow as tf\n'), ((5587, 5636), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.num_actions]'], {}), "('float', [None, self.num_actions])\n", (5601, 5636), True, 'import tensorflow as tf\n'), ((5668, 5702), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 1]'], {}), "('float', [None, 1])\n", (5682, 5702), True, 'import tensorflow as tf\n'), ((6650, 6743), 'project_backend.critic_net', 'pb.critic_net', (['self.x_s_critic', 'self.x_a_critic', 'self.weights_critic', 'self.biases_critic'], {}), '(self.x_s_critic, self.x_a_critic, self.weights_critic, self.\n biases_critic)\n', (6663, 6743), True, 'import project_backend as pb\n'), ((6767, 6888), 'project_backend.critic_net', 'pb.critic_net', (['self.x_s_critic_target', 'self.x_a_critic_target', 'self.weights_target_critic', 'self.biases_target_critic'], {}), '(self.x_s_critic_target, self.x_a_critic_target, self.\n weights_target_critic, self.biases_target_critic)\n', (6780, 6888), True, 'import project_backend as pb\n'), ((6966, 7008), 'tensorflow.gradients', 'tf.gradients', (['self.critic', 'self.x_a_critic'], {}), '(self.critic, self.x_a_critic)\n', (6978, 7008), True, 'import tensorflow as tf\n'), ((7259, 7308), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.y_critic_target - self.critic)'], {}), '(self.y_critic_target - self.critic)\n', (7272, 7308), True, 'import tensorflow as tf\n'), ((7547, 7570), 
'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (7561, 7570), True, 'import tensorflow as tf\n'), ((7924, 7971), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.num_input]'], {}), "('float', [None, self.num_input])\n", (7938, 7971), True, 'import tensorflow as tf\n'), ((7995, 8029), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 1]'], {}), "('float', [None, 1])\n", (8009, 8029), True, 'import tensorflow as tf\n'), ((8059, 8106), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.num_input]'], {}), "('float', [None, self.num_input])\n", (8073, 8106), True, 'import tensorflow as tf\n'), ((9459, 9524), 'project_backend.actor_net', 'pb.actor_net', (['self.x_actor', 'self.weights_actor', 'self.biases_actor'], {}), '(self.x_actor, self.weights_actor, self.biases_actor)\n', (9471, 9524), True, 'import project_backend as pb\n'), ((9552, 9642), 'project_backend.actor_net', 'pb.actor_net', (['self.x_actor_agent', 'self.weights_target_actor', 'self.biases_target_actor'], {}), '(self.x_actor_agent, self.weights_target_actor, self.\n biases_target_actor)\n', (9564, 9642), True, 'import project_backend as pb\n'), ((9722, 9773), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.num_output]'], {}), '(tf.float32, [None, self.num_output])\n', (9736, 9773), True, 'import tensorflow as tf\n'), ((9865, 9931), 'tensorflow.gradients', 'tf.gradients', (['self.actor', 'self.actor_params', '(-self.critic_gradient)'], {}), '(self.actor, self.actor_params, -self.critic_gradient)\n', (9877, 9931), True, 'import tensorflow as tf\n'), ((9975, 9998), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (9989, 9998), True, 'import tensorflow as tf\n'), ((10430, 10463), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10461, 10463), True, 'import tensorflow as tf\n'), ((10545, 10561), 
'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10559, 10561), True, 'import tensorflow as tf\n'), ((10590, 10613), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (10604, 10613), True, 'import tensorflow as tf\n'), ((10635, 10701), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '(1, 1)', 'minval': '(-self.std)', 'maxval': 'self.std'}), '(shape=(1, 1), minval=-self.std, maxval=self.std)\n', (10652, 10701), True, 'import tensorflow as tf\n'), ((10781, 10805), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10803, 10805), True, 'import tensorflow as tf\n'), ((19511, 19535), 'numpy.zeros', 'np.zeros', (['self.num_input'], {}), '(self.num_input)\n', (19519, 19535), True, 'import numpy as np\n'), ((19741, 19798), 'numpy.log10', 'np.log10', (['(H_all_2[sim][agent, :][agent] / self.scale_gain)'], {}), '(H_all_2[sim][agent, :][agent] / self.scale_gain)\n', (19749, 19798), True, 'import numpy as np\n'), ((19838, 19899), 'numpy.log10', 'np.log10', (['(H_all_2[sim - 1][agent, :][agent] / self.scale_gain)'], {}), '(H_all_2[sim - 1][agent, :][agent] / self.scale_gain)\n', (19846, 19899), True, 'import numpy as np\n'), ((5062, 5077), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (5069, 5077), True, 'import numpy as np\n'), ((5716, 5742), 'tensorflow.name_scope', 'tf.name_scope', (['"""C_weights"""'], {}), "('C_weights')\n", (5729, 5742), True, 'import tensorflow as tf\n'), ((5778, 5888), 'project_backend.initial_weights', 'pb.initial_weights', (['(self.num_input + self.num_actions)', 'n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(self.num_input + self.num_actions, n_hidden_1,\n n_hidden_2, n_hidden_3, self.num_output)\n', (5796, 5888), True, 'import project_backend as pb\n'), ((5944, 5976), 'tensorflow.name_scope', 'tf.name_scope', (['"""Ctarget_weights"""'], {}), "('Ctarget_weights')\n", (5957, 5976), True, 'import tensorflow as tf\n'), ((6020, 
6130), 'project_backend.initial_weights', 'pb.initial_weights', (['(self.num_input + self.num_actions)', 'n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(self.num_input + self.num_actions, n_hidden_1,\n n_hidden_2, n_hidden_3, self.num_output)\n', (6038, 6130), True, 'import project_backend as pb\n'), ((6186, 6211), 'tensorflow.name_scope', 'tf.name_scope', (['"""C_biases"""'], {}), "('C_biases')\n", (6199, 6211), True, 'import tensorflow as tf\n'), ((6246, 6316), 'project_backend.initial_biases', 'pb.initial_biases', (['n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(n_hidden_1, n_hidden_2, n_hidden_3, self.num_output)\n', (6263, 6316), True, 'import project_backend as pb\n'), ((6373, 6404), 'tensorflow.name_scope', 'tf.name_scope', (['"""Ctarget_biases"""'], {}), "('Ctarget_biases')\n", (6386, 6404), True, 'import tensorflow as tf\n'), ((6447, 6517), 'project_backend.initial_biases', 'pb.initial_biases', (['n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(n_hidden_1, n_hidden_2, n_hidden_3, self.num_output)\n', (6464, 6517), True, 'import project_backend as pb\n'), ((8120, 8146), 'tensorflow.name_scope', 'tf.name_scope', (['"""A_weights"""'], {}), "('A_weights')\n", (8133, 8146), True, 'import tensorflow as tf\n'), ((8181, 8273), 'project_backend.initial_weights', 'pb.initial_weights', (['self.num_input', 'n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(self.num_input, n_hidden_1, n_hidden_2, n_hidden_3, self\n .num_output)\n', (8199, 8273), True, 'import project_backend as pb\n'), ((8330, 8361), 'tensorflow.name_scope', 'tf.name_scope', (['"""Aagent_weights"""'], {}), "('Aagent_weights')\n", (8343, 8361), True, 'import tensorflow as tf\n'), ((8404, 8496), 'project_backend.initial_weights', 'pb.initial_weights', (['self.num_input', 'n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(self.num_input, n_hidden_1, n_hidden_2, n_hidden_3, self\n .num_output)\n', (8422, 
8496), True, 'import project_backend as pb\n'), ((8553, 8588), 'tensorflow.name_scope', 'tf.name_scope', (['"""Abroadcast_weights"""'], {}), "('Abroadcast_weights')\n", (8566, 8588), True, 'import tensorflow as tf\n'), ((8628, 8720), 'project_backend.initial_weights', 'pb.initial_weights', (['self.num_input', 'n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(self.num_input, n_hidden_1, n_hidden_2, n_hidden_3, self\n .num_output)\n', (8646, 8720), True, 'import project_backend as pb\n'), ((8777, 8802), 'tensorflow.name_scope', 'tf.name_scope', (['"""A_biases"""'], {}), "('A_biases')\n", (8790, 8802), True, 'import tensorflow as tf\n'), ((8836, 8906), 'project_backend.initial_biases', 'pb.initial_biases', (['n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(n_hidden_1, n_hidden_2, n_hidden_3, self.num_output)\n', (8853, 8906), True, 'import project_backend as pb\n'), ((8963, 8993), 'tensorflow.name_scope', 'tf.name_scope', (['"""Aagent_biases"""'], {}), "('Aagent_biases')\n", (8976, 8993), True, 'import tensorflow as tf\n'), ((9035, 9105), 'project_backend.initial_biases', 'pb.initial_biases', (['n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(n_hidden_1, n_hidden_2, n_hidden_3, self.num_output)\n', (9052, 9105), True, 'import project_backend as pb\n'), ((9162, 9196), 'tensorflow.name_scope', 'tf.name_scope', (['"""Abroadcast_biases"""'], {}), "('Abroadcast_biases')\n", (9175, 9196), True, 'import tensorflow as tf\n'), ((9235, 9305), 'project_backend.initial_biases', 'pb.initial_biases', (['n_hidden_1', 'n_hidden_2', 'n_hidden_3', 'self.num_output'], {}), '(n_hidden_1, n_hidden_2, n_hidden_3, self.num_output)\n', (9252, 9305), True, 'import project_backend as pb\n'), ((11037, 11058), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (11056, 11058), True, 'import tensorflow as tf\n'), ((11128, 11198), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], 
{'scope': '"""C_weights"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='C_weights')\n", (11145, 11198), True, 'import tensorflow as tf\n'), ((11212, 11288), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Ctarget_weights"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Ctarget_weights')\n", (11229, 11288), True, 'import tensorflow as tf\n'), ((11436, 11505), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""C_biases"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='C_biases')\n", (11453, 11505), True, 'import tensorflow as tf\n'), ((11519, 11594), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Ctarget_biases"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Ctarget_biases')\n", (11536, 11594), True, 'import tensorflow as tf\n'), ((11881, 11902), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (11900, 11902), True, 'import tensorflow as tf\n'), ((11965, 12035), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""A_weights"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='A_weights')\n", (11982, 12035), True, 'import tensorflow as tf\n'), ((12049, 12128), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Abroadcast_weights"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Abroadcast_weights')\n", (12066, 12128), True, 'import tensorflow as tf\n'), ((12262, 12331), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""A_biases"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='A_biases')\n", (12279, 12331), True, 'import tensorflow as tf\n'), ((12345, 12423), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Abroadcast_biases"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, 
scope='Abroadcast_biases')\n", (12362, 12423), True, 'import tensorflow as tf\n'), ((12591, 12670), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Abroadcast_weights"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Abroadcast_weights')\n", (12608, 12670), True, 'import tensorflow as tf\n'), ((12684, 12759), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Aagent_weights"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Aagent_weights')\n", (12701, 12759), True, 'import tensorflow as tf\n'), ((12897, 12975), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Abroadcast_biases"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Abroadcast_biases')\n", (12914, 12975), True, 'import tensorflow as tf\n'), ((12989, 13063), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""Aagent_biases"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Aagent_biases')\n", (13006, 13063), True, 'import tensorflow as tf\n'), ((13602, 13649), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (13619, 13649), False, 'import collections\n'), ((13684, 13731), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (13701, 13731), False, 'import collections\n'), ((13766, 13813), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (13783, 13813), False, 'import collections\n'), ((13848, 13895), 'collections.deque', 'collections.deque', (['[]', '(self.memory_len + self.N)'], {}), '([], self.memory_len + self.N)\n', (13865, 13895), False, 'import collections\n'), ((13940, 13974), 'numpy.zeros', 'np.zeros', (['(self.N, self.num_input)'], {}), '((self.N, self.num_input))\n', (13948, 
13974), True, 'import numpy as np\n'), ((15297, 15313), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15311, 15313), True, 'import numpy as np\n'), ((15448, 15464), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15462, 15464), True, 'import numpy as np\n'), ((16152, 16182), 'copy.copy', 'copy.copy', (['current_local_state'], {}), '(current_local_state)\n', (16161, 16182), False, 'import copy\n'), ((16222, 16260), 'copy.copy', 'copy.copy', (['self.previous_action[agent]'], {}), '(self.previous_action[agent])\n', (16231, 16260), False, 'import copy\n'), ((16300, 16325), 'copy.copy', 'copy.copy', (['current_reward'], {}), '(current_reward)\n', (16309, 16325), False, 'import copy\n'), ((18985, 19006), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (19004, 19006), True, 'import tensorflow as tf\n'), ((19210, 19231), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (19229, 19231), True, 'import tensorflow as tf\n'), ((500, 510), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (507, 510), True, 'import numpy as np\n'), ((787, 811), 'collections.deque', 'collections.deque', (['[]', '(4)'], {}), '([], 4)\n', (804, 811), False, 'import collections\n'), ((851, 875), 'collections.deque', 'collections.deque', (['[]', '(3)'], {}), '([], 3)\n', (868, 875), False, 'import collections\n'), ((7715, 7793), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.critic_learning_rate'], {'decay': '(0.9)', 'epsilon': '(1e-10)'}), '(self.critic_learning_rate, decay=0.9, epsilon=1e-10)\n', (7740, 7793), True, 'import tensorflow as tf\n'), ((10223, 10300), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.actor_learning_rate'], {'decay': '(0.9)', 'epsilon': '(1e-10)'}), '(self.actor_learning_rate, decay=0.9, epsilon=1e-10)\n', (10248, 10300), True, 'import tensorflow as tf\n'), ((11335, 11354), 'tensorflow.assign', 'tf.assign', (['tmp_w', 'w'], {}), '(tmp_w, w)\n', (11344, 
11354), True, 'import tensorflow as tf\n'), ((11641, 11660), 'tensorflow.assign', 'tf.assign', (['tmp_b', 'b'], {}), '(tmp_b, b)\n', (11650, 11660), True, 'import tensorflow as tf\n'), ((12168, 12187), 'tensorflow.assign', 'tf.assign', (['tmp_w', 'w'], {}), '(tmp_w, w)\n', (12177, 12187), True, 'import tensorflow as tf\n'), ((12463, 12482), 'tensorflow.assign', 'tf.assign', (['tmp_b', 'b'], {}), '(tmp_b, b)\n', (12472, 12482), True, 'import tensorflow as tf\n'), ((12799, 12820), 'tensorflow.assign', 'tf.assign', (['t_w', 'tmp_w'], {}), '(t_w, tmp_w)\n', (12808, 12820), True, 'import tensorflow as tf\n'), ((13103, 13124), 'tensorflow.assign', 'tf.assign', (['t_b', 'tmp_b'], {}), '(t_b, tmp_b)\n', (13112, 13124), True, 'import tensorflow as tf\n'), ((14009, 14024), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (14016, 14024), True, 'import numpy as np\n'), ((15204, 15220), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15218, 15220), True, 'import numpy as np\n'), ((22366, 22408), 'numpy.argsort', 'np.argsort', (['self.tmp_exp_type_1[agent][-1]'], {}), '(self.tmp_exp_type_1[agent][-1])\n', (22376, 22408), True, 'import numpy as np\n'), ((22721, 22733), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22729, 22733), True, 'import numpy as np\n'), ((22781, 22793), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22789, 22793), True, 'import numpy as np\n'), ((24599, 24641), 'numpy.argsort', 'np.argsort', (['self.tmp_exp_type_2[agent][-1]'], {}), '(self.tmp_exp_type_2[agent][-1])\n', (24609, 24641), True, 'import numpy as np\n'), ((16049, 16089), 'copy.copy', 'copy.copy', (['self.previous_state[agent, :]'], {}), '(self.previous_state[agent, :])\n', (16058, 16089), False, 'import copy\n'), ((17961, 17987), 'numpy.mean', 'np.mean', (['self.c_loss_track'], {}), '(self.c_loss_track)\n', (17968, 17987), True, 'import numpy as np\n'), ((21193, 21235), 'numpy.argsort', 'np.argsort', (['self.tmp_exp_type_1[agent][-1]'], {}), 
'(self.tmp_exp_type_1[agent][-1])\n', (21203, 21235), True, 'import numpy as np\n'), ((21564, 21576), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (21572, 21576), True, 'import numpy as np\n'), ((21628, 21640), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (21636, 21640), True, 'import numpy as np\n'), ((26448, 26490), 'numpy.argsort', 'np.argsort', (['self.tmp_exp_type_2[agent][-1]'], {}), '(self.tmp_exp_type_2[agent][-1])\n', (26458, 26490), True, 'import numpy as np\n'), ((3274, 3304), 'numpy.log10', 'np.log10', (['(0.001 * scale_g_dB_R)'], {}), '(0.001 * scale_g_dB_R)\n', (3282, 3304), True, 'import numpy as np\n'), ((3396, 3416), 'numpy.log10', 'np.log10', (['(0.001 * rb)'], {}), '(0.001 * rb)\n', (3404, 3416), True, 'import numpy as np\n'), ((3676, 3712), 'numpy.log10', 'np.log10', (['(0.001 * scale_g_dB_inter_R)'], {}), '(0.001 * scale_g_dB_inter_R)\n', (3684, 3712), True, 'import numpy as np\n'), ((3817, 3837), 'numpy.log10', 'np.log10', (['(0.001 * rb)'], {}), '(0.001 * rb)\n', (3825, 3837), True, 'import numpy as np\n'), ((16707, 16739), 'numpy.array', 'np.array', (["self.memory['s_prime']"], {}), "(self.memory['s_prime'])\n", (16715, 16739), True, 'import numpy as np\n'), ((17314, 17340), 'numpy.array', 'np.array', (["self.memory['s']"], {}), "(self.memory['s'])\n", (17322, 17340), True, 'import numpy as np\n'), ((22201, 22311), 'numpy.multiply', 'np.multiply', (['H_all_2[sim - 1][agent, neighbors_in[-1][agent]]', 'p_strategy_all[-1][neighbors_in[-1][agent]]'], {}), '(H_all_2[sim - 1][agent, neighbors_in[-1][agent]],\n p_strategy_all[-1][neighbors_in[-1][agent]])\n', (22212, 22311), True, 'import numpy as np\n'), ((3362, 3389), 'numpy.log10', 'np.log10', (['(scale_g_dB_R / rb)'], {}), '(scale_g_dB_R / rb)\n', (3370, 3389), True, 'import numpy as np\n'), ((3777, 3810), 'numpy.log10', 'np.log10', (['(scale_g_dB_inter_R / rb)'], {}), '(scale_g_dB_inter_R / rb)\n', (3785, 3810), True, 'import numpy as np\n'), ((17182, 17214), 'numpy.array', 
'np.array', (["self.memory['rewards']"], {}), "(self.memory['rewards'])\n", (17190, 17214), True, 'import numpy as np\n'), ((20107, 20143), 'numpy.delete', 'np.delete', (['p_strategy_all[-2]', 'agent'], {}), '(p_strategy_all[-2], agent)\n', (20116, 20143), True, 'import numpy as np\n'), ((20235, 20279), 'numpy.delete', 'np.delete', (['H_all_2[sim - 2][agent, :]', 'agent'], {}), '(H_all_2[sim - 2][agent, :], agent)\n', (20244, 20279), True, 'import numpy as np\n'), ((20320, 20356), 'numpy.delete', 'np.delete', (['p_strategy_all[-2]', 'agent'], {}), '(p_strategy_all[-2], agent)\n', (20329, 20356), True, 'import numpy as np\n'), ((20470, 20506), 'numpy.delete', 'np.delete', (['p_strategy_all[-1]', 'agent'], {}), '(p_strategy_all[-1], agent)\n', (20479, 20506), True, 'import numpy as np\n'), ((20598, 20642), 'numpy.delete', 'np.delete', (['H_all_2[sim - 1][agent, :]', 'agent'], {}), '(H_all_2[sim - 1][agent, :], agent)\n', (20607, 20642), True, 'import numpy as np\n'), ((20683, 20719), 'numpy.delete', 'np.delete', (['p_strategy_all[-1]', 'agent'], {}), '(p_strategy_all[-1], agent)\n', (20692, 20719), True, 'import numpy as np\n'), ((21007, 21117), 'numpy.multiply', 'np.multiply', (['H_all_2[sim - 2][agent, neighbors_in[-2][agent]]', 'p_strategy_all[-2][neighbors_in[-2][agent]]'], {}), '(H_all_2[sim - 2][agent, neighbors_in[-2][agent]],\n p_strategy_all[-2][neighbors_in[-2][agent]])\n', (21018, 21117), True, 'import numpy as np\n'), ((24870, 24900), 'numpy.array', 'np.array', (['neighbors[-1][agent]'], {}), '(neighbors[-1][agent])\n', (24878, 24900), True, 'import numpy as np\n'), ((24475, 24505), 'numpy.array', 'np.array', (['neighbors[-1][agent]'], {}), '(neighbors[-1][agent])\n', (24483, 24505), True, 'import numpy as np\n'), ((26677, 26707), 'numpy.array', 'np.array', (['neighbors[-1][agent]'], {}), '(neighbors[-1][agent])\n', (26685, 26707), True, 'import numpy as np\n'), ((17655, 17687), 'numpy.array', 'np.array', (["self.memory['actions']"], {}), 
"(self.memory['actions'])\n", (17663, 17687), True, 'import numpy as np\n'), ((26324, 26354), 'numpy.array', 'np.array', (['neighbors[-1][agent]'], {}), '(neighbors[-1][agent])\n', (26332, 26354), True, 'import numpy as np\n')] |
import networkx as nx
import csv
import pandas as pd
import itertools
import json
import dedupe
from itertools import combinations,product
import sys
import os
import numpy as np
from affinegap import normalizedAffineGapDistance
import simplejson
from tqdm import tqdm
import tempfile
from dedupe.clustering import cluster as dedupe_cluster
import dm_file_checker
def get_deduper_probs_and_threshold(deduper, unlabeled_data, blocked_data = None, recall_weight = 1):
    """Score every candidate pair with a trained deduper and choose the
    probability threshold that maximizes the recall/precision trade-off.

    Parameters
    ----------
    deduper : trained dedupe.Dedupe instance
    unlabeled_data : dict of record id -> record (used when blocked_data is None)
    blocked_data : pre-blocked records; when given, candidate pairs come from it
    recall_weight : weight of recall relative to precision in the F-style score

    Returns
    -------
    (probs, threshold) : scores sorted in descending order, and the score at
        the index maximizing the weighted recall/precision trade-off.
    """
    # Candidate pairs: generated from scratch, or taken from pre-blocked data.
    if blocked_data is not None:
        candidate_pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
    else:
        candidate_pairs = deduper.pairs(unlabeled_data)

    scored = dedupe.core.scoreDuplicates(candidate_pairs,
                                         deduper.data_model,
                                         deduper.classifier,
                                         deduper.num_cores)['score']

    # scoreDuplicates returns a memory-mapped array: copy it into RAM and
    # delete the backing file so it won't clog the disk.
    mmap_path = scored.filename
    probs = scored.copy()
    os.remove(mmap_path)

    # Sort scores from highest to lowest.
    probs.sort()
    probs = probs[::-1]

    # Expected duplicates found if we cut off after the k highest scores.
    expected_dupes = np.cumsum(probs)
    recall = expected_dupes / expected_dupes[-1]
    precision = expected_dupes / np.arange(1, expected_dupes.size + 1)

    # Weighted harmonic-style trade-off; recall_weight > 1 favors recall.
    trade_off = recall * precision / (recall + recall_weight ** 2 * precision)
    best = np.argmax(trade_off)

    print('Maximum expected recall and precision')
    print('recall: {:.2f}%'.format(recall[best]*100))
    print('precision: {:.2f}%'.format(precision[best]*100))
    print('With threshold: {:.2f}%'.format(probs[best]*100))

    return probs, probs[best]
def get_linker_probs_and_threshold(linker, unlabeled_data_1, unlabeled_data_2, blocked_data = None, recall_weight = 1):
    """Score every candidate cross-dataset pair with a trained linker and
    choose the probability threshold that maximizes the recall/precision
    trade-off.

    Parameters
    ----------
    linker : trained dedupe.RecordLink instance
    unlabeled_data_1, unlabeled_data_2 : dicts of record id -> record
        (used when blocked_data is None)
    blocked_data : pre-blocked records; when given, candidate pairs come from it
    recall_weight : weight of recall relative to precision in the F-style score

    Returns
    -------
    (probs, threshold) : scores sorted in descending order, and the score at
        the index maximizing the weighted recall/precision trade-off.
    """
    # Candidate pairs: generated from scratch, or taken from pre-blocked data.
    if blocked_data is not None:
        candidate_pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
    else:
        candidate_pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)

    scored = dedupe.core.scoreDuplicates(candidate_pairs,
                                         linker.data_model,
                                         linker.classifier,
                                         linker.num_cores)['score']

    # scoreDuplicates returns a memory-mapped array: copy it into RAM and
    # delete the backing file so it won't clog the disk.
    mmap_path = scored.filename
    probs = scored.copy()
    os.remove(mmap_path)

    # Sort scores from highest to lowest.
    probs.sort()
    probs = probs[::-1]

    # Expected duplicates found if we cut off after the k highest scores.
    expected_dupes = np.cumsum(probs)
    recall = expected_dupes / expected_dupes[-1]
    precision = expected_dupes / np.arange(1, expected_dupes.size + 1)

    # Weighted harmonic-style trade-off; recall_weight > 1 favors recall.
    trade_off = recall * precision / (recall + recall_weight ** 2 * precision)
    best = np.argmax(trade_off)

    print('Maximum expected recall and precision')
    print('recall: {:.2f}%'.format(recall[best]*100))
    print('precision: {:.2f}%'.format(precision[best]*100))
    print('With threshold: {:.2f}%'.format(probs[best]*100))

    return probs, probs[best]
def get_model_weights(deduper_or_linker):
    """Return a dataframe of per-field logistic regression weights for a
    trained deduper/linker, sorted from most negative to most positive."""
    field_names = [variable.name for variable in deduper_or_linker.data_model._variables]
    weight_pairs = list(zip(field_names, deduper_or_linker.classifier.weights))
    # ascending sort by weight
    weight_pairs.sort(key = lambda pair: pair[1])
    return pd.DataFrame(weight_pairs, columns = ["variable", "logistic_reg_weight"])
def map_cluster_ids(deduper, unlabeled_data, threshold, hard_threshold = 0.0,
                    blocked_data = None, canonicalize = True, numeric_fields = None,
                    cluster_id_tag = None,
                    mapped_records_filepath = None,
                    cluster_canonical_filepath = None):
    # TODO: refactor — this function mixes clustering, canonicalization,
    # and two output modes (in-memory dataframe vs streamed csv) in one body.
    """
    Function that maps record ids to cluster ids
    Parameters
    ----------
    deduper : dedupe.Deduper
        A trained instance of dedupe.
    unlabeled_data : dict
        The dedupe formatted data dictionary.
    threshold : dedupe.Threshold
        The threshold used for clustering.
    hard_threshold: float
        Threshold for record pair scores that will be included in the clustering
    blocked_data : default None
        Pre-blocked records; when given, candidate pairs are taken from it
        instead of being generated from unlabeled_data.
    canonicalize : bool or list, default False
        Option that provides the canonical records as additional columns.
        Specifying a list of column names only canonicalizes those columns.
    numeric_fields: list of str, default None
        Specify which fields are numeric
    cluster_id_tag: str, default None
        Additional tag for distinguishing the cluster id of different datasets
    mapped_records_filepath: str, default None
        When given, the record-to-cluster mapping is streamed to this csv
        and None is returned for mapped_records.
    cluster_canonical_filepath: str, default None
        When given, canonical representations are streamed to this csv
        and None is returned for cluster_canonicals. Requires canonicalize.
    Returns
    -------
    mapped_records
        A dataframe storing the mapping from cluster_id to record_id
        (None when mapped_records_filepath is given)
    cluster_canonicals
        A dataframe storing the canonical representation per cluster_id
        (None when cluster_canonical_filepath is given; only returned
        when canonicalize is truthy)
    """
    assert (hard_threshold < 1) and (hard_threshold >= 0), "hard_threshold should less than 1 at at least 0.0"

    # streaming mode: write the csv header up front, rows are appended later
    if mapped_records_filepath is not None:
        with open(mapped_records_filepath, "w", newline = "") as f:
            mapped_records_header = ["record id", "cluster id", "confidence score", "cluster type"]
            writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
            writer.writeheader()

    if canonicalize:
        if cluster_canonical_filepath is not None:
            # header is one column per primary field plus the cluster id
            with open(cluster_canonical_filepath, "w", newline = "") as f:
                cluster_canonical_header = [field.field for field in deduper.data_model.primary_fields]
                cluster_canonical_header.append("cluster id")
                writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
                writer.writeheader()
    else:
        assert cluster_canonical_filepath is None, "can't have canonicalize be False if cluster_canonical_filepath exists"

    # ## Clustering
    if blocked_data is None:
        pairs = deduper.pairs(unlabeled_data)
    else:
        pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))

    pair_scores = deduper.score(pairs)
    # discard low-scoring pairs before clustering
    pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
    clustered_dupes = deduper.cluster(pair_scores, threshold)

    if numeric_fields is not None:
        assert isinstance(numeric_fields, list)

    mapped_records = []
    cluster_canonicals = []
    record_ids_in_clusters = []

    # assign cluster ids to record ids
    i = 0
    print("Mapping cluster ids...")
    for cluster in tqdm(clustered_dupes):
        i += 1
        cluster_id = "cl-{}".format(i)
        if cluster_id_tag is not None:
            # e.g. "mytag-cl-1" to distinguish clusters from different datasets
            cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
        id_set, scores = cluster

        if canonicalize:
            cluster_data = [unlabeled_data[i] for i in id_set]
            canonical_rep = get_canonical_rep(cluster_data, numeric_fields = numeric_fields)
            canonical_rep["cluster id"] = cluster_id
            if cluster_canonical_filepath is not None:
                # NOTE: the file is re-opened per row; keeps memory flat but
                # is slow for very large outputs
                with open(cluster_canonical_filepath, "a") as f:
                    writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
                    writer.writerow(canonical_rep)
            else:
                cluster_canonicals.append(canonical_rep)

        for record_id, score in zip(id_set, scores):
            record_dict = {
                "record id": record_id,
                "cluster id": cluster_id,
                "confidence score": score,
                "cluster type":'dup'
            }
            if mapped_records_filepath is not None:
                with open(mapped_records_filepath, "a", newline = "") as f:
                    writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
                    writer.writerow(record_dict)
            else:
                mapped_records.append(record_dict)

            record_ids_in_clusters.append(record_id)

    record_ids_in_clusters = set(record_ids_in_clusters)
    # records that did not end up in any cluster become singleton clusters
    solo_ids = list(set(unlabeled_data.keys()).difference(record_ids_in_clusters))

    # assign solo ids to record ids
    print("Mapping solo record ids...")
    for record_id in tqdm(solo_ids):
        i += 1
        cluster_id = "cl-{}".format(i)
        if cluster_id_tag is not None:
            cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
        record_dict = {
            "record id":record_id,
            "cluster id":cluster_id,
            "confidence score":None,
            "cluster type":'solo'
        }
        mapped_records.append(record_dict)

    if mapped_records_filepath is None:
        mapped_records = pd.DataFrame(mapped_records)
    else:
        # append the solo rows collected above, then signal streaming mode
        # by returning None for the mapping
        with open(mapped_records_filepath, "a", newline = "") as f:
            writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
            writer.writerows(mapped_records)
        mapped_records = None

    if cluster_canonical_filepath is None:
        cluster_canonicals = pd.DataFrame(cluster_canonicals)
    else:
        cluster_canonicals = None

    # delete the temporary memory-mapped file backing pair_scores
    try:
        mmap_file = pair_scores.filename
        del pair_scores
        os.remove(mmap_file)
    except AttributeError:
        # pair_scores was a plain in-memory array; nothing to clean up
        pass

    if canonicalize:
        return mapped_records, cluster_canonicals
    else:
        return mapped_records
def abs_distance(x,y):
    """Absolute difference between two numeric values; used as the
    comparator for numeric fields when canonicalizing a cluster."""
    return np.absolute(x - y)
def get_canonical_rep(record_cluster, numeric_fields = None):
    """
    Given a list of records within a duplicate cluster, build a canonical
    representation of the cluster by choosing a centroid value per field.

    Numeric fields (listed in `numeric_fields`) are compared by absolute
    difference; every other field uses the normalized affine-gap string
    distance. Non-empty values are always preferred over empty ones.
    """
    if numeric_fields is None:
        numeric_fields = []

    canonical_rep = {}
    for field in record_cluster[0].keys():
        # pick the distance function appropriate for the field type
        comparator = abs_distance if field in numeric_fields else normalizedAffineGapDistance
        # assume non-empty values always better than empty values
        observed_values = [record[field] for record in record_cluster if record[field]]
        if observed_values:
            canonical_rep[field] = dedupe.canonical.getCentroid(observed_values, comparator)
        else:
            canonical_rep[field] = ''
    return canonical_rep
def get_linked_ids(linker, unlabeled_data_1, unlabeled_data_2, threshold, hard_threshold = 0.0, blocked_data = None,
                   mapped_records_filepath = None, constraint = "one-to-one"):
    # TODO: refactor — this function mixes scoring, constraint resolution,
    # and two output modes (in-memory dataframe vs streamed csv) in one body.
    """
    Link records across two datasets with a trained linker and return the
    pairing (or stream it to csv when mapped_records_filepath is given).

    constraint: What type of constraint to put on a join.
        'one-to-one'
            Every record in data_1 can match at most
            one record from data_2 and every record
            from data_2 can match at most one record
            from data_1. This is good for when both
            data_1 and data_2 are from different
            sources and you are interested in
            matching across the sources. If,
            individually, data_1 or data_2 have many
            duplicates you will not get good
            matches.
        'many-to-one'
            Every record in data_1 can match at most
            one record from data_2, but more than
            one record from data_1 can match to the
            same record in data_2. This is good for
            when data_2 is a lookup table and data_1
            is messy, such as geocoding or matching
            against golden records.
        'many-to-many'
            Every record in data_1 can match
            multiple records in data_2 and vice
            versa. This is like a SQL inner join.
    """
    # streaming mode: write the csv header up front, rows are appended later
    if mapped_records_filepath is not None:
        with open(mapped_records_filepath, "w", newline = "") as f:
            mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
            writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
            writer.writeheader()

    ## link matching
    if blocked_data is None:
        pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
    else:
        pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
    pair_scores = linker.score(pairs)
    # discard low-scoring pairs before applying the join constraint
    pair_scores = pair_scores[pair_scores["score"] > hard_threshold]

    assert constraint in {'one-to-one', 'many-to-one', 'many-to-many'}, (
        '%s is an invalid constraint option. Valid options include '
        'one-to-one, many-to-one, or many-to-many' % constraint)
    if constraint == 'one-to-one':
        links = linker.one_to_one(pair_scores, threshold)
    elif constraint == 'many-to-one':
        links = linker.many_to_one(pair_scores, threshold)
    elif constraint == 'many-to-many':
        links = pair_scores[pair_scores['score'] > threshold]
    links = list(links)

    # delete the temporary memory-mapped file backing pair_scores
    try:
        mmap_file = pair_scores.filename
        del pair_scores
        os.remove(mmap_file)
    except AttributeError:
        # pair_scores was a plain in-memory array; nothing to clean up
        pass

    mapped_records = []
    ids_with_links_1 = []
    ids_with_links_2 = []
    print("Mapping linked pairs...")
    for record_pair in tqdm(links):
        record_ids, score = record_pair
        pair_dict = {
            "record id 1":record_ids[0],
            "record id 2":record_ids[1],
            "confidence score":score,
            "link type":"dup",
        }
        if mapped_records_filepath is not None:
            # NOTE: the file is re-opened per row; keeps memory flat but is
            # slow for very large outputs
            with open(mapped_records_filepath, "a", newline = "") as f:
                mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
                writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
                writer.writerow(pair_dict)
        else:
            mapped_records.append(pair_dict)
        ids_with_links_1.append(record_ids[0])
        ids_with_links_2.append(record_ids[1])

    ids_with_links_1 = set(ids_with_links_1)
    ids_with_links_2 = set(ids_with_links_2)

    # include the records without found links
    ids_without_links_1 = list(set(unlabeled_data_1.keys()).difference(ids_with_links_1))
    ids_without_links_2 = list(set(unlabeled_data_2.keys()).difference(ids_with_links_2))

    print("Mapping unlinked records in dataset 1...")
    for record_id in tqdm(ids_without_links_1):
        pair_dict = {
            "record id 1":record_id,
            "record id 2":None,
            "confidence score":None,
            "link type":"solo",
        }
        mapped_records.append(pair_dict)

    print("Mapping unlinked records in dataset 2...")
    for record_id in tqdm(ids_without_links_2):
        pair_dict = {
            "record id 1":None,
            "record id 2":record_id,
            "confidence score":None,
            "link type":"solo",
        }
        mapped_records.append(pair_dict)

    if mapped_records_filepath is None:
        mapped_records = pd.DataFrame(mapped_records)
    else:
        # append the solo rows collected above, then signal streaming mode
        # by returning None for the mapping
        with open(mapped_records_filepath, "a", newline = "") as f:
            mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
            writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
            writer.writerows(mapped_records)
        mapped_records = None

    return mapped_records
def get_uncertain_clusters(mapped_records_df, threshold = 0.9):
    """
    Return the clusters whose mean confidence score is below `threshold`.

    Parameters
    ----------
    mapped_records_df : pd.DataFrame
        Output of map_cluster_ids; must contain the columns
        "cluster id" and "confidence score".
    threshold : float, default 0.9
        Clusters with a mean confidence score strictly below this are
        considered uncertain.

    Returns
    -------
    dict
        Mapping of cluster id -> mean confidence score, ordered from the
        least to the most confident cluster.
    """
    # Select the score column explicitly: calling .mean() on the whole
    # groupby fails on pandas >= 2.0 because of the non-numeric columns
    # ("record id", "cluster type") in mapped_records_df.
    cluster_means = (mapped_records_df
                     .groupby("cluster id")["confidence score"]
                     .mean()
                     .sort_values(ascending = True))
    uncertain_mask = cluster_means < threshold
    print("There are {} clusters with mean confidence score lower than {:.1f}% threshold".format(uncertain_mask.sum(), threshold*100))
    uncertain_clusters_dict = cluster_means[uncertain_mask].to_dict()
    return uncertain_clusters_dict
def get_pairs_from_uncertain_clusters(mapped_records_df, labeled_id_pairs, threshold = 0.9):
    """
    Generator over uncertain clusters (mean confidence score below
    `threshold`). For each such cluster, yields a tuple of:
    (record ids in the cluster,
     unordered record-id pairs in the cluster not yet in labeled_id_pairs,
     index of this cluster (1-based),
     total number of uncertain clusters,
     mean confidence score of the cluster).
    """
    assert isinstance(labeled_id_pairs, list)

    uncertain_clusters = get_uncertain_clusters(mapped_records_df, threshold = threshold)
    total_clusters = len(uncertain_clusters)

    for cluster_index, (cluster_id, mean_score) in enumerate(uncertain_clusters.items(), start = 1):
        # record ids belonging to this cluster
        in_cluster = mapped_records_df["cluster id"] == cluster_id
        ids_in_cluster = mapped_records_df.loc[in_cluster, "record id"].values.tolist()

        # every unordered record pair in the cluster that is not yet labeled
        untagged_pairs = [pair
                          for pair in (tuple(sorted(combo)) for combo in combinations(ids_in_cluster, 2))
                          if pair not in labeled_id_pairs]

        yield ids_in_cluster, untagged_pairs, cluster_index, total_clusters, mean_score
def find_ids_of_labeled_data(labeled_data, unlabeled_data):
    """
    Map each labeled record pair back to its record ids in unlabeled_data.

    Parameters
    ----------
    labeled_data : dict
        dedupe-style training data: {"distinct": [...], "match": [...]},
        where each entry is a pair of records, or (older dedupe training
        files) a dict holding the pair under "__value__".
    unlabeled_data : dict
        Mapping of record id -> record, searched by record equality.

    Returns
    -------
    pd.DataFrame
        One row per labeled pair with string columns
        "record id 1", "record id 2" and "label".
        An id is None when the record cannot be found.
    """
    def lookup_record_id(record):
        # Return the single id whose record equals `record`; warn and keep
        # the first one when several records compare equal.
        matching_ids = [key for key, val in unlabeled_data.items() if val == record]
        if len(matching_ids) > 1:
            print("Multiple record ids ({}) found for {}".format(len(matching_ids), record))
        # Bug fix: the original only unwrapped the lookup list when there was
        # more than one match, so the common single-match case stored a
        # 1-element list in the output instead of the id itself.
        return matching_ids[0] if matching_ids else None

    labeled_pair_ids = []
    for label, data_pairs_list in labeled_data.items():
        assert label in ["distinct", "match"]
        print("Finding ids for {} pairs".format(label))
        for data_pair in tqdm(data_pairs_list):
            try:
                # for backwards compatibility with older dedupe training files
                record_1, record_2 = data_pair["__value__"]
            except (TypeError, KeyError, IndexError):
                record_1, record_2 = data_pair
            labeled_pair_ids.append({"record id 1": lookup_record_id(record_1),
                                     "record id 2": lookup_record_id(record_2),
                                     "label": label})
    labeled_pair_ids = pd.DataFrame(labeled_pair_ids, dtype = "str")
    return labeled_pair_ids
def find_ids_of_labeled_data_rl(labeled_data, unlabeled_data_1, unlabeled_data_2):
    """
    Record-linkage variant of find_ids_of_labeled_data: the first record of
    each labeled pair is looked up in unlabeled_data_1 and the second in
    unlabeled_data_2.

    Parameters
    ----------
    labeled_data : dict
        dedupe-style training data: {"distinct": [...], "match": [...]},
        where each entry is a (record_1, record_2) pair.
    unlabeled_data_1, unlabeled_data_2 : dict
        Mappings of record id -> record, searched by record equality.

    Returns
    -------
    pd.DataFrame
        One row per labeled pair with string columns
        "record id 1", "record id 2" and "label".
        An id is None when the record cannot be found.
    """
    def lookup_record_id(record, unlabeled_data):
        # Return the single id whose record equals `record`; warn and keep
        # the first one when several records compare equal.
        matching_ids = [key for key, val in unlabeled_data.items() if val == record]
        if len(matching_ids) > 1:
            print("Multiple record ids ({}) found for {}".format(len(matching_ids), record))
        # Bug fix: the original only unwrapped the lookup list when there was
        # more than one match, so the common single-match case stored a
        # 1-element list in the output instead of the id itself.
        return matching_ids[0] if matching_ids else None

    labeled_pair_ids = []
    for label, data_pairs_list in labeled_data.items():
        assert label in ["distinct", "match"]
        print("Finding ids for {} pairs".format(label))
        for data_pair in tqdm(data_pairs_list):
            record_1, record_2 = data_pair
            labeled_pair_ids.append({"record id 1": lookup_record_id(record_1, unlabeled_data_1),
                                     "record id 2": lookup_record_id(record_2, unlabeled_data_2),
                                     "label": label})
    labeled_pair_ids = pd.DataFrame(labeled_pair_ids, dtype = "str")
    return labeled_pair_ids
def consoleLabel_cluster_old(deduper, mapped_records_df, labeled_id_pairs, unlabeled_data, threshold = 0.9):
    '''
    Command line interface for presenting and labeling uncertain clusters by the user.

    Iterates over clusters whose mean confidence score falls below `threshold`,
    prints every record of the cluster to stderr and asks whether the whole
    cluster is a match ("y"), should be reviewed pair by pair ("n"), or whether
    labeling should stop ("f"). Collected labels are fed back to the deduper
    via markPairs.

    Arguments :
        deduper : a trained dedupe object
        mapped_records_df : dataframe with "record id", "cluster id" and
            "confidence score" columns (output of map_cluster_ids)
        labeled_id_pairs : list of already-labeled (id_1, id_2) tuples to skip
        unlabeled_data : dict of record id -> record
        threshold : clusters with mean score below this are presented
    '''
    finished = False
    fields = [field.field for field in deduper.data_model.primary_fields]
    # field names must be unique, otherwise the per-field display is ambiguous
    assert len(fields) == len(list(set(fields)))
    labeled_pairs = {"distinct":[], "match":[]}
    uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df,
                                                                 labeled_id_pairs,
                                                                 threshold = threshold)
    while not finished:
        try:
            ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score = next(uncertain_pair_generator)
            records_in_cluster = {i:unlabeled_data[i] for i in ids_in_cluster}
        except StopIteration:
            # NOTE(review): if the generator is exhausted on the very first
            # iteration, n_uncertain_clusters is unbound here and this print
            # raises NameError — confirm the generator is never empty.
            print("Already tagged all {} uncertain clusters.".format(n_uncertain_clusters))
            print("Finished labeling")
            break
        print("Viewing {} out of {} uncertain clusters".format(nth_cluster, n_uncertain_clusters), file = sys.stderr)
        # NOTE(review): this line goes to stdout while the rest of the UI
        # prints to stderr (fixed in the newer consoleLabel_cluster).
        print("Cluster contains {} records".format(len(ids_in_cluster)))
        print("Mean Cluster Score {:.1f}%\n".format(mean_conf_score*100), file = sys.stderr)
        # show every record of the cluster, field by field
        for record_id, record in records_in_cluster.items():
            print("Record {}".format(record_id), file=sys.stderr)
            for field in fields:
                line = "{} : {}".format(field, record[field])
                print(line, file=sys.stderr)
            print(file=sys.stderr)
        user_input = _prompt_records_same()
        if user_input == "y":
            # whole cluster confirmed: label every untagged pair as a match
            for id_1, id_2 in pairs_in_cluster:
                record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
                labeled_pairs["match"].append(record_pair)
        elif user_input == "n":
            # review the cluster pair by pair
            print("Reviewing pairs in cluster", file=sys.stderr)
            for id_1, id_2 in pairs_in_cluster:
                record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
                for record in record_pair:
                    for field in fields:
                        line = "{} : {}".format(field, record[field])
                        print(line, file=sys.stderr)
                    print(file=sys.stderr)
                user_input = _prompt_records_same()
                if user_input == "y":
                    labeled_pairs["match"].append(record_pair)
                elif user_input == "n":
                    labeled_pairs["distinct"].append(record_pair)
                elif user_input == "f":
                    print("Finished labeling", file=sys.stderr)
                    finished = True
                    break
        elif user_input == "f":
            print("Finished labeling", file=sys.stderr)
            finished = True
    # hand the collected labels back to dedupe
    deduper.markPairs(labeled_pairs)
def consoleLabel_cluster(deduper, mapped_records_df, labeled_id_pairs, unlabeled_data, 
                        recall = 1.0, threshold = 0.9):
    '''
    Command line interface for presenting and labeling uncertain clusters by the user.

    Iterates over clusters whose mean confidence is below `threshold`, prints
    every record in the cluster to stderr, and asks whether they all match.
    Answering "n" drills down to labeling each record pair in the cluster
    individually.  After every cluster-level "y"/"n" answer the deduper is
    retrained, the data is re-clustered, and the uncertain-cluster generator is
    rebuilt from the new clustering.  Entering "f" at any prompt ends the session.

    Arguments:
        deduper : a dedupe object (provides data_model, markPairs, train, threshold)
        mapped_records_df : DataFrame of record-id -> cluster-id/score mappings
        labeled_id_pairs : list of already-labeled (id_1, id_2) pairs; extended in place
        unlabeled_data : dict of {record_id: record dict}
        recall : recall passed to deduper.train() on each retrain
        threshold : clusters with mean confidence below this are shown
    '''
    finished = False
    fields = [field.field for field in deduper.data_model.primary_fields]
    # field names must be unique, otherwise the per-field display is ambiguous
    assert len(fields) == len(list(set(fields)))
    labeled_pairs = {"distinct":[], "match":[]}
    uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df, 
                                                                labeled_id_pairs,
                                                                threshold = threshold)
    while not finished:
        try:
            ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score = next(uncertain_pair_generator)
            records_in_cluster = {i:unlabeled_data[i] for i in ids_in_cluster}
        except StopIteration:
            # NOTE(review): if the generator is exhausted on its very first call,
            # n_uncertain_clusters is unbound here and this print raises NameError
            print("Already tagged all {} uncertain clusters.".format(n_uncertain_clusters))
            print("Finished labeling")
            break
        print("Viewing {} out of {} uncertain clusters".format(nth_cluster, n_uncertain_clusters), file = sys.stderr)
        print("Cluster contains {} records".format(len(ids_in_cluster)), file = sys.stderr)
        print("Mean Cluster Score {:.1f}%\n".format(mean_conf_score*100), file = sys.stderr)
        # show every record in the cluster, field by field
        for record_id, record in records_in_cluster.items():
            print("Record {}".format(record_id), file=sys.stderr)
            for field in fields:
                line = "{} : {}".format(field, record[field])
                print(line, file=sys.stderr)
            print(file=sys.stderr)
        user_input = _prompt_records_same()
        if user_input == "y":
            # whole cluster confirmed: mark every pair in it as a match
            for id_1, id_2 in pairs_in_cluster:
                record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
                labeled_pairs["match"].append(record_pair)
                labeled_id_pairs.append((id_1, id_2))
        elif user_input == "n":
            # cluster rejected as a whole: review each pair individually
            print("Reviewing pairs in cluster", file=sys.stderr)
            for id_1, id_2 in pairs_in_cluster:
                record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
                for record in record_pair:
                    for field in fields:
                        line = "{} : {}".format(field, record[field])
                        print(line, file=sys.stderr)
                    print(file=sys.stderr)
                pair_user_input = _prompt_records_same()
                if pair_user_input == "y":
                    labeled_pairs["match"].append(record_pair)
                    labeled_id_pairs.append((id_1,id_2))
                elif pair_user_input == "n":
                    labeled_pairs["distinct"].append(record_pair)
                    labeled_id_pairs.append((id_1,id_2))
                elif pair_user_input == "f":
                    print("Finished labeling", file=sys.stderr)
                    finished = True
                    break
        elif user_input == "f":
            print("Finished labeling", file=sys.stderr)
            finished = True
        if (user_input == "y") or (user_input == "n"):
            # retrain on the newly labeled pairs, re-cluster, and rebuild the
            # uncertain-cluster generator from the fresh clustering
            deduper.markPairs(labeled_pairs)
            deduper.train(recall = recall)
            clustering_threshold = deduper.threshold(unlabeled_data, recall_weight=1)
            mapped_records_df = map_cluster_ids(deduper, unlabeled_data, clustering_threshold, canonicalize=False)
            print("Resampling uncertain clusters based on retrained model", file=sys.stderr)
            labeled_pairs = {"distinct":[], "match":[]}
            uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df, labeled_id_pairs, threshold = threshold)
def _prompt_records_same():
    """Ask the reviewer whether the displayed records refer to the same thing.

    Keeps prompting on stderr until one of "y", "n", "u" or "f" is typed and
    returns that single-character response.
    """
    print("Do these records refer to the same thing?", file = sys.stderr)
    accepted = {"y", "n", "u", "f"}
    while True:
        print("(y)es / (n)o / (u)nsure / (f)inished", file=sys.stderr)
        response = input()
        if response in accepted:
            return response
def get_clusters_from_links(links, solo_records):
    """Combine linked id pairs and solo record ids into one cluster list.

    `links` is an index of (id, id) pairs; connected components of the link
    graph become clusters, and each solo id is appended as its own entry.
    """
    assert isinstance(links, pd.Index)
    assert isinstance(solo_records, pd.Index)
    link_graph = nx.Graph(links.tolist())
    components = list(nx.connected_components(link_graph))
    components.extend(solo_records.tolist())
    return components
def get_deduper_candidate_pairs(deduper, unlabeled_data):
    """Return the deduper's blocked candidate pairs as a de-duplicated MultiIndex.

    Each candidate from ``deduper.pairs`` is a pair of (record_id, record)
    tuples; only the record ids are kept.
    """
    id_pairs = ((first[0], second[0]) for first, second in deduper.pairs(unlabeled_data))
    index = pd.MultiIndex.from_tuples(id_pairs)
    # the same pair can be emitted by more than one block, so drop duplicates
    return index.drop_duplicates()
def get_linker_candidate_pairs(linker, unlabeled_data_1, unlabeled_data_2):
    """Return the linker's blocked candidate pairs as a de-duplicated MultiIndex.

    Each candidate from ``linker.pairs`` is a pair of (record_id, record)
    tuples; only the record ids are kept.
    """
    id_pairs = ((first[0], second[0]) for first, second in linker.pairs(unlabeled_data_1, unlabeled_data_2))
    index = pd.MultiIndex.from_tuples(id_pairs)
    # the same pair can be emitted by more than one block, so drop duplicates
    return index.drop_duplicates()
# converts multindex to format preferred by dedupe method
def convert_rl_to_dedupe_candidate_pair(candidate_pairs, unlabeled_data):
    """Convert an index of id pairs into dedupe-style candidate pairs.

    dedupe expects each candidate as ((id_1, record_1), (id_2, record_2));
    records are looked up in ``unlabeled_data``.
    """
    assert isinstance(candidate_pairs, pd.Index)
    return [
        ((first_id, unlabeled_data[first_id]), (second_id, unlabeled_data[second_id]))
        for first_id, second_id in candidate_pairs
    ]
# converts multiindex to format preferred by linker method
def convert_rl_to_linker_candidate_pair(candidate_pairs, unlabeled_data_1, unlabeled_data_2):
    """Convert an index of id pairs into record-linkage candidate pairs.

    Each id is looked up in whichever of the two datasets holds it; ids must
    be unique across the two datasets.  Output pairs are
    ((id_1, record_1), (id_2, record_2)) in the index's pair order.
    """
    assert isinstance(candidate_pairs, pd.Index)
    converted = []
    for id_a, id_b in candidate_pairs:
        if id_a in unlabeled_data_1.keys():
            data_a = unlabeled_data_1[id_a]
            data_b = unlabeled_data_2[id_b]
            assert id_a not in unlabeled_data_2.keys(), "{} key found in both datasets. Keys must be unique".format(id_a)
            assert id_b not in unlabeled_data_1.keys(), "{} key found in both datasets. Keys must be unique".format(id_b)
        else:
            data_a = unlabeled_data_2[id_a]
            data_b = unlabeled_data_1[id_b]
            assert id_b not in unlabeled_data_2.keys(), "{} found in both datasets. Keys must be unique".format(id_b)
        # record linker candidate pairs must be in the format (record_id, record)
        converted.append(((id_a, data_a), (id_b, data_b)))
    return converted
def read_unlabeled_data_json(unlabeled_data_filepath, empty_str_to_none = True, numeric_fields = None):
    """Load unlabeled records from a JSON file of {record_id: {field: value}}.

    Parameters
    ----------
    unlabeled_data_filepath : str
        Path to the JSON file.
    empty_str_to_none : bool
        When True, empty-string field values are replaced with None
        (and NaNs introduced in numeric columns are mapped back to None).
    numeric_fields : list or None
        Columns parsed as floats; empty strings in them are left untouched.

    Returns
    -------
    dict of {record_id: {field: value}} with the requested conversions applied.
    """
    with open(unlabeled_data_filepath, "r") as json_file:
        raw_records = json.load(json_file)
    frame = pd.DataFrame.from_dict(raw_records, orient = "index")
    if numeric_fields is not None:
        assert isinstance(numeric_fields, list)
        for field in numeric_fields:
            frame[field] = frame[field].apply(lambda value: value if value == "" else float(value))
    if empty_str_to_none:
        for field in frame.columns.tolist():
            is_empty = (frame[field] == "")
            print("converting {} empty string values of column {} to None".format(is_empty.sum(), field))
            frame.loc[is_empty, field] = None
        # the loop above can turn numeric-column entries into NaN; map those back to None
        if numeric_fields is not None:
            for field in numeric_fields:
                has_value = frame[field].notnull()
                print("converting {} NaN values of column {} to None".format((~has_value).sum(), field))
                frame[field] = frame[field].where(has_value, None)
    return frame.to_dict(orient = "index")
def write_canonical_w_solo_unlabeled_data(canonicals_df, mapped_records_df, unlabeled_data,
                                          canonical_w_solo_unlabeled_filepath):
    """Write cluster canonicals plus solo records, keyed by cluster id, to JSON.

    Used for post-cluster review (matching solos to clusters and merging
    clusters), both of which operate on cluster canonicals.  Read the written
    file back with ``read_unlabeled_data_json``.
    """
    combined = canonicals_df.set_index("cluster id").to_dict(orient = "index")
    records_by_id = mapped_records_df.set_index("record id")
    solo_mask = records_by_id["cluster type"] == "solo"
    # every solo record stands in for its own (single-member) cluster
    for record_id in records_by_id.loc[solo_mask, :].index.tolist():
        cluster_id = records_by_id.loc[record_id, "cluster id"]
        combined[cluster_id] = unlabeled_data[record_id]
    with open(canonical_w_solo_unlabeled_filepath, 'w') as outfile:
        json.dump(combined, outfile)
def prepare_training_deduper(deduper, unlabeled_data, labeled_data_filepath, blocked_proportion = 0.5, sample_size = 15_000):
    """Initialise deduper training, resuming from saved labels when present.

    If ``labeled_data_filepath`` exists it is loaded as prior training data;
    delete that file to train from scratch.
    """
    if not os.path.exists(labeled_data_filepath):
        deduper.prepare_training(data = unlabeled_data, blocked_proportion = blocked_proportion,
                                 sample_size = sample_size)
        return
    print('reading labeled examples from ', labeled_data_filepath)
    with open(labeled_data_filepath, 'rb') as labeled_data:
        deduper.prepare_training(data = unlabeled_data, training_file = labeled_data,
                                 blocked_proportion = blocked_proportion,
                                 sample_size = sample_size)
def save_trained_deduper(deduper, labeled_data_filepath, settings_filepath):
    """Persist the deduper's labeled examples and learned settings to disk.

    With the settings file present, later runs can skip training entirely.
    """
    with open(labeled_data_filepath, 'w') as training_out:
        deduper.write_training(training_out)
    with open(settings_filepath, 'wb') as settings_out:
        deduper.write_settings(settings_out)
def prepare_training_linker(linker, unlabeled_data_1, unlabeled_data_2, labeled_data_filepath, blocked_proportion = 0.5, sample_size = 15_000):
    """Initialise linker training, resuming from saved labels when present.

    If ``labeled_data_filepath`` exists it is loaded as prior training data;
    delete that file to train from scratch.
    """
    if not os.path.exists(labeled_data_filepath):
        linker.prepare_training(data_1 = unlabeled_data_1, data_2 = unlabeled_data_2,
                                blocked_proportion = blocked_proportion,
                                sample_size = sample_size)
        return
    print('reading labeled examples from ', labeled_data_filepath)
    with open(labeled_data_filepath, 'rb') as labeled_data:
        linker.prepare_training(data_1 = unlabeled_data_1, data_2 = unlabeled_data_2,
                                training_file = labeled_data,
                                blocked_proportion = blocked_proportion,
                                sample_size = sample_size)
def save_trained_linker(linker, labeled_data_filepath, settings_filepath):
    """Persist the linker's labeled examples and learned settings to disk.

    With the settings file present, later runs can skip training entirely.
    """
    with open(labeled_data_filepath, 'w') as training_out:
        linker.write_training(training_out)
    with open(settings_filepath, 'wb') as settings_out:
        linker.write_settings(settings_out)
def get_data_of_labeled_pairs(labeled_pairs_df, unlabeled_data):
    """Join the full record fields onto each labeled pair.

    Returns a frame indexed by ("record id 1", "record id 2") whose columns
    are the label plus every record field suffixed with _1 / _2.
    """
    records = pd.DataFrame.from_dict(unlabeled_data, orient = "index")

    def _side(id_column, suffix):
        # pull the rows for one side of the pair and tag its columns
        side = records.loc[labeled_pairs_df[id_column], :]
        side.columns = ["{}_{}".format(col, suffix) for col in side.columns]
        side.index.name = id_column
        return side.reset_index()

    paired = pd.concat([_side("record id 1", 1), _side("record id 2", 2)], axis = 1, sort = False)
    paired = paired.sort_index(axis = 1)
    paired = paired.set_index(["record id 1", "record id 2"])
    labels = labeled_pairs_df.set_index(["record id 1", "record id 2"])
    return pd.merge(left = labels, right = paired, left_index = True, right_index = True, how = "inner")
def get_deduped_data(mapped_records_df, canonicals_df, unlabeled_data, none_to_empty_str = True):
    """Assemble the deduplicated dataset, one row per cluster id.

    Solo records keep their original fields; multi-record clusters are
    represented by their canonicalized rows from ``canonicals_df``.

    Parameters
    ----------
    mapped_records_df : pd.DataFrame
        Must contain "record id", "cluster id" and "cluster type" columns.
    canonicals_df : pd.DataFrame
        Canonical representation per cluster; must contain "cluster id".
    unlabeled_data : dict
        {record_id: {field: value}} source records.
    none_to_empty_str : bool
        When True, missing values in the result are replaced with "".

    Returns
    -------
    pd.DataFrame indexed by "cluster id".
    """
    mapped_records_df = mapped_records_df.set_index("record id")
    solo_record_ids = mapped_records_df.loc[mapped_records_df["cluster type"] == "solo","cluster id"].to_dict()
    deduped_data = {cluster_id:unlabeled_data[record_id] for record_id,cluster_id in solo_record_ids.items()}
    deduped_data = pd.DataFrame.from_dict(deduped_data, orient = "index")
    deduped_data.index.name = "cluster id"
    canonicals_df = canonicals_df.set_index("cluster id")
    # appending the canonicalized cluster representations to the solo records;
    # DataFrame.append was removed in pandas 2.0, so use pd.concat instead
    deduped_data = pd.concat([deduped_data, canonicals_df])
    if none_to_empty_str:
        deduped_data = deduped_data.where((deduped_data.notnull()), "")
    return deduped_data
def write_deduper_blocks(deduper, unlabeled_data, blocks_filepath):
    """Write the deduper's blocks to CSV, keeping record ids only.

    Each row is (block_id, record_id); the record bodies are dropped to keep
    the file small.  Blocks are numbered from 1 in generation order.
    """
    with open(blocks_filepath, "w", newline = "") as csv_file:
        writer = csv.writer(csv_file, quoting = csv.QUOTE_ALL)
        writer.writerow(["block_id", "record_id"])
        # dedupe yields blocks of (record_id, record) sequences; we keep the ids only
        for block_id, block in enumerate(deduper.pairs(unlabeled_data), start = 1):
            for record_id, _ in block:
                writer.writerow([block_id, record_id])
def read_deduper_blocks(unlabeled_data, blocks_filepath):
    """Yield dedupe-style blocks from a CSV written by ``write_deduper_blocks``.

    Rows must be grouped (sorted) by block id.  Each yielded block is a list
    of (record_id, record) pairs, with records looked up in ``unlabeled_data``.
    """
    active_block_id = None
    members = []
    with open(blocks_filepath, "r") as csv_file:
        for row in csv.DictReader(csv_file):
            block_id = row["block_id"]
            record_id = row["record_id"]
            if block_id == active_block_id:
                members.append((record_id, unlabeled_data[record_id]))
                continue
            # a new block starts: emit the finished one (if any) and reset
            if active_block_id is not None:
                yield members
            active_block_id = block_id
            members = [(record_id, unlabeled_data[record_id])]
    # emit the last block (an empty list when the file had no rows)
    yield members
def write_linker_blocks(linker, unlabeled_data_1, unlabeled_data_2, blocks_filepath):
    """Write the linker's blocks to CSV, keeping record ids only.

    Each block contributes two rows, (record_set_num, block_id, record_id):
    one for the record from each dataset.  Blocks are numbered from 1.
    """
    with open(blocks_filepath, "w", newline = "") as csv_file:
        writer = csv.writer(csv_file, quoting = csv.QUOTE_ALL)
        writer.writerow(["record_set_num", "block_id", "record_id"])
        for block_id, (rec_1, rec_2) in enumerate(linker.pairs(unlabeled_data_1, unlabeled_data_2), start = 1):
            # each linker block is a ((id, record), (id, record)) pair
            writer.writerow(["1", block_id, rec_1[0]])
            writer.writerow(["2", block_id, rec_2[0]])
def read_linker_blocks(unlabeled_data_1, unlabeled_data_2, blocks_filepath):
    # Yield linker-style blocks from a CSV written by write_linker_blocks.
    # assumes that the records sorted by block number
    block_records = ()
    block_set_1 = []
    block_set_2 = []
    current_block_id = None
    """
    from dedupe source code:
    Each block must be a made up of two sequences, (base_sequence, target_sequence)
    """
    with open(blocks_filepath, "r") as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            record_set_num, block_id, record_id = row["record_set_num"], row["block_id"], row["record_id"]
            if current_block_id == block_id:
                # same block: append to the accumulator of the matching dataset
                if record_set_num == "1":
                    block_set_1.append((record_id, unlabeled_data_1[record_id]))
                elif record_set_num == "2":
                    block_set_2.append((record_id, unlabeled_data_2[record_id]))
                else:
                    raise ValueError("record_set_num should only be 1 or 2, but got {}".format(record_set_num))
            else:
                # new block id: emit the completed block (unless this is the
                # first row of the file), then restart both accumulators
                if current_block_id is not None:
                    block_records = (block_set_1, block_set_2)
                    yield block_records
                current_block_id = block_id
                if record_set_num == "1":
                    block_set_1 = [(record_id, unlabeled_data_1[record_id])]
                    block_set_2 = []
                elif record_set_num == "2":
                    block_set_1 = []
                    block_set_2 = [(record_id, unlabeled_data_2[record_id])]
                else:
                    raise ValueError("record_set_num should only be 1 or 2, but got {}".format(record_set_num))
    # emit the final block; for an empty file this yields ([], [])
    block_records = (block_set_1, block_set_2)
    yield block_records
def get_blocked_pairs(deduper_or_linker, blocked_data):
    """Expand blocked data into candidate record pairs.

    Dedupe blocks yield within-block combinations; record-link blocks yield
    the cross product of their (base, target) sequences.
    """
    if isinstance(deduper_or_linker, dedupe.api.DedupeMatching):
        return (combinations(sorted(block), 2) for block in blocked_data)
    if isinstance(deduper_or_linker, dedupe.api.RecordLinkMatching):
        return (product(base, target) for base, target in blocked_data)
    raise ValueError("Passed not of class DedupeMatching or of RecordLinkMatching!")
def count_blocked_pairs(deduper_or_linker, blocked_data):
    """Count the candidate pairs produced by ``get_blocked_pairs``.

    Consumes the lazy per-block pair generators and returns the total.
    """
    pair_stream = itertools.chain.from_iterable(get_blocked_pairs(deduper_or_linker, blocked_data))
    return sum(1 for _ in pair_stream)
def write_training_set_from_pairs(labeled_pair_ids_df, labeled_data_filepath, unlabeled_data, unlabeled_data_2 = None):
    """Write a dedupe-consumable training file from labeled id pairs.

    ``labeled_pair_ids_df`` must have "record id 1", "record id 2" and
    "label" ("match"/"distinct") columns.  When ``unlabeled_data_2`` is given,
    the second record of each pair is looked up there (record-linkage mode).
    """
    training_examples = {"distinct":[], "match":[]}
    for _, pair_row in labeled_pair_ids_df.iterrows():
        first_record = unlabeled_data[pair_row["record id 1"]]
        second_source = unlabeled_data if unlabeled_data_2 is None else unlabeled_data_2
        second_record = second_source[pair_row["record id 2"]]
        # dedupe's serializer represents each pair as a tagged tuple
        training_examples[pair_row["label"]].append({
            "__class__":"tuple",
            "__value__":[first_record, second_record]
        })
    with open(labeled_data_filepath, "w") as json_file:
        simplejson.dump(training_examples,
                        json_file,
                        default=dedupe.serializer._to_json,
                        tuple_as_array=False,
                        ensure_ascii=True)
def get_deduped_data_for_rl(task_name, saved_files_path):
    # gets deduped dataset from respective deduping for rl
    #
    # Given a record-linkage task name of the form "rl-<data1>_<data2>", loads
    # each dataset's dedup outputs (mapped records + cluster canonicals), builds
    # the deduplicated version of each dataset, normalizes empty strings / NaNs
    # to None, and returns both as {record_id: record} dicts.
    dataset_name = task_name.split("-")[1]
    dataset_1_name, dataset_2_name = dataset_name.split("_")
    dedup_task_1 = "dedup-{}".format(dataset_1_name)
    dedup_task_2 = "dedup-{}".format(dataset_2_name)
    # get all filepaths
    unlabeled_data_1_filepath, unlabeled_data_2_filepath = dm_file_checker.get_proper_unlabeled_data_filepath(task_name, saved_files_path)
    numeric_fields_1, numeric_fields_2 = dm_file_checker.get_dataset_info(task_name, "numeric_fields", saved_files_path)
    print("Numeric fields 1 are {}".format(numeric_fields_1))
    print("Numeric fields 2 are {}".format(numeric_fields_2))
    canonicals_1_filepath = dm_file_checker.get_filepath(dedup_task_1, "cluster_canonical", saved_files_path)
    canonicals_2_filepath = dm_file_checker.get_filepath(dedup_task_2, "cluster_canonical", saved_files_path)
    mapped_records_1_filepath = dm_file_checker.get_filepath(dedup_task_1, "mapped_records", saved_files_path)
    mapped_records_2_filepath = dm_file_checker.get_filepath(dedup_task_2, "mapped_records", saved_files_path)
    # read in data from filepaths
    unlabeled_data_1 = read_unlabeled_data_json(unlabeled_data_1_filepath, empty_str_to_none = False,
                                                numeric_fields = numeric_fields_1)
    unlabeled_data_2 = read_unlabeled_data_json(unlabeled_data_2_filepath, empty_str_to_none = False,
                                                numeric_fields = numeric_fields_2)
    canonicals_1_df = pd.read_csv(canonicals_1_filepath, keep_default_na = False, low_memory = False)
    canonicals_2_df = pd.read_csv(canonicals_2_filepath, keep_default_na = False, low_memory = False)
    mapped_records_1_df = pd.read_csv(mapped_records_1_filepath, keep_default_na = False)
    mapped_records_2_df = pd.read_csv(mapped_records_2_filepath, keep_default_na = False)
    # get deduped data in dictionary form
    deduped_data_1 = get_deduped_data(mapped_records_1_df, canonicals_1_df, unlabeled_data_1, none_to_empty_str = False)
    deduped_data_2 = get_deduped_data(mapped_records_2_df, canonicals_2_df, unlabeled_data_2, none_to_empty_str = False)
    # re-parse numeric columns (canonical rows come from CSV as strings)
    if numeric_fields_1 is not None:
        for col in numeric_fields_1:
            deduped_data_1[col] = deduped_data_1[col].apply(lambda x: x if x == "" else float(x))
    if numeric_fields_2 is not None:
        for col in numeric_fields_2:
            deduped_data_2[col] = deduped_data_2[col].apply(lambda x: x if x == "" else float(x))
    for col in deduped_data_1.columns:
        empty_str_bool = (deduped_data_1[col] == "")
        print("in deduped data 1, converting {} empty string values of column {} to None".format(empty_str_bool.sum(), col))
        deduped_data_1.loc[empty_str_bool,col] = None
    for col in deduped_data_2.columns:
        empty_str_bool = (deduped_data_2[col] == "")
        print("in deduped data 2, converting {} empty string values of column {} to None".format(empty_str_bool.sum(), col))
        deduped_data_2.loc[empty_str_bool,col] = None
    # converting NaNs of numeric columns (NaNs introduced because of the previous line) to None
    if numeric_fields_1 is not None:
        for col in numeric_fields_1:
            not_nan_bool = deduped_data_1[col].notnull()
            print("in deduped data 1, converting {} NaN values of {} to None".format((~not_nan_bool).sum(), col))
            deduped_data_1[col] = deduped_data_1[col].where((not_nan_bool), None)
    if numeric_fields_2 is not None:
        for col in numeric_fields_2:
            not_nan_bool = deduped_data_2[col].notnull()
            print("in deduped data 2, converting {} NaN values of {} to None".format((~not_nan_bool).sum(), col))
            deduped_data_2[col] = deduped_data_2[col].where((not_nan_bool), None)
    deduped_data_1 = deduped_data_1.to_dict(orient = "index")
    deduped_data_2 = deduped_data_2.to_dict(orient = "index")
    return deduped_data_1, deduped_data_2
# function to make sure the all record ids are prepended with the name of the dataset
def verify_rec_id_format(record_id, data_name):
    """Check that a record id is prefixed with its dataset name.

    Null / NaN record ids pass the check (nothing to verify).
    """
    if pd.isnull(record_id):
        return True
    return record_id.split("-")[0] == data_name
# function to return all results from all record linkage results
def get_all_rl_results(rl_task_names, saved_files_path):
    # Collect every record-linkage task's mapped duplicate links into one
    # memory-mapped scored-pairs array, plus the set of all record ids seen.
    # Returns (scored_pairs, all_records) where scored_pairs is a structured
    # np.memmap with fields "pairs" (two fixed-width id strings) and "score".
    dupe_records = pd.DataFrame(columns = ["record id 1", "record id 2", "confidence score"])
    all_records = set()
    # iterate over each rl mapped file
    for rl_task in rl_task_names:
        data_name_1, data_name_2 = rl_task.split("-")[1].split("_")
        mapped_records_filepath = dm_file_checker.get_filepath(rl_task, "mapped_records", saved_files_path)
        print("Getting mapped record links from {}".format(rl_task))
        mapped_records_df = pd.read_csv(mapped_records_filepath)
        # make sure all record ids are prepended with the name of the dataset
        ok_records_1 = mapped_records_df["record id 1"].apply(lambda x: verify_rec_id_format(x, data_name_1)).all()
        ok_records_2 = mapped_records_df["record id 2"].apply(lambda x: verify_rec_id_format(x, data_name_2)).all()
        assert (ok_records_1 and ok_records_2), "Record ids aren't prepended with the dataset name!"
        append_dupe_records = mapped_records_df.loc[mapped_records_df["link type"] == "dup",\
                                                    ["record id 1", "record id 2","confidence score"]]
        dupe_records = dupe_records.append(append_dupe_records, ignore_index = True)
        append_all_records = mapped_records_df.loc[:,["record id 1","record id 2"]]
        append_all_records = append_all_records["record id 1"].dropna().unique().tolist() \
                            + append_all_records["record id 2"].dropna().unique().tolist()
        append_all_records = set(append_all_records)
        all_records.update(append_all_records)
    all_records = list(all_records)
    pairs = dupe_records.loc[:,["record id 1", "record id 2"]]\
                        .apply(lambda row: (row["record id 1"], row["record id 2"]), axis = 1)\
                        .tolist()
    n_pairs = len(pairs)
    # NOTE(review): 265-char fixed-width unicode id dtype — possibly intended
    # to be 256; confirm against the id lengths actually produced upstream
    id_type = (str, 265)
    pairs = np.array(pairs, dtype = id_type)
    scores = dupe_records.loc[:,["confidence score"]].to_numpy(dtype = float).reshape(-1)
    dtype = np.dtype([("pairs", id_type, 2),
                      ("score", "f4", 1)])
    # back the scored pairs with a temp-file memmap (dedupe-style scored_pairs);
    # NOTE(review): the temp file is never removed — it lives until OS cleanup
    temp_file, file_path = tempfile.mkstemp()
    os.close(temp_file)
    scored_pairs = np.memmap(file_path,
                            shape = n_pairs,
                            dtype = dtype)
    scored_pairs["pairs"] = pairs
    scored_pairs["score"] = scores
    return scored_pairs, all_records
def get_fusion_probs_and_threshold(scored_pairs, recall_weight = 1):
    """Pick a score threshold trading expected recall against precision.

    Parameters
    ----------
    scored_pairs : structured array with a 'score' field.
    recall_weight : number
        How strongly recall is favoured over precision (1 = balanced).

    Returns
    -------
    (probs, threshold) : the scores sorted descending, and the score at the
    index maximising recall*precision / (recall + recall_weight**2 * precision).
    """
    probs = np.sort(scored_pairs['score'].copy())[::-1]
    expected_dupes = np.cumsum(probs)
    recall = expected_dupes / expected_dupes[-1]
    precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
    score = recall * precision / (recall + recall_weight ** 2 * precision)
    i = np.argmax(score)
    print('Maximum expected recall and precision')
    print('recall: {:.2f}%'.format(recall[i]*100))
    print('precision: {:.2f}%'.format(precision[i]*100))
    print('With threshold: {:.2f}%'.format(probs[i]*100))
    return probs, probs[i]
def map_cluster_fusion_ids(scored_pairs, all_records, threshold):
    """Cluster fused scored pairs and map every record id to a cluster id.

    Records belonging to a cluster get type 'link' with their membership
    confidence; records in no cluster get a fresh solo cluster id ("fs-<n>")
    with type 'solo' and no score.  Returns a DataFrame of the mapping.
    """
    clustered_dupes = dedupe_cluster(scored_pairs, threshold)
    mapped_records = []
    clustered_record_ids = []
    cluster_counter = 0
    # assign cluster ids to record ids
    print("Mapping cluster ids...")
    for id_set, scores in tqdm(clustered_dupes):
        cluster_counter += 1
        cluster_id = "fs-{}".format(cluster_counter)
        for record_id, score in zip(id_set, scores):
            mapped_records.append({
                "record id": record_id,
                "cluster id": cluster_id,
                "confidence score": score,
                "cluster type":'link'
            })
            clustered_record_ids.append(record_id)
    clustered_record_ids = set(clustered_record_ids)
    solo_ids = [key for key in all_records if key not in clustered_record_ids]
    # assign solo ids to record ids
    print("Mapping solo record ids...")
    for record_id in tqdm(solo_ids):
        cluster_counter += 1
        mapped_records.append({
            "record id":record_id,
            "cluster id":"fs-{}".format(cluster_counter),
            "confidence score":None,
            "cluster type":'solo'
        })
    return pd.DataFrame(mapped_records)
def get_all_dedup_results(rl_task_names, saved_files_path, remove_data_name_prefix = True):
    """Collect the dedup mapped-records file of every dataset used in RL tasks.

    Parameters
    ----------
    rl_task_names : list of str
        Record-linkage task names of the form "rl-<data1>_<data2>".
    saved_files_path : str
        Root directory passed to dm_file_checker.
    remove_data_name_prefix : bool
        When True, strips the "<data_name>-" prefix from each record id.

    Returns
    -------
    pd.DataFrame with "record id", "cluster id", "dedup confidence score"
    and "dedup cluster type" columns (empty when no dedup files exist).
    """
    frames = []
    dedup_datasets = set()
    for rl_task in rl_task_names:
        data_name_1, data_name_2 = rl_task.split("-")[1].split("_")
        for data_name in [data_name_1, data_name_2]:
            dedup_task = "dedup-{}".format(data_name)
            mapped_records_filepath = dm_file_checker.get_filepath(dedup_task, "mapped_records", saved_files_path)
            # only load datasets that actually underwent deduplication, once each;
            # `and` (not bitwise &) so the membership test short-circuits
            if os.path.exists(mapped_records_filepath) and (data_name not in dedup_datasets):
                dedup_datasets.add(data_name)
                dedup_mapped_records = pd.read_csv(mapped_records_filepath)
                dedup_mapped_records = dedup_mapped_records.rename(columns = {"confidence score":"dedup confidence score",
                                                                            "cluster type":"dedup cluster type"})
                if remove_data_name_prefix:
                    dedup_mapped_records["record id"] = dedup_mapped_records["record id"]\
                                                        .apply(lambda x: x.replace("{}-".format(data_name), ""))
                frames.append(dedup_mapped_records)
    # DataFrame.append was removed in pandas 2.0; concatenate collected frames instead
    if frames:
        return pd.concat(frames, ignore_index = True)
    return pd.DataFrame()
def check_block_sizes(blocks):
    """Print the sizes and pair contributions of the 10 biggest blocks.

    Useful for spotting oversized blocks that would blow up the number of
    candidate pairs: a block of size s contributes s*(s-1)/2 pairs.
    """
    block_sizes = sorted((len(block) for block in blocks), reverse = True)
    print("Sizes of top 10 biggest blocks are: {}".format(block_sizes[:10]))
    record_pair_contributions = [int(size*(size-1)/2) for size in block_sizes[:10]]
    print("Record pair contributions from top 10 biggest blocks are : {}".format(record_pair_contributions))
"csv.DictWriter",
"csv.DictReader",
"pandas.read_csv",
"numpy.array",
"pandas.MultiIndex.from_tuples",
"os.remove",
"os.path.exists",
"dedupe.clustering.cluster",
"numpy.memmap",
"itertools.product",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"numpy.dtype",
"numpy.abs",
"dm_file_c... | [((1104, 1128), 'os.remove', 'os.remove', (['temp_filename'], {}), '(temp_filename)\n', (1113, 1128), False, 'import os\n'), ((1151, 1167), 'numpy.cumsum', 'np.cumsum', (['probs'], {}), '(probs)\n', (1160, 1167), True, 'import numpy as np\n'), ((1374, 1390), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (1383, 1390), True, 'import numpy as np\n'), ((2409, 2433), 'os.remove', 'os.remove', (['temp_filename'], {}), '(temp_filename)\n', (2418, 2433), False, 'import os\n'), ((2456, 2472), 'numpy.cumsum', 'np.cumsum', (['probs'], {}), '(probs)\n', (2465, 2472), True, 'import numpy as np\n'), ((2679, 2695), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (2688, 2695), True, 'import numpy as np\n'), ((3209, 3281), 'pandas.DataFrame', 'pd.DataFrame', (['model_weights'], {'columns': "['variable', 'logistic_reg_weight']"}), "(model_weights, columns=['variable', 'logistic_reg_weight'])\n", (3221, 3281), True, 'import pandas as pd\n'), ((6478, 6499), 'tqdm.tqdm', 'tqdm', (['clustered_dupes'], {}), '(clustered_dupes)\n', (6482, 6499), False, 'from tqdm import tqdm\n'), ((8218, 8232), 'tqdm.tqdm', 'tqdm', (['solo_ids'], {}), '(solo_ids)\n', (8222, 8232), False, 'from tqdm import tqdm\n'), ((9473, 9486), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (9479, 9486), True, 'import numpy as np\n'), ((13786, 13797), 'tqdm.tqdm', 'tqdm', (['links'], {}), '(links)\n', (13790, 13797), False, 'from tqdm import tqdm\n'), ((14940, 14965), 'tqdm.tqdm', 'tqdm', (['ids_without_links_1'], {}), '(ids_without_links_1)\n', (14944, 14965), False, 'from tqdm import tqdm\n'), ((15259, 15284), 'tqdm.tqdm', 'tqdm', (['ids_without_links_2'], {}), '(ids_without_links_2)\n', (15263, 15284), False, 'from tqdm import tqdm\n'), ((18903, 18946), 'pandas.DataFrame', 'pd.DataFrame', (['labeled_pair_ids'], {'dtype': '"""str"""'}), "(labeled_pair_ids, dtype='str')\n", (18915, 18946), True, 'import pandas as pd\n'), ((20155, 20198), 'pandas.DataFrame', 'pd.DataFrame', 
(['labeled_pair_ids'], {'dtype': '"""str"""'}), "(labeled_pair_ids, dtype='str')\n", (20167, 20198), True, 'import pandas as pd\n'), ((28217, 28261), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['candidate_records'], {}), '(candidate_records)\n', (28242, 28261), True, 'import pandas as pd\n'), ((28754, 28798), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['candidate_records'], {}), '(candidate_records)\n', (28779, 28798), True, 'import pandas as pd\n'), ((30964, 31018), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['unlabeled_data'], {'orient': '"""index"""'}), "(unlabeled_data, orient='index')\n", (30986, 31018), True, 'import pandas as pd\n'), ((33486, 33523), 'os.path.exists', 'os.path.exists', (['labeled_data_filepath'], {}), '(labeled_data_filepath)\n', (33500, 33523), False, 'import os\n'), ((34845, 34882), 'os.path.exists', 'os.path.exists', (['labeled_data_filepath'], {}), '(labeled_data_filepath)\n', (34859, 34882), False, 'import os\n'), ((36066, 36120), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['unlabeled_data'], {'orient': '"""index"""'}), "(unlabeled_data, orient='index')\n", (36088, 36120), True, 'import pandas as pd\n'), ((36548, 36598), 'pandas.concat', 'pd.concat', (['[df_left, df_right]'], {'axis': '(1)', 'sort': '(False)'}), '([df_left, df_right], axis=1, sort=False)\n', (36557, 36598), True, 'import pandas as pd\n'), ((36813, 36902), 'pandas.merge', 'pd.merge', ([], {'left': 'label_df', 'right': 'output', 'left_index': '(True)', 'right_index': '(True)', 'how': '"""inner"""'}), "(left=label_df, right=output, left_index=True, right_index=True,\n how='inner')\n", (36821, 36902), True, 'import pandas as pd\n'), ((37337, 37389), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['deduped_data'], {'orient': '"""index"""'}), "(deduped_data, orient='index')\n", (37359, 37389), True, 'import pandas as pd\n'), ((44339, 44418), 'dm_file_checker.get_proper_unlabeled_data_filepath', 
'dm_file_checker.get_proper_unlabeled_data_filepath', (['task_name', 'saved_files_path'], {}), '(task_name, saved_files_path)\n', (44389, 44418), False, 'import dm_file_checker\n'), ((44460, 44539), 'dm_file_checker.get_dataset_info', 'dm_file_checker.get_dataset_info', (['task_name', '"""numeric_fields"""', 'saved_files_path'], {}), "(task_name, 'numeric_fields', saved_files_path)\n", (44492, 44539), False, 'import dm_file_checker\n'), ((44697, 44782), 'dm_file_checker.get_filepath', 'dm_file_checker.get_filepath', (['dedup_task_1', '"""cluster_canonical"""', 'saved_files_path'], {}), "(dedup_task_1, 'cluster_canonical',\n saved_files_path)\n", (44725, 44782), False, 'import dm_file_checker\n'), ((44807, 44892), 'dm_file_checker.get_filepath', 'dm_file_checker.get_filepath', (['dedup_task_2', '"""cluster_canonical"""', 'saved_files_path'], {}), "(dedup_task_2, 'cluster_canonical',\n saved_files_path)\n", (44835, 44892), False, 'import dm_file_checker\n'), ((44926, 45004), 'dm_file_checker.get_filepath', 'dm_file_checker.get_filepath', (['dedup_task_1', '"""mapped_records"""', 'saved_files_path'], {}), "(dedup_task_1, 'mapped_records', saved_files_path)\n", (44954, 45004), False, 'import dm_file_checker\n'), ((45037, 45115), 'dm_file_checker.get_filepath', 'dm_file_checker.get_filepath', (['dedup_task_2', '"""mapped_records"""', 'saved_files_path'], {}), "(dedup_task_2, 'mapped_records', saved_files_path)\n", (45065, 45115), False, 'import dm_file_checker\n'), ((45554, 45629), 'pandas.read_csv', 'pd.read_csv', (['canonicals_1_filepath'], {'keep_default_na': '(False)', 'low_memory': '(False)'}), '(canonicals_1_filepath, keep_default_na=False, low_memory=False)\n', (45565, 45629), True, 'import pandas as pd\n'), ((45656, 45731), 'pandas.read_csv', 'pd.read_csv', (['canonicals_2_filepath'], {'keep_default_na': '(False)', 'low_memory': '(False)'}), '(canonicals_2_filepath, keep_default_na=False, low_memory=False)\n', (45667, 45731), True, 'import pandas as pd\n'), 
((45767, 45828), 'pandas.read_csv', 'pd.read_csv', (['mapped_records_1_filepath'], {'keep_default_na': '(False)'}), '(mapped_records_1_filepath, keep_default_na=False)\n', (45778, 45828), True, 'import pandas as pd\n'), ((45857, 45918), 'pandas.read_csv', 'pd.read_csv', (['mapped_records_2_filepath'], {'keep_default_na': '(False)'}), '(mapped_records_2_filepath, keep_default_na=False)\n', (45868, 45918), True, 'import pandas as pd\n'), ((48171, 48191), 'pandas.isnull', 'pd.isnull', (['record_id'], {}), '(record_id)\n', (48180, 48191), True, 'import pandas as pd\n'), ((48438, 48510), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['record id 1', 'record id 2', 'confidence score']"}), "(columns=['record id 1', 'record id 2', 'confidence score'])\n", (48450, 48510), True, 'import pandas as pd\n'), ((50284, 50314), 'numpy.array', 'np.array', (['pairs'], {'dtype': 'id_type'}), '(pairs, dtype=id_type)\n', (50292, 50314), True, 'import numpy as np\n'), ((50430, 50483), 'numpy.dtype', 'np.dtype', (["[('pairs', id_type, 2), ('score', 'f4', 1)]"], {}), "([('pairs', id_type, 2), ('score', 'f4', 1)])\n", (50438, 50483), True, 'import numpy as np\n'), ((50537, 50555), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (50553, 50555), False, 'import tempfile\n'), ((50560, 50579), 'os.close', 'os.close', (['temp_file'], {}), '(temp_file)\n', (50568, 50579), False, 'import os\n'), ((50604, 50652), 'numpy.memmap', 'np.memmap', (['file_path'], {'shape': 'n_pairs', 'dtype': 'dtype'}), '(file_path, shape=n_pairs, dtype=dtype)\n', (50613, 50652), True, 'import numpy as np\n'), ((51025, 51041), 'numpy.cumsum', 'np.cumsum', (['probs'], {}), '(probs)\n', (51034, 51041), True, 'import numpy as np\n'), ((51248, 51264), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (51257, 51264), True, 'import numpy as np\n'), ((51605, 51644), 'dedupe.clustering.cluster', 'dedupe_cluster', (['scored_pairs', 'threshold'], {}), '(scored_pairs, threshold)\n', (51619, 51644), True, 
'from dedupe.clustering import cluster as dedupe_cluster\n'), ((51811, 51832), 'tqdm.tqdm', 'tqdm', (['clustered_dupes'], {}), '(clustered_dupes)\n', (51815, 51832), False, 'from tqdm import tqdm\n'), ((52531, 52545), 'tqdm.tqdm', 'tqdm', (['solo_ids'], {}), '(solo_ids)\n', (52535, 52545), False, 'from tqdm import tqdm\n'), ((52852, 52880), 'pandas.DataFrame', 'pd.DataFrame', (['mapped_records'], {}), '(mapped_records)\n', (52864, 52880), True, 'import pandas as pd\n'), ((53032, 53046), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (53044, 53046), True, 'import pandas as pd\n'), ((654, 751), 'dedupe.core.scoreDuplicates', 'dedupe.core.scoreDuplicates', (['pairs', 'deduper.data_model', 'deduper.classifier', 'deduper.num_cores'], {}), '(pairs, deduper.data_model, deduper.classifier,\n deduper.num_cores)\n', (681, 751), False, 'import dedupe\n'), ((1962, 2056), 'dedupe.core.scoreDuplicates', 'dedupe.core.scoreDuplicates', (['pairs', 'linker.data_model', 'linker.classifier', 'linker.num_cores'], {}), '(pairs, linker.data_model, linker.classifier,\n linker.num_cores)\n', (1989, 2056), False, 'import dedupe\n'), ((8690, 8718), 'pandas.DataFrame', 'pd.DataFrame', (['mapped_records'], {}), '(mapped_records)\n', (8702, 8718), True, 'import pandas as pd\n'), ((9045, 9077), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_canonicals'], {}), '(cluster_canonicals)\n', (9057, 9077), True, 'import pandas as pd\n'), ((9263, 9283), 'os.remove', 'os.remove', (['mmap_file'], {}), '(mmap_file)\n', (9272, 9283), False, 'import os\n'), ((13586, 13606), 'os.remove', 'os.remove', (['mmap_file'], {}), '(mmap_file)\n', (13595, 13606), False, 'import os\n'), ((15563, 15591), 'pandas.DataFrame', 'pd.DataFrame', (['mapped_records'], {}), '(mapped_records)\n', (15575, 15591), True, 'import pandas as pd\n'), ((17267, 17298), 'itertools.combinations', 'combinations', (['ids_in_cluster', '(2)'], {}), '(ids_in_cluster, 2)\n', (17279, 17298), False, 'from itertools import combinations, 
product\n'), ((17912, 17933), 'tqdm.tqdm', 'tqdm', (['data_pairs_list'], {}), '(data_pairs_list)\n', (17916, 17933), False, 'from tqdm import tqdm\n'), ((19303, 19324), 'tqdm.tqdm', 'tqdm', (['data_pairs_list'], {}), '(data_pairs_list)\n', (19307, 19324), False, 'from tqdm import tqdm\n'), ((27839, 27872), 'networkx.connected_components', 'nx.connected_components', (['clusters'], {}), '(clusters)\n', (27862, 27872), True, 'import networkx as nx\n'), ((30921, 30941), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (30930, 30941), False, 'import json\n'), ((33124, 33165), 'json.dump', 'json.dump', (['canonical_w_solo_data', 'outfile'], {}), '(canonical_w_solo_data, outfile)\n', (33133, 33165), False, 'import json\n'), ((38040, 38083), 'csv.writer', 'csv.writer', (['csv_file'], {'quoting': 'csv.QUOTE_ALL'}), '(csv_file, quoting=csv.QUOTE_ALL)\n', (38050, 38083), False, 'import csv\n'), ((39071, 39095), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (39085, 39095), False, 'import csv\n'), ((39907, 39950), 'csv.writer', 'csv.writer', (['csv_file'], {'quoting': 'csv.QUOTE_ALL'}), '(csv_file, quoting=csv.QUOTE_ALL)\n', (39917, 39950), False, 'import csv\n'), ((40839, 40863), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (40853, 40863), False, 'import csv\n'), ((43705, 43833), 'simplejson.dump', 'simplejson.dump', (['labeled_data_train', 'json_file'], {'default': 'dedupe.serializer._to_json', 'tuple_as_array': '(False)', 'ensure_ascii': '(True)'}), '(labeled_data_train, json_file, default=dedupe.serializer.\n _to_json, tuple_as_array=False, ensure_ascii=True)\n', (43720, 43833), False, 'import simplejson\n'), ((48714, 48787), 'dm_file_checker.get_filepath', 'dm_file_checker.get_filepath', (['rl_task', '"""mapped_records"""', 'saved_files_path'], {}), "(rl_task, 'mapped_records', saved_files_path)\n", (48742, 48787), False, 'import dm_file_checker\n'), ((48885, 48921), 'pandas.read_csv', 'pd.read_csv', 
(['mapped_records_filepath'], {}), '(mapped_records_filepath)\n', (48896, 48921), True, 'import pandas as pd\n'), ((5132, 5206), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'mapped_records_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=mapped_records_header, quoting=csv.QUOTE_ALL)\n', (5146, 5206), False, 'import csv\n'), ((8818, 8892), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'mapped_records_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=mapped_records_header, quoting=csv.QUOTE_ALL)\n', (8832, 8892), False, 'import csv\n'), ((10378, 10430), 'dedupe.canonical.getCentroid', 'dedupe.canonical.getCentroid', (['key_values', 'comparator'], {}), '(key_values, comparator)\n', (10406, 10430), False, 'import dedupe\n'), ((12484, 12558), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'mapped_records_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=mapped_records_header, quoting=csv.QUOTE_ALL)\n', (12498, 12558), False, 'import csv\n'), ((15791, 15865), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'mapped_records_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=mapped_records_header, quoting=csv.QUOTE_ALL)\n', (15805, 15865), False, 'import csv\n'), ((53323, 53399), 'dm_file_checker.get_filepath', 'dm_file_checker.get_filepath', (['dedup_task', '"""mapped_records"""', 'saved_files_path'], {}), "(dedup_task, 'mapped_records', saved_files_path)\n", (53351, 53399), False, 'import dm_file_checker\n'), ((5583, 5660), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'cluster_canonical_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=cluster_canonical_header, quoting=csv.QUOTE_ALL)\n', (5597, 5660), False, 'import csv\n'), ((14271, 14345), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'mapped_records_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=mapped_records_header, quoting=csv.QUOTE_ALL)\n', (14285, 14345), False, 'import csv\n'), ((42463, 42484), 
'itertools.product', 'product', (['base', 'target'], {}), '(base, target)\n', (42470, 42484), False, 'from itertools import combinations, product\n'), ((53493, 53532), 'os.path.exists', 'os.path.exists', (['mapped_records_filepath'], {}), '(mapped_records_filepath)\n', (53507, 53532), False, 'import os\n'), ((53656, 53692), 'pandas.read_csv', 'pd.read_csv', (['mapped_records_filepath'], {}), '(mapped_records_filepath)\n', (53667, 53692), True, 'import pandas as pd\n'), ((7080, 7157), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'cluster_canonical_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=cluster_canonical_header, quoting=csv.QUOTE_ALL)\n', (7094, 7157), False, 'import csv\n'), ((7716, 7790), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'mapped_records_header', 'quoting': 'csv.QUOTE_ALL'}), '(f, fieldnames=mapped_records_header, quoting=csv.QUOTE_ALL)\n', (7730, 7790), False, 'import csv\n')] |
#In this file we provide different methods to run attacks on different models
import torch
import numpy
import ShuffleDefense
from ModelPlus import ModelPlus
import DataManagerPytorch as DMP
import AttackWrappersRayS
import AttackWrappersAdaptiveBlackBox
import AttackWrappersSAGA
from TransformerModels import VisionTransformer, CONFIGS
import BigTransferModels
from collections import OrderedDict
#Load the ViT-L-16 and CIFAR-10 dataset
def LoadViTLAndCIFAR10():
#Basic variable and data setup
device = torch.device("cuda")
numClasses = 10
imgSize = 224
batchSize = 8
#Load the CIFAR-10 data
valLoader = DMP.GetCIFAR10Validation(imgSize, batchSize)
#Load ViT-L-16
config = CONFIGS["ViT-L_16"]
model = VisionTransformer(config, imgSize, zero_head=True, num_classes=numClasses)
dir = "Models/ViT-L_16,cifar10,run0_15K_checkpoint.bin"
dict = torch.load(dir)
model.load_state_dict(dict)
model.eval()
#Wrap the model in the ModelPlus class
modelPlus = ModelPlus("ViT-L_16", model, device, imgSizeH=imgSize, imgSizeW=imgSize, batchSize=batchSize)
return valLoader, modelPlus
#Load the shuffle defense containing ViT-L-16 and BiT-M-R101x3
#For all attacks except SAGA, vis should be false (makes the Vision tranformer return the attention weights if true)
def LoadShuffleDefenseAndCIFAR10(vis=False):
modelPlusList = []
#Basic variable and data setup
device = torch.device("cuda")
numClasses = 10
imgSize = 224
batchSize = 8
#Load the CIFAR-10 data
valLoader = DMP.GetCIFAR10Validation(imgSize, batchSize)
#Load ViT-L-16
config = CONFIGS["ViT-L_16"]
model = VisionTransformer(config, imgSize, zero_head=True, num_classes=numClasses, vis = vis)
dir = "Models/ViT-L_16,cifar10,run0_15K_checkpoint.bin"
dict = torch.load(dir)
model.load_state_dict(dict)
model.eval()
#Wrap the model in the ModelPlus class
modelPlusV = ModelPlus("ViT-L_16", model, device, imgSizeH=imgSize, imgSizeW=imgSize, batchSize=batchSize)
modelPlusList.append(modelPlusV)
#Load the BiT-M-R101x3
dirB = "Models/BiT-M-R101x3-Run0.tar"
modelB = BigTransferModels.KNOWN_MODELS["BiT-M-R101x3"](head_size=numClasses, zero_head=False)
#Get the checkpoint
checkpoint = torch.load(dirB, map_location="cpu")
#Remove module so that it will load properly
new_state_dict = OrderedDict()
for k, v in checkpoint["model"].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
#Load the dictionary
modelB.load_state_dict(new_state_dict)
modelB.eval()
#Wrap the model in the ModelPlus class
#Here we hard code the Big Transfer Model Plus class input size to 160x128 (what it was trained on)
modelBig101Plus = ModelPlus("BiT-M-R101x3", modelB, device, imgSizeH=160, imgSizeW=128, batchSize=batchSize)
modelPlusList.append(modelBig101Plus)
#Now time to build the defense
defense = ShuffleDefense.ShuffleDefense(modelPlusList, numClasses)
return valLoader, defense
#Method to do the RayS attack on a single Vision Transformers
def RaySAttackVisionTransformer():
#Load the model and dataset
valLoader, defense = LoadViTLAndCIFAR10()
#Get the clean samples
numClasses = 10
attackSampleNum = 1000
cleanLoader = DMP.GetCorrectlyIdentifiedSamplesBalancedDefense(defense, attackSampleNum, valLoader, numClasses)
#Set the attack parameters
epsMax = 0.031
queryLimit = 10000
#The next line does the actual attack on the defense
advLoader = AttackWrappersRayS.RaySAttack(defense, epsMax, queryLimit, cleanLoader)
#Check the results
robustAcc = defense.validateD(advLoader)
cleanAcc = defense.validateD(valLoader)
#Print the results
print("Queries used:", queryLimit)
print("Robust acc:", robustAcc)
print("Clean acc:", cleanAcc)
#Here we do the RayS attack on a shuffle defense comprised of two models, ViT-L-16 and BiT-M-R101x3
def RaySAttackShuffleDefense():
#Load the model and dataset
valLoader, defense = LoadShuffleDefenseAndCIFAR10()
#Get the clean samples
numClasses = 10
attackSampleNum = 1000
cleanLoader = DMP.GetCorrectlyIdentifiedSamplesBalancedDefense(defense, attackSampleNum, valLoader, numClasses)
#Set the attack parameters
epsMax = 0.031
queryLimit = 10000
#The next line does the actual attack on the defense
advLoader = AttackWrappersRayS.RaySAttack(defense, epsMax, queryLimit, cleanLoader)
#Check the results
robustAcc = defense.validateD(advLoader)
cleanAcc = defense.validateD(valLoader)
#Print the results
print("Queries used:", queryLimit)
print("Robust acc:", robustAcc)
print("Clean acc:", cleanAcc)
#Run the 100% strength adaptive attack on ViT-L-16
def AdaptiveAttackVisionTransformer():
#Corresponding tag for saving files
#First part indicates the type of defense, second part indidcates the synthetic model and last part indicates the strenght of the attack (100%)
saveTag = "ViT-L-16, ViT-32(ImageNet21K), p100"
device = torch.device("cuda")
#Attack parameters
numAttackSamples = 1000
epsForAttacks = 0.031
clipMin = 0.0
clipMax = 1.0
#Parameters of training the synthetic model
imgSize = 224
batchSize = 32
numClasses = 10
numIterations = 4
epochsPerIteration = 10
epsForAug = 0.1 #when generating synthetic data, this value is eps for FGSM used to generate synthetic data
learningRate = (3e-2) / 2 #Learning rate of the synthetic model
#Load the training dataset, validation dataset and the defense
valLoader, defense = LoadViTLAndCIFAR10()
trainLoader = DMP.GetCIFAR10Training(imgSize, batchSize)
#Get the clean data
xTest, yTest = DMP.DataLoaderToTensor(valLoader)
cleanLoader = DMP.GetCorrectlyIdentifiedSamplesBalancedDefense(defense, numAttackSamples, valLoader, numClasses)
#Create the synthetic model
syntheticDir = "Models//imagenet21k_ViT-B_32.npz"
config = CONFIGS["ViT-B_32"]
syntheticModel = VisionTransformer(config, imgSize, zero_head=True, num_classes=numClasses)
syntheticModel.load_from(numpy.load(syntheticDir))
syntheticModel.to(device)
#Do the attack
oracle = defense
dataLoaderForTraining = trainLoader
optimizerName = "sgd"
#Last line does the attack
AttackWrappersAdaptiveBlackBox.AdaptiveAttack(saveTag, device, oracle, syntheticModel, numIterations, epochsPerIteration, epsForAug, learningRate, optimizerName, dataLoaderForTraining, cleanLoader, numClasses, epsForAttacks, clipMin, clipMax)
#Run the 100% strength adaptive attack on shuffle defense
def AdaptiveAttackShuffleDefense():
#Corresponding tag for saving files
#First part indicates the type of defense, second part indidcates the synthetic model and last part indicates the strenght of the attack (100%)
saveTag = "ViT-L-16, ViT-32(ImageNet21K), p100"
device = torch.device("cuda")
#Attack parameters
numAttackSamples = 1000
epsForAttacks = 0.031
clipMin = 0.0
clipMax = 1.0
#Parameters of training the synthetic model
imgSize = 224
batchSize = 32
numClasses = 10
numIterations = 4
epochsPerIteration = 10
epsForAug = 0.1 #when generating synthetic data, this value is eps for FGSM used to generate synthetic data
learningRate = (3e-2) / 2 #Learning rate of the synthetic model
#Load the training dataset, validation dataset and the defense
valLoader, defense = LoadShuffleDefenseAndCIFAR10()
trainLoader = DMP.GetCIFAR10Training(imgSize, batchSize)
#Get the clean data
xTest, yTest = DMP.DataLoaderToTensor(valLoader)
cleanLoader = DMP.GetCorrectlyIdentifiedSamplesBalancedDefense(defense, numAttackSamples, valLoader, numClasses)
#Create the synthetic model
syntheticDir = "Models//imagenet21k_ViT-B_32.npz"
config = CONFIGS["ViT-B_32"]
syntheticModel = VisionTransformer(config, imgSize, zero_head=True, num_classes=numClasses)
syntheticModel.load_from(numpy.load(syntheticDir))
syntheticModel.to(device)
#Do the attack
oracle = defense
dataLoaderForTraining = trainLoader
optimizerName = "sgd"
#Last line does the attack
AttackWrappersAdaptiveBlackBox.AdaptiveAttack(saveTag, device, oracle, syntheticModel, numIterations, epochsPerIteration, epsForAug, learningRate, optimizerName, dataLoaderForTraining, cleanLoader, numClasses, epsForAttacks, clipMin, clipMax)
#Run the Self-Attention Gradient Attack (SAGA) on ViT-L and BiT-M-R101x3
def SelfAttentionGradientAttackCIFAR10():
print("Running Self-Attention Gradient Attack on ViT-L-16 and BiT-M-R101x3")
#Set up the parameters for the attack
attackSampleNum = 1000
numClasses = 10
coefficientArray = torch.zeros(2)
secondcoeff = 2.0000e-04
coefficientArray[0] = 1.0 - secondcoeff
coefficientArray[1] = secondcoeff
print("Coeff Array:")
print(coefficientArray)
device = torch.device("cuda")
epsMax = 0.031
clipMin = 0.0
clipMax = 1.0
numSteps = 10
#Load the models and the dataset
#Note it is important to set vis to true so the transformer's model output returns the attention weights
valLoader, defense = LoadShuffleDefenseAndCIFAR10(vis=True)
modelPlusList = defense.modelPlusList
#Note that the batch size will effect how the gradient is computed in PyTorch
#Here we use batch size 8 for ViT-L and batch size 2 for BiT-M. Other batch sizes are possible but they will not generate the same result
modelPlusList[0].batchSize = 8
modelPlusList[1].batchSize = 2
#Get the clean examples
cleanLoader =AttackWrappersSAGA.GetFirstCorrectlyOverlappingSamplesBalanced(device, attackSampleNum, numClasses, valLoader, modelPlusList)
#Do the attack
advLoader = AttackWrappersSAGA.SelfAttentionGradientAttack(device, epsMax, numSteps, modelPlusList, coefficientArray, cleanLoader, clipMin, clipMax)
#Go through and check the robust accuray of each model on the adversarial examples
for i in range(0, len(modelPlusList)):
acc = modelPlusList[i].validateD(advLoader)
print(modelPlusList[i].modelName+" Robust Acc:", acc) | [
"DataManagerPytorch.GetCIFAR10Training",
"TransformerModels.VisionTransformer",
"ModelPlus.ModelPlus",
"collections.OrderedDict",
"torch.load",
"DataManagerPytorch.DataLoaderToTensor",
"AttackWrappersRayS.RaySAttack",
"DataManagerPytorch.GetCIFAR10Validation",
"AttackWrappersSAGA.GetFirstCorrectlyOv... | [((532, 552), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (544, 552), False, 'import torch\n'), ((658, 702), 'DataManagerPytorch.GetCIFAR10Validation', 'DMP.GetCIFAR10Validation', (['imgSize', 'batchSize'], {}), '(imgSize, batchSize)\n', (682, 702), True, 'import DataManagerPytorch as DMP\n'), ((770, 844), 'TransformerModels.VisionTransformer', 'VisionTransformer', (['config', 'imgSize'], {'zero_head': '(True)', 'num_classes': 'numClasses'}), '(config, imgSize, zero_head=True, num_classes=numClasses)\n', (787, 844), False, 'from TransformerModels import VisionTransformer, CONFIGS\n'), ((918, 933), 'torch.load', 'torch.load', (['dir'], {}), '(dir)\n', (928, 933), False, 'import torch\n'), ((1046, 1143), 'ModelPlus.ModelPlus', 'ModelPlus', (['"""ViT-L_16"""', 'model', 'device'], {'imgSizeH': 'imgSize', 'imgSizeW': 'imgSize', 'batchSize': 'batchSize'}), "('ViT-L_16', model, device, imgSizeH=imgSize, imgSizeW=imgSize,\n batchSize=batchSize)\n", (1055, 1143), False, 'from ModelPlus import ModelPlus\n'), ((1477, 1497), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1489, 1497), False, 'import torch\n'), ((1603, 1647), 'DataManagerPytorch.GetCIFAR10Validation', 'DMP.GetCIFAR10Validation', (['imgSize', 'batchSize'], {}), '(imgSize, batchSize)\n', (1627, 1647), True, 'import DataManagerPytorch as DMP\n'), ((1715, 1802), 'TransformerModels.VisionTransformer', 'VisionTransformer', (['config', 'imgSize'], {'zero_head': '(True)', 'num_classes': 'numClasses', 'vis': 'vis'}), '(config, imgSize, zero_head=True, num_classes=numClasses,\n vis=vis)\n', (1732, 1802), False, 'from TransformerModels import VisionTransformer, CONFIGS\n'), ((1874, 1889), 'torch.load', 'torch.load', (['dir'], {}), '(dir)\n', (1884, 1889), False, 'import torch\n'), ((2003, 2100), 'ModelPlus.ModelPlus', 'ModelPlus', (['"""ViT-L_16"""', 'model', 'device'], {'imgSizeH': 'imgSize', 'imgSizeW': 'imgSize', 'batchSize': 
'batchSize'}), "('ViT-L_16', model, device, imgSizeH=imgSize, imgSizeW=imgSize,\n batchSize=batchSize)\n", (2012, 2100), False, 'from ModelPlus import ModelPlus\n'), ((2350, 2386), 'torch.load', 'torch.load', (['dirB'], {'map_location': '"""cpu"""'}), "(dirB, map_location='cpu')\n", (2360, 2386), False, 'import torch\n'), ((2459, 2472), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2470, 2472), False, 'from collections import OrderedDict\n'), ((2855, 2949), 'ModelPlus.ModelPlus', 'ModelPlus', (['"""BiT-M-R101x3"""', 'modelB', 'device'], {'imgSizeH': '(160)', 'imgSizeW': '(128)', 'batchSize': 'batchSize'}), "('BiT-M-R101x3', modelB, device, imgSizeH=160, imgSizeW=128,\n batchSize=batchSize)\n", (2864, 2949), False, 'from ModelPlus import ModelPlus\n'), ((3041, 3097), 'ShuffleDefense.ShuffleDefense', 'ShuffleDefense.ShuffleDefense', (['modelPlusList', 'numClasses'], {}), '(modelPlusList, numClasses)\n', (3070, 3097), False, 'import ShuffleDefense\n'), ((3406, 3507), 'DataManagerPytorch.GetCorrectlyIdentifiedSamplesBalancedDefense', 'DMP.GetCorrectlyIdentifiedSamplesBalancedDefense', (['defense', 'attackSampleNum', 'valLoader', 'numClasses'], {}), '(defense, attackSampleNum,\n valLoader, numClasses)\n', (3454, 3507), True, 'import DataManagerPytorch as DMP\n'), ((3657, 3728), 'AttackWrappersRayS.RaySAttack', 'AttackWrappersRayS.RaySAttack', (['defense', 'epsMax', 'queryLimit', 'cleanLoader'], {}), '(defense, epsMax, queryLimit, cleanLoader)\n', (3686, 3728), False, 'import AttackWrappersRayS\n'), ((4304, 4405), 'DataManagerPytorch.GetCorrectlyIdentifiedSamplesBalancedDefense', 'DMP.GetCorrectlyIdentifiedSamplesBalancedDefense', (['defense', 'attackSampleNum', 'valLoader', 'numClasses'], {}), '(defense, attackSampleNum,\n valLoader, numClasses)\n', (4352, 4405), True, 'import DataManagerPytorch as DMP\n'), ((4555, 4626), 'AttackWrappersRayS.RaySAttack', 'AttackWrappersRayS.RaySAttack', (['defense', 'epsMax', 'queryLimit', 'cleanLoader'], {}), '(defense, 
epsMax, queryLimit, cleanLoader)\n', (4584, 4626), False, 'import AttackWrappersRayS\n'), ((5232, 5252), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5244, 5252), False, 'import torch\n'), ((5857, 5899), 'DataManagerPytorch.GetCIFAR10Training', 'DMP.GetCIFAR10Training', (['imgSize', 'batchSize'], {}), '(imgSize, batchSize)\n', (5879, 5899), True, 'import DataManagerPytorch as DMP\n'), ((5946, 5979), 'DataManagerPytorch.DataLoaderToTensor', 'DMP.DataLoaderToTensor', (['valLoader'], {}), '(valLoader)\n', (5968, 5979), True, 'import DataManagerPytorch as DMP\n'), ((5999, 6101), 'DataManagerPytorch.GetCorrectlyIdentifiedSamplesBalancedDefense', 'DMP.GetCorrectlyIdentifiedSamplesBalancedDefense', (['defense', 'numAttackSamples', 'valLoader', 'numClasses'], {}), '(defense, numAttackSamples,\n valLoader, numClasses)\n', (6047, 6101), True, 'import DataManagerPytorch as DMP\n'), ((6243, 6317), 'TransformerModels.VisionTransformer', 'VisionTransformer', (['config', 'imgSize'], {'zero_head': '(True)', 'num_classes': 'numClasses'}), '(config, imgSize, zero_head=True, num_classes=numClasses)\n', (6260, 6317), False, 'from TransformerModels import VisionTransformer, CONFIGS\n'), ((6556, 6810), 'AttackWrappersAdaptiveBlackBox.AdaptiveAttack', 'AttackWrappersAdaptiveBlackBox.AdaptiveAttack', (['saveTag', 'device', 'oracle', 'syntheticModel', 'numIterations', 'epochsPerIteration', 'epsForAug', 'learningRate', 'optimizerName', 'dataLoaderForTraining', 'cleanLoader', 'numClasses', 'epsForAttacks', 'clipMin', 'clipMax'], {}), '(saveTag, device, oracle,\n syntheticModel, numIterations, epochsPerIteration, epsForAug,\n learningRate, optimizerName, dataLoaderForTraining, cleanLoader,\n numClasses, epsForAttacks, clipMin, clipMax)\n', (6601, 6810), False, 'import AttackWrappersAdaptiveBlackBox\n'), ((7155, 7175), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7167, 7175), False, 'import torch\n'), ((7790, 7832), 
'DataManagerPytorch.GetCIFAR10Training', 'DMP.GetCIFAR10Training', (['imgSize', 'batchSize'], {}), '(imgSize, batchSize)\n', (7812, 7832), True, 'import DataManagerPytorch as DMP\n'), ((7879, 7912), 'DataManagerPytorch.DataLoaderToTensor', 'DMP.DataLoaderToTensor', (['valLoader'], {}), '(valLoader)\n', (7901, 7912), True, 'import DataManagerPytorch as DMP\n'), ((7932, 8034), 'DataManagerPytorch.GetCorrectlyIdentifiedSamplesBalancedDefense', 'DMP.GetCorrectlyIdentifiedSamplesBalancedDefense', (['defense', 'numAttackSamples', 'valLoader', 'numClasses'], {}), '(defense, numAttackSamples,\n valLoader, numClasses)\n', (7980, 8034), True, 'import DataManagerPytorch as DMP\n'), ((8176, 8250), 'TransformerModels.VisionTransformer', 'VisionTransformer', (['config', 'imgSize'], {'zero_head': '(True)', 'num_classes': 'numClasses'}), '(config, imgSize, zero_head=True, num_classes=numClasses)\n', (8193, 8250), False, 'from TransformerModels import VisionTransformer, CONFIGS\n'), ((8489, 8743), 'AttackWrappersAdaptiveBlackBox.AdaptiveAttack', 'AttackWrappersAdaptiveBlackBox.AdaptiveAttack', (['saveTag', 'device', 'oracle', 'syntheticModel', 'numIterations', 'epochsPerIteration', 'epsForAug', 'learningRate', 'optimizerName', 'dataLoaderForTraining', 'cleanLoader', 'numClasses', 'epsForAttacks', 'clipMin', 'clipMax'], {}), '(saveTag, device, oracle,\n syntheticModel, numIterations, epochsPerIteration, epsForAug,\n learningRate, optimizerName, dataLoaderForTraining, cleanLoader,\n numClasses, epsForAttacks, clipMin, clipMax)\n', (8534, 8743), False, 'import AttackWrappersAdaptiveBlackBox\n'), ((9050, 9064), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (9061, 9064), False, 'import torch\n'), ((9249, 9269), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (9261, 9269), False, 'import torch\n'), ((9950, 10079), 'AttackWrappersSAGA.GetFirstCorrectlyOverlappingSamplesBalanced', 'AttackWrappersSAGA.GetFirstCorrectlyOverlappingSamplesBalanced', (['device', 
'attackSampleNum', 'numClasses', 'valLoader', 'modelPlusList'], {}), '(device,\n attackSampleNum, numClasses, valLoader, modelPlusList)\n', (10012, 10079), False, 'import AttackWrappersSAGA\n'), ((10113, 10253), 'AttackWrappersSAGA.SelfAttentionGradientAttack', 'AttackWrappersSAGA.SelfAttentionGradientAttack', (['device', 'epsMax', 'numSteps', 'modelPlusList', 'coefficientArray', 'cleanLoader', 'clipMin', 'clipMax'], {}), '(device, epsMax, numSteps,\n modelPlusList, coefficientArray, cleanLoader, clipMin, clipMax)\n', (10159, 10253), False, 'import AttackWrappersSAGA\n'), ((6348, 6372), 'numpy.load', 'numpy.load', (['syntheticDir'], {}), '(syntheticDir)\n', (6358, 6372), False, 'import numpy\n'), ((8281, 8305), 'numpy.load', 'numpy.load', (['syntheticDir'], {}), '(syntheticDir)\n', (8291, 8305), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
from numpy import atleast_2d as twod
################################################################################
## PLOTTING FUNCTIONS #########################################################
################################################################################
def plotClassify2D(learner, X, Y, pre=lambda x: x, axis=None, nGrid=128, **kwargs):
    """
    Plot data and classifier outputs on two-dimensional data.

    This function plots data (X,Y) and learner.predict(X, Y)
    together. The learner is predicted on a dense grid
    covering data X, to show its decision boundary.

    Parameters
    ----------
    learner : learner object or None
        A trained learner object that inherits from one of
        the 'Classify' or 'Regressor' base classes; if None,
        only the data points are plotted.
    X : numpy array
        N x M array of data; N = number of data, M = dimension
        (number of features) of data.  Must have M == 2.
    Y : numpy array
        1 x N array containing labels corresponding to data points in X.
    pre : function object (optional)
        Function that is applied to X before prediction.
    axis : a matplotlib axis / plottable object (optional)
    nGrid : density of 2D grid points (default 128)

    Raises
    ------
    ValueError
        If X has a feature dimension other than 2.
    """
    if twod(X).shape[1] != 2:
        raise ValueError('plotClassify2D: function can only be called using two-dimensional data (features)')

    if axis is None:
        axis = plt
    # Plot the data invisibly once so matplotlib chooses axis limits for us
    axis.plot(X[:, 0], X[:, 1], 'k.', visible=False)
    ax = axis.axis()
    xticks = np.linspace(ax[0], ax[1], nGrid)
    yticks = np.linspace(ax[2], ax[3], nGrid)
    grid = np.meshgrid(xticks, yticks)

    # Evaluate the learner on every grid point and show its decisions as an image
    XGrid = np.column_stack((grid[0].flatten(), grid[1].flatten()))
    if learner is not None:
        YGrid = learner.predict(pre(XGrid))
        axis.imshow(YGrid.reshape((len(xticks), len(yticks))), extent=ax,
                    interpolation='nearest', origin='lower', alpha=0.5, aspect='auto')

    cmap = plt.cm.get_cmap()
    # Prefer the learner's known class list; fall back to the labels we were given
    # (also covers learner is None, which raises AttributeError here)
    try:
        classes = np.array(learner.classes)
    except Exception:
        classes = np.unique(Y)
    # Map each class label to a fraction in [0,1) for colormap lookup;
    # the tiny epsilon avoids division by zero when only one class exists
    cvals = (classes - min(classes)) / (max(classes) - min(classes) + 1e-100)
    for i, c in enumerate(classes):
        axis.plot(X[Y == c, 0], X[Y == c, 1], 'ko', color=cmap(cvals[i]), **kwargs)
    axis.axis(ax)
def histy(X, Y, axis=None, **kwargs):
    """
    Plot a histogram (using matplotlib.hist) with multiple classes of data.

    Any additional arguments are passed directly into hist().
    Each class of data is plotted as a different color.
    To specify specific histogram colors, use e.g. facecolor={0:'blue',1:'green',...}
    so that facecolor[c] is the color for class c.

    Related but slightly different appearance to e.g.
      matplotlib.hist( [X[Y==c] for c in np.unique(Y)] , histtype='barstacked' )

    Parameters
    ----------
    X : numpy array
        Data values to histogram.
    Y : numpy array
        Class labels, one per entry of X.
    axis : a matplotlib axis / plottable object (optional)
    """
    if axis is None:
        axis = plt
    yvals = np.unique(Y)
    # Histogram all of X once, only to fix a common set of bin edges
    counts_all, bin_edges = np.histogram(X, **kwargs)
    C, H = len(yvals), len(counts_all)
    hist = np.zeros(shape=(C, H))
    cmap = plt.cm.get_cmap()
    # Map class labels to [0,1) colormap positions; epsilon avoids divide-by-zero
    cvals = (yvals - min(yvals)) / (max(yvals) - min(yvals) + 1e-100)
    # Bars get narrower as the number of classes grows, so side-by-side bars fit
    widthFrac = .25 + .75 / (1.2 + 2 * np.log10(len(yvals)))
    for i, c in enumerate(yvals):
        histc, _ = np.histogram(X[Y == c], bins=bin_edges)
        hist[i, :] = histc
    # Draw tallest bars first in each bin so shorter ones remain visible
    for j in range(H):
        for i in np.argsort(hist[:, j])[::-1]:
            delta = bin_edges[j + 1] - bin_edges[j]
            axis.bar(bin_edges[j] + delta / 2 * i / C * widthFrac, hist[i, j],
                     width=delta * widthFrac, color=cmap(cvals[i]))
def plotPairs(X, Y=None, **kwargs):
    """
    Plot all pairs of features in a grid.

    Diagonal entries show a per-class histogram of each single feature;
    off-diagonal entries show 2D scatterplots of the corresponding
    feature pair, colored by class.
    """
    m, n = X.shape
    if Y is None:
        Y = np.ones((m,))
    fig, ax = plt.subplots(n, n)
    for col in range(n):
        for row in range(n):
            if col == row:
                # Single feature: draw its class-colored histogram
                histy(X[:, col], Y, axis=ax[row, col])
            else:
                # Feature pair: scatter only (no learner, so no decision surface)
                plotClassify2D(None, X[:, [col, row]], Y, axis=ax[row, col])
def plotGauss2D(mu, cov, *args, **kwargs):
    """
    Plot an ellipsoid indicating (one std deviation of) a 2D Gaussian distribution.

    Keyword arguments are forwarded to matplotlib's plot().
    NOTE(review): positional *args are accepted but not forwarded — confirm
    whether callers rely on passing extra positional plot arguments.
    """
    from scipy.linalg import sqrtm
    angles = np.linspace(0, 2 * np.pi, 50)
    # Unit circle sampled at 50 points, then stretched by the matrix sqrt of cov
    unit_circle = np.array([np.sin(angles), np.cos(angles)])
    ellipse = sqrtm(cov).dot(unit_circle)
    ellipse += twod(mu).T  # translate to the mean
    plt.plot(mu[0], mu[1], 'x', ellipse[0, :], ellipse[1, :], **kwargs)
# TODO: plotSoftClassify2D
# TODO: plotRegress1D
################################################################################
################################################################################
################################################################################
| [
"numpy.atleast_2d",
"numpy.histogram",
"scipy.linalg.sqrtm",
"numpy.unique",
"numpy.ones",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.argsort",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"matplotlib.pyplot.cm.get_cmap",
"numpy.meshgrid",
"matplotlib.pyplot.subplots"
... | [((1641, 1673), 'numpy.linspace', 'np.linspace', (['ax[0]', 'ax[1]', 'nGrid'], {}), '(ax[0], ax[1], nGrid)\n', (1652, 1673), True, 'import numpy as np\n'), ((1685, 1717), 'numpy.linspace', 'np.linspace', (['ax[2]', 'ax[3]', 'nGrid'], {}), '(ax[2], ax[3], nGrid)\n', (1696, 1717), True, 'import numpy as np\n'), ((1727, 1754), 'numpy.meshgrid', 'np.meshgrid', (['xticks', 'yticks'], {}), '(xticks, yticks)\n', (1738, 1754), True, 'import numpy as np\n'), ((2148, 2165), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', ([], {}), '()\n', (2163, 2165), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3143), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (3140, 3143), True, 'import numpy as np\n'), ((3165, 3190), 'numpy.histogram', 'np.histogram', (['X'], {}), '(X, **kwargs)\n', (3177, 3190), True, 'import numpy as np\n'), ((3232, 3254), 'numpy.zeros', 'np.zeros', ([], {'shape': '(C, H)'}), '(shape=(C, H))\n', (3240, 3254), True, 'import numpy as np\n'), ((3267, 3284), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', ([], {}), '()\n', (3282, 3284), True, 'import matplotlib.pyplot as plt\n'), ((4018, 4036), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', 'n'], {}), '(n, n)\n', (4030, 4036), True, 'import matplotlib.pyplot as plt\n'), ((4476, 4505), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (4487, 4505), True, 'import numpy as np\n'), ((4615, 4674), 'matplotlib.pyplot.plot', 'plt.plot', (['mu[0]', 'mu[1]', '"""x"""', 'ell[0, :]', 'ell[1, :]'], {}), "(mu[0], mu[1], 'x', ell[0, :], ell[1, :], **kwargs)\n", (4623, 4674), True, 'import matplotlib.pyplot as plt\n'), ((2292, 2317), 'numpy.array', 'np.array', (['learner.classes'], {}), '(learner.classes)\n', (2300, 2317), True, 'import numpy as np\n'), ((3455, 3494), 'numpy.histogram', 'np.histogram', (['X[Y == c]'], {'bins': 'bin_edges'}), '(X[Y == c], bins=bin_edges)\n', (3467, 3494), True, 'import numpy as np\n'), ((3989, 4002), 'numpy.ones', 'np.ones', 
(['(m,)'], {}), '((m,))\n', (3996, 4002), True, 'import numpy as np\n'), ((4599, 4607), 'numpy.atleast_2d', 'twod', (['mu'], {}), '(mu)\n', (4603, 4607), True, 'from numpy import atleast_2d as twod\n'), ((2351, 2363), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (2360, 2363), True, 'import numpy as np\n'), ((3558, 3580), 'numpy.argsort', 'np.argsort', (['hist[:, j]'], {}), '(hist[:, j])\n', (3568, 3580), True, 'import numpy as np\n'), ((4525, 4538), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4531, 4538), True, 'import numpy as np\n'), ((4539, 4552), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4545, 4552), True, 'import numpy as np\n'), ((4565, 4575), 'scipy.linalg.sqrtm', 'sqrtm', (['cov'], {}), '(cov)\n', (4570, 4575), False, 'from scipy.linalg import sqrtm\n'), ((1274, 1281), 'numpy.atleast_2d', 'twod', (['X'], {}), '(X)\n', (1278, 1281), True, 'from numpy import atleast_2d as twod\n')] |
import os
import sys
from sklearn.svm import LinearSVC, SVC
from sklearn.preprocessing import StandardScaler
from FeatureExtractor import FeatureExtractor
import matplotlib.image as mpimg
import numpy as np
import cv2
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
import matplotlib.pyplot as plt
from collections import deque
from scipy.ndimage.measurements import label
from moviepy.editor import VideoFileClip
from matplotlib import cm
def get_img_file_paths(root):
    """Recursively collect paths of .png/.jpg/.jpeg files under *root*
    (extension match is case-insensitive)."""
    image_paths = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            candidate = os.path.join(dirpath, name)
            if candidate.lower().endswith(('.png', '.jpg', '.jpeg')):
                image_paths.append(candidate)
    return image_paths
def get_data_info(vehicles_path, non_vehicles_path):
    """Print dataset statistics and return {'vehicles': [...], 'non-vehicles': [...]}.

    Returns None (after printing an error) if either directory is missing.
    """
    print('===Dataset statistics===')
    print('Vehicles path: ' + vehicles_path)
    print('Non vehicles path: ' + non_vehicles_path)
    # Bail out early on the first missing directory, matching check order.
    if not os.path.exists(vehicles_path):
        print('ERROR: Vehicles dir not exists!')
        return
    if not os.path.exists(non_vehicles_path):
        print('ERROR: Non-vehicles dir not exists!')
        return
    vehicle_paths = get_img_file_paths(vehicles_path)
    non_vehicle_paths = get_img_file_paths(non_vehicles_path)
    print('Vehicles set size: {}'.format(len(vehicle_paths)))
    print('Non-vehicles set size: {}'.format(len(non_vehicle_paths)))
    print('========================')
    return {'vehicles': vehicle_paths,
            'non-vehicles': non_vehicle_paths}
def load_and_extract_features(img_paths, feature_extractor):
    """Load each image, rescale to uint8 [0, 255], and run the feature extractor.

    Returns a list of feature vectors, one per path.
    """
    extracted = []
    for img_path in img_paths:
        # mpimg.imread returns floats in [0, 1] for PNG; convert to uint8.
        img = (mpimg.imread(img_path) * 255).astype(np.uint8)
        extracted.append(feature_extractor.process(img))
    return extracted
def get_sliding_windows(frame_shape, x_start_stop=None, y_start_stop=None,
                        xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Generate sliding-window corner pairs ((startx, starty), (endx, endy)).

    frame_shape: (height, width, ...) of the image being searched.
    x_start_stop / y_start_stop: [min, max] search span; None entries (or a
        None argument) default to the full frame extent.
    xy_window: window size in pixels per axis.
    xy_overlap: fractional overlap between adjacent windows per axis.

    Fixes: the previous version used mutable list defaults and mutated them
    in place (state leaked between calls), and used np.int, which was removed
    in NumPy 1.24.  Callers passing lists are no longer mutated.
    """
    x_start_stop = [None, None] if x_start_stop is None else list(x_start_stop)
    y_start_stop = [None, None] if y_start_stop is None else list(y_start_stop)
    if x_start_stop[0] is None:
        x_start_stop[0] = 0
    if x_start_stop[1] is None:
        x_start_stop[1] = frame_shape[1]
    if y_start_stop[0] is None:
        y_start_stop[0] = 0
    if y_start_stop[1] is None:
        y_start_stop[1] = frame_shape[0]
    xspan = x_start_stop[1] - x_start_stop[0]
    yspan = y_start_stop[1] - y_start_stop[0]
    # np.int was an alias of the builtin int; int() truncates identically.
    nx_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    nx_buffer = int(xy_window[0] * xy_overlap[0])
    ny_buffer = int(xy_window[1] * xy_overlap[1])
    nx_windows = int((xspan - nx_buffer) / nx_pix_per_step)
    ny_windows = int((yspan - ny_buffer) / ny_pix_per_step)
    window_list = []
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            startx = xs * nx_pix_per_step + x_start_stop[0]
            endx = startx + xy_window[0]
            starty = ys * ny_pix_per_step + y_start_stop[0]
            endy = starty + xy_window[1]
            window_list.append(((startx, starty), (endx, endy)))
    return window_list
def draw_windows(img, bboxes, color=(0, 0, 255), thick=2):
    """Draw each ((x1, y1), (x2, y2)) box on a copy of *img*; the original
    image is left untouched."""
    canvas = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(canvas, tuple(top_left), tuple(bottom_right), color, thick)
    return canvas
def add_heat(heatmap, bbox_list):
    """Increment *heatmap* by 1 inside each ((x1, y1), (x2, y2)) box.

    Mutates the array in place and also returns it.
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero every pixel at or below *threshold* (in place) and return the map."""
    cold = heatmap <= threshold
    heatmap[cold] = 0
    return heatmap
def draw_labeled_bboxes(img, labels):
    """Draw one blue box around each labeled region from scipy's label().

    labels: (label_array, n_labels) tuple; img is modified in place and returned.
    """
    for car_number in range(1, labels[1] + 1):
        ys, xs = np.nonzero(labels[0] == car_number)
        top_left = (np.min(xs), np.min(ys))
        bottom_right = (np.max(xs), np.max(ys))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
def get_colors(inp, colormap, vmin=None, vmax=None):
    """Map scalar array *inp* through *colormap* to RGBA values in [0, 1]."""
    mappable = cm.ScalarMappable(norm=plt.Normalize(vmin, vmax), cmap=colormap)
    return mappable.to_rgba(inp)
class VehicleDetector:
    """Sliding-window vehicle detector: a linear SVM over YCrCb HOG features,
    heatmap merging of raw window hits, and temporal smoothing across frames."""

    def __init__(self):
        print('Initializing detector...')
        self.classifier = LinearSVC(verbose=True)
        self.X_scaler = StandardScaler()
        self.feature_extractor = FeatureExtractor(color_space='YCrCb',
                                                 orient=9, hog_channel='ALL')
        # Raw detections from the most recent frames, for temporal averaging.
        self.last_detections = deque(maxlen=20)

    def detect_vehicles(self, img, windows):
        """Classify every window patch; return the windows predicted as cars."""
        on_windows = []
        for window in windows:
            # Crop ((x1,y1),(x2,y2)) and resize to the classifier's 64x64 input.
            test_img = cv2.resize(img[window[0][1]:window[1][1],
                                window[0][0]:window[1][0]], (64, 64))
            features = self.feature_extractor.process(test_img)
            test_features = self.X_scaler.transform(np.array(features).reshape(1, -1))
            prediction = self.classifier.predict(test_features)[0].astype(np.int64)
            if prediction == 1:
                on_windows.append(window)
        return on_windows

    def train(self, vehicles_path, non_vehicles_path):
        """Fit the scaler and SVM on the vehicle/non-vehicle image folders."""
        print('Training...')
        dataset_info = get_data_info(vehicles_path, non_vehicles_path)
        car_features = load_and_extract_features(dataset_info['vehicles'],
                                                 self.feature_extractor)
        non_car_features = load_and_extract_features(dataset_info['non-vehicles'],
                                                     self.feature_extractor)
        # Labels: 1 for car samples, 0 for non-car samples.
        X = np.vstack((car_features, non_car_features)).astype(np.float64)
        y = np.hstack((np.ones(len(car_features)), np.zeros(len(non_car_features))))
        rand_state = np.random.randint(0, 100)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=rand_state)
        # Fit the scaler on training data only, then scale both splits.
        self.X_scaler.fit(X_train)
        X_train = self.X_scaler.transform(X_train)
        X_test = self.X_scaler.transform(X_test)
        print('Feature vector length:', len(X_train[0]))
        self.classifier.fit(X_train, y_train)
        print('Test Accuracy = ', round(self.classifier.score(X_test, y_test), 4))

    def save(self, clf_path, sclr_path):
        """Persist the trained classifier and scaler to disk."""
        print('Saving classifier...')
        joblib.dump(self.classifier, clf_path)
        joblib.dump(self.X_scaler, sclr_path)

    def load(self, clf_path, sclr_path):
        """Restore a previously saved classifier and scaler."""
        print('Loading classifier...')
        self.classifier = joblib.load(clf_path)
        self.X_scaler = joblib.load(sclr_path)

    def get_merged_detections(self, frame_detections, img, threshold=1):
        """Merge overlapping window hits via a thresholded heatmap.

        Returns (cars, heatmap): one bounding box per connected heat region.
        """
        heat = np.zeros_like(img[:,:,0]).astype(np.float)
        heat = add_heat(heat, frame_detections)
        heat = apply_threshold(heat,threshold)
        heatmap = np.clip(heat, 0, 255)
        # Connected-component labelling groups adjacent hot pixels into cars.
        labels = label(heatmap)
        cars = []
        for car_number in range(1, labels[1]+1):
            nonzero = (labels[0] == car_number).nonzero()
            nonzeroy = np.array(nonzero[0])
            nonzerox = np.array(nonzero[1])
            bbox = ((np.min(nonzerox), np.min(nonzeroy)),
                    (np.max(nonzerox), np.max(nonzeroy)))
            cars.append(bbox)
        return cars, heatmap

    def get_avg_detections(self, img):
        """Merge detections accumulated over recent frames (temporal smoothing)."""
        last_detections_conc = np.concatenate(np.array(self.last_detections))
        # Require a box to appear in most buffered frames before reporting it.
        detections, _ = self.get_merged_detections(last_detections_conc, img,
                                                  threshold=min(len(self.last_detections)-1, 15))
        return detections

    def process(self, img):
        """Run the full per-frame pipeline; return (bbox overlay, heatmap overlay)."""
        # Multi-scale search: larger windows lower in the frame (closer cars).
        window_size = [220, 146, 117, 100]
        y_start_stop = [[440, 660], [414, 560], [400, 517], [390, 490]]
        window_overlap = [0.8, 0.8, 0.8, 0.8]
        wnd_color = [(255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 255, 0)]
        bboxes_overlay = np.zeros_like(img)
        heatmap_overlay = np.zeros_like(img)
        frame_detections = []
        for y_ss, wnd_sz, wnd_olp, color in zip(y_start_stop, window_size,
                                                window_overlap, wnd_color):
            windows = get_sliding_windows(img.shape, x_start_stop=[None, None], y_start_stop=y_ss,
                                          xy_window=(wnd_sz, wnd_sz), xy_overlap=(wnd_olp, wnd_olp))
            detections = self.detect_vehicles(img, windows)
            if detections:
                frame_detections.append(detections)
        if frame_detections:
            frame_detections = np.concatenate(frame_detections)
            merged, heatmap = self.get_merged_detections(frame_detections, img, 1)
            if merged:
                self.last_detections.append(merged)
            if self.last_detections:
                detections = self.get_avg_detections(img)
                bboxes_overlay = draw_windows(bboxes_overlay,
                                              detections, color=(255,255,0))
            # Render the heatmap with matplotlib's 'hot' colormap, drop alpha.
            heatmap_overlay = get_colors(heatmap, cm.hot)
            heatmap_overlay = heatmap_overlay[:,:,:3]*255
        else:
            print('No detections in frame!')
        return bboxes_overlay, heatmap_overlay
def video():
    """Run the trained detector over the project video and write an
    annotated copy with detection boxes and a small heatmap inset."""
    detector = VehicleDetector()
    detector.load('./classifier_YCrCb_lin.pkl', './scaler_YCrCb_lin.pkl')

    def process_image(image):
        # Per-frame callback for moviepy: overlay boxes and a 1/4-scale heatmap.
        res = np.copy(image)
        bboxes_overlay, heatmap = detector.process(image)
        small_heatmap = cv2.resize(heatmap, (0,0), fx=0.25, fy=0.25)
        res = cv2.addWeighted(res, 1., bboxes_overlay, 1., 0.)
        # Paste the heatmap thumbnail in the top-right corner (10 px margin).
        x_offset = image.shape[1] - small_heatmap.shape[1] - 10
        y_offset = 10
        res[y_offset:y_offset + small_heatmap.shape[0],
            x_offset:x_offset + small_heatmap.shape[1]] = small_heatmap
        return res

    output = './project_video_annotated.mp4'
    clip1 = VideoFileClip('./project_video.mp4')#.subclip(10,11)
    #output = './test_video_annotated.mp4'
    #clip1 = VideoFileClip('./test_video.mp4')#.subclip(45,46)
    white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
    white_clip.write_videofile(output, audio=False)
def train():
    """Train the classifier on the full vehicle/non-vehicle dataset,
    run one test image through the pipeline, and persist the model."""
    detector = VehicleDetector()
    #detector.train('./training_data/subset/vehicles_smallset/',
    #               './training_data/subset/non-vehicles_smallset/')
    detector.train('./training_data/full/vehicles/',
                   './training_data/full/non-vehicles/')
    # Sanity-check the full pipeline on one sample frame before saving.
    image = mpimg.imread('./test_images/test6.jpg')
    detector.process(image)
    detector.save('./classifier_YCrCb_lin.pkl', './scaler_YCrCb_lin.pkl')
def test():
    """Run the saved classifier over sample test images."""
    for idx in range(1, 2):
        frame = mpimg.imread('./test_images/' + ('test' + str(idx) + '.jpg'))
        detector = VehicleDetector()
        detector.load('./classifier_YCrCb_lin.pkl', './scaler_YCrCb_lin.pkl')
        detector.process(frame)
video()  # Entry point: annotate the project video (swap for train()/test() as needed).
| [
"cv2.rectangle",
"numpy.clip",
"scipy.ndimage.measurements.label",
"matplotlib.image.imread",
"sklearn.externals.joblib.load",
"numpy.array",
"os.walk",
"os.path.exists",
"collections.deque",
"matplotlib.pyplot.Normalize",
"numpy.zeros_like",
"numpy.max",
"cv2.addWeighted",
"matplotlib.cm.... | [((561, 574), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (568, 574), False, 'import os\n'), ((2312, 2354), 'numpy.int', 'np.int', (['(xy_window[0] * (1 - xy_overlap[0]))'], {}), '(xy_window[0] * (1 - xy_overlap[0]))\n', (2318, 2354), True, 'import numpy as np\n'), ((2372, 2414), 'numpy.int', 'np.int', (['(xy_window[1] * (1 - xy_overlap[1]))'], {}), '(xy_window[1] * (1 - xy_overlap[1]))\n', (2378, 2414), True, 'import numpy as np\n'), ((2426, 2462), 'numpy.int', 'np.int', (['(xy_window[0] * xy_overlap[0])'], {}), '(xy_window[0] * xy_overlap[0])\n', (2432, 2462), True, 'import numpy as np\n'), ((2476, 2512), 'numpy.int', 'np.int', (['(xy_window[1] * xy_overlap[1])'], {}), '(xy_window[1] * xy_overlap[1])\n', (2482, 2512), True, 'import numpy as np\n'), ((2527, 2572), 'numpy.int', 'np.int', (['((xspan - nx_buffer) / nx_pix_per_step)'], {}), '((xspan - nx_buffer) / nx_pix_per_step)\n', (2533, 2572), True, 'import numpy as np\n'), ((2584, 2629), 'numpy.int', 'np.int', (['((yspan - ny_buffer) / ny_pix_per_step)'], {}), '((yspan - ny_buffer) / ny_pix_per_step)\n', (2590, 2629), True, 'import numpy as np\n'), ((3053, 3065), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (3060, 3065), True, 'import numpy as np\n'), ((3810, 3835), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (3823, 3835), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3884), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'colormap'}), '(norm=norm, cmap=colormap)\n', (3858, 3884), False, 'from matplotlib import cm\n'), ((8745, 8781), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""./project_video.mp4"""'], {}), "('./project_video.mp4')\n", (8758, 8781), False, 'from moviepy.editor import VideoFileClip\n'), ((9301, 9340), 'matplotlib.image.imread', 'mpimg.imread', (['"""./test_images/test6.jpg"""'], {}), "('./test_images/test6.jpg')\n", (9313, 9340), True, 'import matplotlib.image as 
mpimg\n'), ((949, 978), 'os.path.exists', 'os.path.exists', (['vehicles_path'], {}), '(vehicles_path)\n', (963, 978), False, 'import os\n'), ((1040, 1073), 'os.path.exists', 'os.path.exists', (['non_vehicles_path'], {}), '(non_vehicles_path)\n', (1054, 1073), False, 'import os\n'), ((1652, 1670), 'matplotlib.image.imread', 'mpimg.imread', (['path'], {}), '(path)\n', (1664, 1670), True, 'import matplotlib.image as mpimg\n'), ((3542, 3562), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (3550, 3562), True, 'import numpy as np\n'), ((3576, 3596), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (3584, 3596), True, 'import numpy as np\n'), ((3685, 3737), 'cv2.rectangle', 'cv2.rectangle', (['img', 'bbox[0]', 'bbox[1]', '(0, 0, 255)', '(6)'], {}), '(img, bbox[0], bbox[1], (0, 0, 255), 6)\n', (3698, 3737), False, 'import cv2\n'), ((4030, 4053), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4039, 4053), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((4072, 4088), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4086, 4088), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4116, 4182), 'FeatureExtractor.FeatureExtractor', 'FeatureExtractor', ([], {'color_space': '"""YCrCb"""', 'orient': '(9)', 'hog_channel': '"""ALL"""'}), "(color_space='YCrCb', orient=9, hog_channel='ALL')\n", (4132, 4182), False, 'from FeatureExtractor import FeatureExtractor\n'), ((4212, 4228), 'collections.deque', 'deque', ([], {'maxlen': '(20)'}), '(maxlen=20)\n', (4217, 4228), False, 'from collections import deque\n'), ((5307, 5332), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (5324, 5332), True, 'import numpy as np\n'), ((5370, 5432), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': 'rand_state'}), '(X, y, test_size=0.2, random_state=rand_state)\n', (5386, 5432), 
False, 'from sklearn.cross_validation import train_test_split\n'), ((5798, 5836), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.classifier', 'clf_path'], {}), '(self.classifier, clf_path)\n', (5809, 5836), False, 'from sklearn.externals import joblib\n'), ((5840, 5877), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.X_scaler', 'sclr_path'], {}), '(self.X_scaler, sclr_path)\n', (5851, 5877), False, 'from sklearn.externals import joblib\n'), ((5971, 5992), 'sklearn.externals.joblib.load', 'joblib.load', (['clf_path'], {}), '(clf_path)\n', (5982, 5992), False, 'from sklearn.externals import joblib\n'), ((6012, 6034), 'sklearn.externals.joblib.load', 'joblib.load', (['sclr_path'], {}), '(sclr_path)\n', (6023, 6034), False, 'from sklearn.externals import joblib\n'), ((6253, 6274), 'numpy.clip', 'np.clip', (['heat', '(0)', '(255)'], {}), '(heat, 0, 255)\n', (6260, 6274), True, 'import numpy as np\n'), ((6286, 6300), 'scipy.ndimage.measurements.label', 'label', (['heatmap'], {}), '(heatmap)\n', (6291, 6300), False, 'from scipy.ndimage.measurements import label\n'), ((7126, 7144), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (7139, 7144), True, 'import numpy as np\n'), ((7165, 7183), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (7178, 7183), True, 'import numpy as np\n'), ((8303, 8317), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (8310, 8317), True, 'import numpy as np\n'), ((8388, 8433), 'cv2.resize', 'cv2.resize', (['heatmap', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(heatmap, (0, 0), fx=0.25, fy=0.25)\n', (8398, 8433), False, 'import cv2\n'), ((8442, 8493), 'cv2.addWeighted', 'cv2.addWeighted', (['res', '(1.0)', 'bboxes_overlay', '(1.0)', '(0.0)'], {}), '(res, 1.0, bboxes_overlay, 1.0, 0.0)\n', (8457, 8493), False, 'import cv2\n'), ((9525, 9566), 'matplotlib.image.imread', 'mpimg.imread', (["('./test_images/' + img_name)"], {}), "('./test_images/' + img_name)\n", (9537, 9566), True, 'import 
matplotlib.image as mpimg\n'), ((616, 644), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (628, 644), False, 'import os\n'), ((4329, 4408), 'cv2.resize', 'cv2.resize', (['img[window[0][1]:window[1][1], window[0][0]:window[1][0]]', '(64, 64)'], {}), '(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))\n', (4339, 4408), False, 'import cv2\n'), ((6420, 6440), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (6428, 6440), True, 'import numpy as np\n'), ((6455, 6475), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (6463, 6475), True, 'import numpy as np\n'), ((6689, 6719), 'numpy.array', 'np.array', (['self.last_detections'], {}), '(self.last_detections)\n', (6697, 6719), True, 'import numpy as np\n'), ((7640, 7672), 'numpy.concatenate', 'np.concatenate', (['frame_detections'], {}), '(frame_detections)\n', (7654, 7672), True, 'import numpy as np\n'), ((3608, 3624), 'numpy.min', 'np.min', (['nonzerox'], {}), '(nonzerox)\n', (3614, 3624), True, 'import numpy as np\n'), ((3626, 3642), 'numpy.min', 'np.min', (['nonzeroy'], {}), '(nonzeroy)\n', (3632, 3642), True, 'import numpy as np\n'), ((3646, 3662), 'numpy.max', 'np.max', (['nonzerox'], {}), '(nonzerox)\n', (3652, 3662), True, 'import numpy as np\n'), ((3664, 3680), 'numpy.max', 'np.max', (['nonzeroy'], {}), '(nonzeroy)\n', (3670, 3680), True, 'import numpy as np\n'), ((5149, 5192), 'numpy.vstack', 'np.vstack', (['(car_features, non_car_features)'], {}), '((car_features, non_car_features))\n', (5158, 5192), True, 'import numpy as np\n'), ((6115, 6142), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (6128, 6142), True, 'import numpy as np\n'), ((6488, 6504), 'numpy.min', 'np.min', (['nonzerox'], {}), '(nonzerox)\n', (6494, 6504), True, 'import numpy as np\n'), ((6506, 6522), 'numpy.min', 'np.min', (['nonzeroy'], {}), '(nonzeroy)\n', (6512, 6522), True, 'import numpy as np\n'), ((6530, 6546), 
'numpy.max', 'np.max', (['nonzerox'], {}), '(nonzerox)\n', (6536, 6546), True, 'import numpy as np\n'), ((6548, 6564), 'numpy.max', 'np.max', (['nonzeroy'], {}), '(nonzeroy)\n', (6554, 6564), True, 'import numpy as np\n'), ((4552, 4570), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (4560, 4570), True, 'import numpy as np\n')] |
"""
A model for the disasters data with a changepoint
changepoint ~ U(1851, 1962)
early_mean ~ Exp(1.)
late_mean ~ Exp(1.)
disasters[t] ~ Poi(early_mean if t <= switchpoint, late_mean otherwise)
"""
import pymc3_ext as pm
import theano.tensor as tt
from numpy import arange, array
__all__ = ['disasters_data', 'switchpoint', 'early_mean', 'late_mean', 'rate',
           'disasters']

# Time series of recorded coal mining disasters in the UK from 1851 to 1962
disasters_data = array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
                        3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
                        2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
                        1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
                        0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
                        3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
                        0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
year = arange(1851, 1962)

with pm.Model() as model:
    # Uniform prior over the year at which the accident rate changed.
    switchpoint = pm.DiscreteUniform('switchpoint', lower=year.min(), upper=year.max())
    # Exponential priors on the Poisson rates before/after the changepoint.
    early_mean = pm.Exponential('early_mean', lam=1.)
    late_mean = pm.Exponential('late_mean', lam=1.)

    # Allocate appropriate Poisson rates to years before and after current
    # switchpoint location
    rate = tt.switch(switchpoint >= year, early_mean, late_mean)

    disasters = pm.Poisson('disasters', rate, observed=disasters_data)

    # Initial values for stochastic nodes
    start = {'early_mean': 2., 'late_mean': 3.}

    tr = pm.sample(1000, tune=500, start=start)
    pm.traceplot(tr)
| [
"pymc3_ext.Poisson",
"pymc3_ext.Exponential",
"pymc3_ext.Model",
"numpy.array",
"pymc3_ext.traceplot",
"theano.tensor.switch",
"pymc3_ext.sample",
"numpy.arange"
] | [((485, 841), 'numpy.array', 'array', (['[4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4, 5, 3, 1, 4, 4,\n 1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0, 1,\n 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1,\n 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4, 0,\n 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]'], {}), '([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4, 5, 3, 1,\n 4, 4, 1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0,\n 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0,\n 2, 1, 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1,\n 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\n', (490, 841), False, 'from numpy import arange, array\n'), ((977, 995), 'numpy.arange', 'arange', (['(1851)', '(1962)'], {}), '(1851, 1962)\n', (983, 995), False, 'from numpy import arange, array\n'), ((1002, 1012), 'pymc3_ext.Model', 'pm.Model', ([], {}), '()\n', (1010, 1012), True, 'import pymc3_ext as pm\n'), ((1129, 1166), 'pymc3_ext.Exponential', 'pm.Exponential', (['"""early_mean"""'], {'lam': '(1.0)'}), "('early_mean', lam=1.0)\n", (1143, 1166), True, 'import pymc3_ext as pm\n'), ((1182, 1218), 'pymc3_ext.Exponential', 'pm.Exponential', (['"""late_mean"""'], {'lam': '(1.0)'}), "('late_mean', lam=1.0)\n", (1196, 1218), True, 'import pymc3_ext as pm\n'), ((1332, 1385), 'theano.tensor.switch', 'tt.switch', (['(switchpoint >= year)', 'early_mean', 'late_mean'], {}), '(switchpoint >= year, early_mean, late_mean)\n', (1341, 1385), True, 'import theano.tensor as tt\n'), ((1407, 1461), 'pymc3_ext.Poisson', 'pm.Poisson', (['"""disasters"""', 'rate'], {'observed': 'disasters_data'}), "('disasters', rate, observed=disasters_data)\n", (1417, 1461), True, 'import pymc3_ext as pm\n'), ((1567, 1605), 'pymc3_ext.sample', 'pm.sample', (['(1000)'], {'tune': '(500)', 'start': 'start'}), '(1000, tune=500, start=start)\n', (1576, 1605), True, 'import 
pymc3_ext as pm\n'), ((1610, 1626), 'pymc3_ext.traceplot', 'pm.traceplot', (['tr'], {}), '(tr)\n', (1622, 1626), True, 'import pymc3_ext as pm\n')] |
import os
import numpy as np
import random
import lmdb
import pickle
import platform
# Re-seed NumPy's RNG with a fresh random seed each run.
np.random.seed(np.random.randint(1 << 30))

num_frames = 20
seq_length = 20        # frames per generated clip
image_size = 64        # canvas side length in pixels
batch_size = 1
num_digits = 2         # moving digits composited per clip
step_length = 0.05     # fraction of the canvas a digit moves per frame
digit_size = 28        # MNIST digit side length in pixels
frame_size = image_size ** 2
def create_reverse_dictionary(dictionary):
    """Invert a word -> index vocabulary into an index -> word mapping."""
    return {index: word for word, index in dictionary.items()}
# Caption vocabulary: word -> integer token id.
dictionary = {'0':0, '1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'the': 10, 'digit': 11, 'and': 12,
              'is':13, 'are':14, 'bouncing': 15, 'moving':16, 'here':17, 'there':18, 'around':19, 'jumping':20, 'up':21,
              'down':22, 'left':23, 'right':24, 'then':25, '.':26}

# Motion descriptions indexed by motion_flag + 2*direction + 4*bounce.
motion_strings = ['up', 'left', 'down', 'right', 'up then down', 'left then right', 'down then up', 'right then left']
# Rows map a motion code (0-3) to the two digits' vertical/horizontal flags.
motion_idxs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
def create_dataset():
    """Split the 360 (motion, digit-pair) combinations into train/val halves.

    Valid two-digit codes are 0-99 excluding 0 and repeated digits
    (11, 22, ...); motion variants add offsets 0/100/200/300.  Alternating
    assignment keeps the split balanced and disjoint.

    Returns (train, val): int arrays of 180 entries each.

    Fix: dtype=np.int was removed in NumPy 1.24; use the builtin int.
    """
    numbers = [i for i in range(100) if i not in [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]]
    random.shuffle(numbers)
    numbers = np.array(numbers)
    dataset = np.zeros((4, 10 * 9), dtype=int)
    dataset[0, :] = numbers
    dataset[1, :] = 100 + numbers
    dataset[2, :] = 200 + numbers
    dataset[3, :] = 300 + numbers
    train = []
    val = []
    count = 0
    # Rows 0/1: alternate which motion variant goes to val vs train.
    for i in range(90):
        dummy = count % 2
        val.append(dataset[dummy, i])
        train.append(dataset[1 - dummy, i])
        count = count + 1
    # Rows 2/3: same alternating scheme for the other two motion variants.
    for i in range(90):
        dummy = count % 2
        val.append(dataset[dummy + 2, i])
        train.append(dataset[(1 - dummy) + 2, i])
        count = count + 1
    return np.array(train), np.array(val)
def sent2matrix(sentence, dictionary):
    """Encode a whitespace-split sentence as a 1 x n_words int32 matrix of
    vocabulary indices."""
    tokens = sentence.split()
    encoded = np.zeros((1, len(tokens)), dtype=np.int32)
    for col, token in enumerate(tokens):
        encoded[0, col] = dictionary[token]
    return encoded
def matrix2sent(matrix, reverse_dictionary):
    """Decode a 1-D array of token ids into a space-prefixed sentence."""
    words = [reverse_dictionary[matrix[i]] for i in range(matrix.shape[0])]
    return "".join(" " + w for w in words)
def GetRandomTrajectory(motion):
    """Sample a random bouncing trajectory for one digit.

    motion: 0 -> vertical movement, otherwise horizontal.
    Returns (start_y, start_x, direction, bounce): per-frame integer pixel
    positions (trajectory may end early if the digit stops) plus the sampled
    direction flag (1 = down/right initially) and bounce flag.
    """
    length = seq_length
    canvas_size = image_size - digit_size

    y = random.randint(15, 85) / 100  # the starting point of the two numbers
    x = random.randint(15, 85) / 100
    start_y = []
    start_x = []
    start_y.append(y)
    start_x.append(x)
    # Velocity in normalized canvas units: one axis only, chosen by `motion`.
    if motion == 0:
        v_y, v_x = 2., 0
    else:
        v_y, v_x = 0, 2.
    direction = random.choice([1, 0])  # 1 is moving right or down, 0 is moving left or top
    bounce = random.choice([1, 0])
    for i in range(length):
        if direction == 1:
            y += v_y * step_length
            x += v_x * step_length
            if bounce == 0:
                # No bounce: stop dead on reaching the far edge.
                if x >= 1.0:
                    x, v_x = 1.0, 0
                if y >= 1.0:
                    y, v_y = 1.0, 0
            else:
                # Bounce: reflect velocity at the far edge...
                if x >= 1.0:
                    x, v_x = 1.0, -v_x
                if y >= 1.0:
                    y, v_y = 1.0, -v_y
                # ...then stop when the return trip reaches the near edge.
                if x <= 0:
                    x, v_x = 0, 0
                if y <= 0:
                    y, v_y = 0, 0
        else:
            # Mirror image of the above for leftward/upward initial motion.
            y -= v_y * step_length
            x -= v_x * step_length
            if bounce == 0:
                if x <= 0:
                    x, v_x = 0, 0
                if y <= 0:
                    y, v_y = 0, 0
            else:
                if x <= 0:
                    x, v_x = 0, -v_x
                if y <= 0:
                    y, v_y = 0, -v_y
                if x >= 1.0:
                    x, v_x = 1.0, 0
                if y >= 1.0:
                    y, v_y = 1.0, 0
        # print x, y
        start_y.append(y)
        start_x.append(x)
        if v_y == 0 and v_x == 0:
            # Digit has come to rest; the trajectory ends early.
            break
    # scale to the size of the canvas.
    start_y = (canvas_size * np.array(start_y)).astype(np.int32)
    start_x = (canvas_size * np.array(start_x)).astype(np.int32)
    # print(start_y.shape)
    return start_y, start_x, direction, bounce
def Overlap(a, b):
    """Element-wise maximum, used to composite digit images onto the canvas."""
    return np.where(a > b, a, b)
def create_gif(digit_imgs, motion, background):
    """Render one two-digit bouncing-MNIST clip.

    digit_imgs: the two digit images to animate.
    motion: per-digit vertical/horizontal motion flags (row of motion_idxs).
    background: distractor digit image, pasted statically half the time.
    Returns (gifs, direction, bounce) with gifs of shape (T, 1, 64, 64) float32.
    """
    # get an array of random numbers for indices
    direction = np.zeros(2)
    bounce = np.zeros(2)
    start_y1, start_x1, direction[0], bounce[0] = GetRandomTrajectory(motion[0])
    start_y2, start_x2, direction[1], bounce[1] = GetRandomTrajectory(motion[1])
    # Trajectories can end early (digit stopped); pad the shorter one by
    # repeating its final position so both cover the same number of frames.
    if start_y1.shape[0] < start_y2.shape[0]:
        start_y1 = np.concatenate([start_y1, np.repeat(start_y1[-1], start_y2.shape[0] - start_y1.shape[0])], axis=0)
        start_x1 = np.concatenate([start_x1, np.repeat(start_x1[-1], start_x2.shape[0] - start_x1.shape[0])], axis=0)
    elif start_y1.shape[0] > start_y2.shape[0]:
        start_y2 = np.concatenate([start_y2, np.repeat(start_y2[-1], start_y1.shape[0] - start_y2.shape[0])], axis=0)
        start_x2 = np.concatenate([start_x2, np.repeat(start_x2[-1], start_x1.shape[0] - start_x2.shape[0])], axis=0)
    gifs = np.zeros((start_y1.shape[0], 1, image_size, image_size), dtype=np.float32)
    start_y, start_x = np.concatenate([start_y1.reshape(-1, 1), start_y2.reshape(-1, 1)], axis=1), np.concatenate([start_x1.reshape(-1, 1), start_x2.reshape(-1, 1)], axis=1)
    # print(start_x.shape, start_y.shape)
    # Composite each moving digit into every frame via element-wise max.
    for n in range(num_digits):
        digit_image = digit_imgs[n, :, :]
        for i in range(gifs.shape[0]):
            top = start_y[i, n]
            left = start_x[i, n]
            bottom = top + digit_size
            right = left + digit_size
            gifs[i, 0, top:bottom, left:right] = Overlap(gifs[i, 0, top:bottom, left:right], digit_image)
    # With probability 1/2, paste a static background digit at a random spot
    # that does not significantly overlap either moving digit's starting box.
    if_bg = random.choice([0, 1])
    if if_bg == 1:
        top = int((image_size - digit_size) * np.random.rand(1))
        left = int((image_size - digit_size) * np.random.rand(1))
        bottom = top + digit_size
        right = left + digit_size
        box_digits = np.array([start_y[0, :], start_x[0, :], start_y[0, :] + digit_size, start_x[0, :] + digit_size])
        while IOU([top, left, bottom, right], box_digits[:, 0]) or IOU([top, left, bottom, right], box_digits[:, 1]):
            top = int((image_size - digit_size) * np.random.rand(1))
            left = int((image_size - digit_size) * np.random.rand(1))
            bottom = top + digit_size
            right = left + digit_size
        gifs[:, 0, top:bottom, left:right] = Overlap(gifs[:, 0, top:bottom, left:right], background)
    return gifs, direction, bounce
def IOU(box1, box2, threshold=0.7):
    """Return True if two boxes overlap with intersection-over-union above
    *threshold*.

    Boxes are [top, left, bottom, right].  The previous version hard-coded the
    union as 2 * digit_size**2 (module global), which was only correct for
    28x28 boxes; the union is now computed from the boxes themselves —
    identical for equal-sized square boxes, correct in general.
    """
    top_d = max(box1[0], box2[0])
    left_d = max(box1[1], box2[1])
    bottom_d = min(box1[2], box2[2])
    right_d = min(box1[3], box2[3])
    interbox = max(0, right_d - left_d) * max(0, bottom_d - top_d)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - interbox
    if union <= 0:
        return False
    return interbox / float(union) > threshold
def create_gifs_for_data(dataset, data, labels, num):
    """Generate *num* captioned clips whose digit pair belongs to *dataset*.

    dataset: codes of the form motion*100 + two_digit_pair selecting which
        (motion, digit-pair) combinations are allowed in this split.
    data / labels: pooled MNIST images and their digit labels.
    Returns a list of {'video': ndarray, 'caption': str} dicts.
    """
    final_gif_data = []
    outer_index = 0
    inner_digits = dataset % 100     # allowed two-digit combinations
    motion_values = dataset // 100   # motion code per combination
    while outer_index < num:
        print(outer_index)
        # Draw two random MNIST samples; accept only if their digit pair is
        # in this split (rejection sampling).
        idxs = np.random.randint(data.shape[0], size=num_digits)
        if 10 * labels[idxs[0]] + labels[idxs[1]] in inner_digits:
            n = 10 * labels[idxs[0]] + labels[idxs[1]]
            motion_list = np.where(inner_digits == n)[0]
            random.shuffle(motion_list)
            motion_idx = motion_idxs[motion_values[motion_list[0]]]
            # The static background digit must differ from both moving digits.
            background_idxs = np.random.randint(data.shape[0], size=1)
            while labels[background_idxs] == labels[idxs[0]] or labels[background_idxs] == labels[idxs[1]]:
                background_idxs = np.random.randint(data.shape[0], size=1)
            background = data[background_idxs]
            digit = data[idxs]
            dummy, direction, bounce = create_gif(digit, motion_idx, background)
            direction, bounce = direction.astype(np.int32), bounce.astype(np.int32)
            # Caption indexes motion_strings via motion + 2*direction + 4*bounce.
            sentence = 'the digit %d is moving %s and the digit %d is moving %s .' % (
                labels[idxs[0]], motion_strings[motion_idx[0] + 2 * direction[0] + 4 * bounce[0]],
                labels[idxs[1]], motion_strings[motion_idx[1] + 2 * direction[1] + 4 * bounce[1]])
            instance = {'video': dummy, 'caption': sentence}
            final_gif_data.append(instance)
        else:
            # Rejected sample: cancel the increment below so we retry.
            outer_index -= 1
        outer_index += 1
    return final_gif_data
if __name__ == "__main__":
    import tensorflow as tf

    # Pool MNIST train + test splits into one image/label bank.
    (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
    train_data = train_x
    train_labels = train_y
    val_data = test_x
    val_labels = test_y
    data = np.concatenate((train_data, val_data), axis=0)
    labels = np.concatenate((train_labels, val_labels), axis=0)
    train, val = create_dataset()
    # print train, val
    data_train = create_gifs_for_data(train, data, labels, 24000)
    data_val = create_gifs_for_data(val, data, labels, 6000)
    if not os.path.exists('./data/moving_mnist'):
        os.makedirs('./data/moving_mnist')
    # LMDB needs a large sparse map on Linux; keep it small elsewhere.
    map_size = 1099511627776 * 2 if platform.system() == "Linux" else 1280000
    db = lmdb.open(
        './data/moving_mnist/mnist_double_modified_20f_30k_train.lmdb', map_size=map_size, subdir=False, meminit=False, map_async=True
    )
    # Write pickled (video, caption) tuples keyed by ascending integer strings.
    INSTANCE_COUNTER: int = 0
    txn = db.begin(write=True)
    for instance in data_train:
        instance = (instance["video"], instance["caption"])
        txn.put(
            f"{INSTANCE_COUNTER}".encode("ascii"),
            pickle.dumps(instance, protocol=-1)
        )
        INSTANCE_COUNTER += 1
    txn.commit()
    db.sync()
    db.close()

    # Same layout for the validation split.
    db2 = lmdb.open(
        './data/moving_mnist/mnist_double_modified_20f_30k_test.lmdb', map_size=map_size, subdir=False,
        meminit=False, map_async=True
    )
    INSTANCE_COUNTER: int = 0
    txn = db2.begin(write=True)
    for instance in data_val:
        instance = (instance["video"], instance["caption"])
        txn.put(
            f"{INSTANCE_COUNTER}".encode("ascii"),
            pickle.dumps(instance, protocol=-1)
        )
        INSTANCE_COUNTER += 1
    txn.commit()
    db2.sync()
    db2.close()
    print('Finished!')
| [
"os.path.exists",
"random.choice",
"numpy.repeat",
"random.shuffle",
"os.makedirs",
"tensorflow.keras.datasets.mnist.load_data",
"pickle.dumps",
"numpy.random.rand",
"numpy.where",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"lmdb.open",
"platform.system",
"numpy.concatenate",
... | [((918, 960), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (926, 960), True, 'import numpy as np\n'), ((100, 126), 'numpy.random.randint', 'np.random.randint', (['(1 << 30)'], {}), '(1 << 30)\n', (117, 126), True, 'import numpy as np\n'), ((1078, 1101), 'random.shuffle', 'random.shuffle', (['numbers'], {}), '(numbers)\n', (1092, 1101), False, 'import random\n'), ((1116, 1133), 'numpy.array', 'np.array', (['numbers'], {}), '(numbers)\n', (1124, 1133), True, 'import numpy as np\n'), ((1148, 1183), 'numpy.zeros', 'np.zeros', (['(4, 10 * 9)'], {'dtype': 'np.int'}), '((4, 10 * 9), dtype=np.int)\n', (1156, 1183), True, 'import numpy as np\n'), ((2489, 2510), 'random.choice', 'random.choice', (['[1, 0]'], {}), '([1, 0])\n', (2502, 2510), False, 'import random\n'), ((2578, 2599), 'random.choice', 'random.choice', (['[1, 0]'], {}), '([1, 0])\n', (2591, 2599), False, 'import random\n'), ((4071, 4087), 'numpy.maximum', 'np.maximum', (['a', 'b'], {}), '(a, b)\n', (4081, 4087), True, 'import numpy as np\n'), ((4203, 4214), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4211, 4214), True, 'import numpy as np\n'), ((4228, 4239), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4236, 4239), True, 'import numpy as np\n'), ((4979, 5053), 'numpy.zeros', 'np.zeros', (['(start_y1.shape[0], 1, image_size, image_size)'], {'dtype': 'np.float32'}), '((start_y1.shape[0], 1, image_size, image_size), dtype=np.float32)\n', (4987, 5053), True, 'import numpy as np\n'), ((5642, 5663), 'random.choice', 'random.choice', (['[0, 1]'], {}), '([0, 1])\n', (5655, 5663), False, 'import random\n'), ((8474, 8509), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (8507, 8509), True, 'import tensorflow as tf\n'), ((8620, 8666), 'numpy.concatenate', 'np.concatenate', (['(train_data, val_data)'], {'axis': '(0)'}), '((train_data, val_data), axis=0)\n', (8634, 8666), True, 'import 
numpy as np\n'), ((8680, 8730), 'numpy.concatenate', 'np.concatenate', (['(train_labels, val_labels)'], {'axis': '(0)'}), '((train_labels, val_labels), axis=0)\n', (8694, 8730), True, 'import numpy as np\n'), ((9098, 9239), 'lmdb.open', 'lmdb.open', (['"""./data/moving_mnist/mnist_double_modified_20f_30k_train.lmdb"""'], {'map_size': 'map_size', 'subdir': '(False)', 'meminit': '(False)', 'map_async': '(True)'}), "('./data/moving_mnist/mnist_double_modified_20f_30k_train.lmdb',\n map_size=map_size, subdir=False, meminit=False, map_async=True)\n", (9107, 9239), False, 'import lmdb\n'), ((9616, 9756), 'lmdb.open', 'lmdb.open', (['"""./data/moving_mnist/mnist_double_modified_20f_30k_test.lmdb"""'], {'map_size': 'map_size', 'subdir': '(False)', 'meminit': '(False)', 'map_async': '(True)'}), "('./data/moving_mnist/mnist_double_modified_20f_30k_test.lmdb',\n map_size=map_size, subdir=False, meminit=False, map_async=True)\n", (9625, 9756), False, 'import lmdb\n'), ((1693, 1708), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (1701, 1708), True, 'import numpy as np\n'), ((1710, 1723), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (1718, 1723), True, 'import numpy as np\n'), ((2205, 2227), 'random.randint', 'random.randint', (['(15)', '(85)'], {}), '(15, 85)\n', (2219, 2227), False, 'import random\n'), ((2283, 2305), 'random.randint', 'random.randint', (['(15)', '(85)'], {}), '(15, 85)\n', (2297, 2305), False, 'import random\n'), ((5903, 6004), 'numpy.array', 'np.array', (['[start_y[0, :], start_x[0, :], start_y[0, :] + digit_size, start_x[0, :] +\n digit_size]'], {}), '([start_y[0, :], start_x[0, :], start_y[0, :] + digit_size, start_x\n [0, :] + digit_size])\n', (5911, 6004), True, 'import numpy as np\n'), ((7059, 7108), 'numpy.random.randint', 'np.random.randint', (['data.shape[0]'], {'size': 'num_digits'}), '(data.shape[0], size=num_digits)\n', (7076, 7108), True, 'import numpy as np\n'), ((8928, 8965), 'os.path.exists', 'os.path.exists', 
(['"""./data/moving_mnist"""'], {}), "('./data/moving_mnist')\n", (8942, 8965), False, 'import os\n'), ((8975, 9009), 'os.makedirs', 'os.makedirs', (['"""./data/moving_mnist"""'], {}), "('./data/moving_mnist')\n", (8986, 9009), False, 'import os\n'), ((7300, 7327), 'random.shuffle', 'random.shuffle', (['motion_list'], {}), '(motion_list)\n', (7314, 7327), False, 'import random\n'), ((7427, 7467), 'numpy.random.randint', 'np.random.randint', (['data.shape[0]'], {'size': '(1)'}), '(data.shape[0], size=1)\n', (7444, 7467), True, 'import numpy as np\n'), ((9047, 9064), 'platform.system', 'platform.system', ([], {}), '()\n', (9062, 9064), False, 'import platform\n'), ((9483, 9518), 'pickle.dumps', 'pickle.dumps', (['instance'], {'protocol': '(-1)'}), '(instance, protocol=-1)\n', (9495, 9518), False, 'import pickle\n'), ((10007, 10042), 'pickle.dumps', 'pickle.dumps', (['instance'], {'protocol': '(-1)'}), '(instance, protocol=-1)\n', (10019, 10042), False, 'import pickle\n'), ((3864, 3881), 'numpy.array', 'np.array', (['start_y'], {}), '(start_y)\n', (3872, 3881), True, 'import numpy as np\n'), ((3929, 3946), 'numpy.array', 'np.array', (['start_x'], {}), '(start_x)\n', (3937, 3946), True, 'import numpy as np\n'), ((4493, 4555), 'numpy.repeat', 'np.repeat', (['start_y1[-1]', '(start_y2.shape[0] - start_y1.shape[0])'], {}), '(start_y1[-1], start_y2.shape[0] - start_y1.shape[0])\n', (4502, 4555), True, 'import numpy as np\n'), ((4611, 4673), 'numpy.repeat', 'np.repeat', (['start_x1[-1]', '(start_x2.shape[0] - start_x1.shape[0])'], {}), '(start_x1[-1], start_x2.shape[0] - start_x1.shape[0])\n', (4620, 4673), True, 'import numpy as np\n'), ((5729, 5746), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (5743, 5746), True, 'import numpy as np\n'), ((5795, 5812), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (5809, 5812), True, 'import numpy as np\n'), ((7257, 7284), 'numpy.where', 'np.where', (['(inner_digits == n)'], {}), '(inner_digits == 
n)\n', (7265, 7284), True, 'import numpy as np\n'), ((7610, 7650), 'numpy.random.randint', 'np.random.randint', (['data.shape[0]'], {'size': '(1)'}), '(data.shape[0], size=1)\n', (7627, 7650), True, 'import numpy as np\n'), ((4777, 4839), 'numpy.repeat', 'np.repeat', (['start_y2[-1]', '(start_y1.shape[0] - start_y2.shape[0])'], {}), '(start_y2[-1], start_y1.shape[0] - start_y2.shape[0])\n', (4786, 4839), True, 'import numpy as np\n'), ((4895, 4957), 'numpy.repeat', 'np.repeat', (['start_x2[-1]', '(start_x1.shape[0] - start_x2.shape[0])'], {}), '(start_x2[-1], start_x1.shape[0] - start_x2.shape[0])\n', (4904, 4957), True, 'import numpy as np\n'), ((6168, 6185), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (6182, 6185), True, 'import numpy as np\n'), ((6238, 6255), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (6252, 6255), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import SGDClassifier
from shared import dataset_local_path, simple_boxplot
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
import json
from sklearn.tree import DecisionTreeClassifier
#%% load up the data
examples = []
ys = []
# Load our data to list of examples:
with open(dataset_local_path("poetry_id.jsonl")) as fp:
for line in fp:
info = json.loads(line)
keep = info["features"]
ys.append(info["poetry"])
examples.append(keep)
## CONVERT TO MATRIX:
feature_numbering = DictVectorizer(sort=True, sparse=False)
X = feature_numbering.fit_transform(examples)
del examples
## SPLIT DATA:
RANDOM_SEED = 12345678
# Numpy-arrays are more useful than python's lists.
y = np.array(ys)
# split off train/validate (tv) pieces.
rX_tv, rX_test, y_tv, y_test = train_test_split(
X, y, train_size=0.75, shuffle=True, random_state=RANDOM_SEED
)
# split off train, validate from (tv) pieces.
rX_train, rX_vali, y_train, y_vali = train_test_split(
rX_tv, y_tv, train_size=0.66, shuffle=True, random_state=RANDOM_SEED
)
scale = StandardScaler()
X_train = scale.fit_transform(rX_train)
X_vali: np.ndarray = scale.transform(rX_vali) # type:ignore
X_test: np.ndarray = scale.transform(rX_test) # type:ignore
#%% Actually compute performance for each % of training data
N = len(y_train)
num_trials = 100
percentages = list(range(5, 100, 5))
percentages.append(100)
scores = {}
acc_mean = []
acc_std = []
# Which subset of data will potentially really matter.
for train_percent in percentages:
n_samples = int((train_percent / 100) * N)
print("{}% == {} samples...".format(train_percent, n_samples))
label = "{} {}".format(train_percent, n_samples)
# So we consider num_trials=100 subsamples, and train a model on each.
scores[label] = []
for i in range(num_trials):
X_sample, y_sample = resample(
X_train, y_train, n_samples=n_samples, replace=False
) # type:ignore
# Note here, I'm using a simple classifier for speed, rather than the best.
clf = SGDClassifier(random_state=RANDOM_SEED + train_percent + i)
clf.fit(X_sample, y_sample)
# so we get 100 scores per percentage-point.
scores[label].append(clf.score(X_vali, y_vali))
# We'll first look at a line-plot of the mean:
acc_mean.append(np.mean(scores[label]))
acc_std.append(np.std(scores[label]))
# First, try a line plot, with shaded variance regions:
import matplotlib.pyplot as plt
# convert our list of means/std to numpy arrays so we can add & subtract them.
means = np.array(acc_mean)
std = np.array(acc_std)
# plot line from means
plt.plot(percentages, acc_mean, "o-")
# plot area from means & stddev
plt.fill_between(percentages, means - std, means + std, alpha=0.2)
# Manage axes/show:
plt.xlabel("Percent Training Data")
plt.ylabel("Mean Accuracy")
plt.xlim([0, 100])
plt.title("Shaded Accuracy Plot")
plt.savefig("graphs/p09-area-Accuracy.png")
plt.show()
# Second look at the boxplots in-order: (I like this better, IMO)
simple_boxplot(
scores,
"Learning Curve",
xlabel="Percent Training Data",
ylabel="Accuracy",
save="graphs/p09-boxplots-Accuracy.png",
)
# TODO: (practical tasks)
# 1. Swap in a better, but potentially more expensive classifier.
# - Even DecisionTreeClassifier has some more interesting behavior on these plots.
# 2. Change the plots to operate over multiples of 50 samples, instead of percentages.
# - This will likely be how you want to make these plots for your project.
# OPTIONAL CHALLENGE:
# Refactor the code so that you can evaluate multiple models in this fashion.
# Two different models at the same time will likely max out the visual utility of the plot.
# The boxplot will not be able to show both models at once. | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.array",
"shared.simple_boxplot",
"numpy.mean",
"sklearn.linear_model.SGDClassifier",
"sklearn.feature_extraction.DictVectorizer",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"shared.dataset_local_path",
"sklearn.utils... | [((700, 739), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {'sort': '(True)', 'sparse': '(False)'}), '(sort=True, sparse=False)\n', (714, 739), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((895, 907), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (903, 907), True, 'import numpy as np\n'), ((979, 1058), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': '(0.75)', 'shuffle': '(True)', 'random_state': 'RANDOM_SEED'}), '(X, y, train_size=0.75, shuffle=True, random_state=RANDOM_SEED)\n', (995, 1058), False, 'from sklearn.model_selection import train_test_split\n'), ((1148, 1239), 'sklearn.model_selection.train_test_split', 'train_test_split', (['rX_tv', 'y_tv'], {'train_size': '(0.66)', 'shuffle': '(True)', 'random_state': 'RANDOM_SEED'}), '(rX_tv, y_tv, train_size=0.66, shuffle=True, random_state=\n RANDOM_SEED)\n', (1164, 1239), False, 'from sklearn.model_selection import train_test_split\n'), ((1251, 1267), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1265, 1267), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2760, 2778), 'numpy.array', 'np.array', (['acc_mean'], {}), '(acc_mean)\n', (2768, 2778), True, 'import numpy as np\n'), ((2785, 2802), 'numpy.array', 'np.array', (['acc_std'], {}), '(acc_std)\n', (2793, 2802), True, 'import numpy as np\n'), ((2826, 2863), 'matplotlib.pyplot.plot', 'plt.plot', (['percentages', 'acc_mean', '"""o-"""'], {}), "(percentages, acc_mean, 'o-')\n", (2834, 2863), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2962), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['percentages', '(means - std)', '(means + std)'], {'alpha': '(0.2)'}), '(percentages, means - std, means + std, alpha=0.2)\n', (2912, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2984, 3019), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Percent Training Data"""'], {}), "('Percent Training 
Data')\n", (2994, 3019), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3047), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Accuracy"""'], {}), "('Mean Accuracy')\n", (3030, 3047), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3066), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 100]'], {}), '([0, 100])\n', (3056, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3100), 'matplotlib.pyplot.title', 'plt.title', (['"""Shaded Accuracy Plot"""'], {}), "('Shaded Accuracy Plot')\n", (3076, 3100), True, 'import matplotlib.pyplot as plt\n'), ((3101, 3144), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""graphs/p09-area-Accuracy.png"""'], {}), "('graphs/p09-area-Accuracy.png')\n", (3112, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3153, 3155), True, 'import matplotlib.pyplot as plt\n'), ((3224, 3360), 'shared.simple_boxplot', 'simple_boxplot', (['scores', '"""Learning Curve"""'], {'xlabel': '"""Percent Training Data"""', 'ylabel': '"""Accuracy"""', 'save': '"""graphs/p09-boxplots-Accuracy.png"""'}), "(scores, 'Learning Curve', xlabel='Percent Training Data',\n ylabel='Accuracy', save='graphs/p09-boxplots-Accuracy.png')\n", (3238, 3360), False, 'from shared import dataset_local_path, simple_boxplot\n'), ((463, 500), 'shared.dataset_local_path', 'dataset_local_path', (['"""poetry_id.jsonl"""'], {}), "('poetry_id.jsonl')\n", (481, 500), False, 'from shared import dataset_local_path, simple_boxplot\n'), ((544, 560), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (554, 560), False, 'import json\n'), ((2043, 2105), 'sklearn.utils.resample', 'resample', (['X_train', 'y_train'], {'n_samples': 'n_samples', 'replace': '(False)'}), '(X_train, y_train, n_samples=n_samples, replace=False)\n', (2051, 2105), False, 'from sklearn.utils import resample\n'), ((2241, 2300), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'random_state': '(RANDOM_SEED + train_percent 
+ i)'}), '(random_state=RANDOM_SEED + train_percent + i)\n', (2254, 2300), False, 'from sklearn.linear_model import SGDClassifier\n'), ((2517, 2539), 'numpy.mean', 'np.mean', (['scores[label]'], {}), '(scores[label])\n', (2524, 2539), True, 'import numpy as np\n'), ((2560, 2581), 'numpy.std', 'np.std', (['scores[label]'], {}), '(scores[label])\n', (2566, 2581), True, 'import numpy as np\n')] |
import math
import numpy as np
class binomial_tree:
'''This class implements the CRR Bimonial Tree method to calculate European Option Price.
The volatility as an input is set to be an array instead of a constant to provide expanded
ability to incorpate volatility changes during the calculated period.
This implementation does not enforce recombination. So the Number of Steps is limited to <20.
Author: <NAME>, <NAME>
Date: April 27, 2019.
'''
def __init__ (self,StepNumber,Days2Maturity,SigmaList,InterestRate,UnderlyingPx,StrickPx,CallOption=True):
self.DoY = 365 # Day of Year
self.InterestRate = InterestRate
self.Days2Maturity = Days2Maturity
self.delta_t = (Days2Maturity/StepNumber)/self.DoY
self.ulist = [math.exp(x*math.sqrt(self.delta_t)) for x in SigmaList]
self.dlist = [1.0/x for x in self.ulist]
self.plist = [(math.exp(InterestRate*self.delta_t)-d)/(u-d) for u,d in zip(self.ulist, self.dlist)]
self.UD = np.asarray([self.ulist, self.dlist])
self.prob_matrix = np.asarray([self.plist, [1.0 - p for p in self.plist]])
self.binary_list = (bin(i)[2:].zfill(StepNumber) for i in range(2**StepNumber))
self.price_list = []
self.prob_list = []
for bn in self.binary_list:
PxM = 1;
PrM = 1;
for idx in range(len(bn)):
PxM = PxM * self.UD[int(bn[idx]), idx]
PrM = PrM * self.prob_matrix[int(bn[idx]), idx]
self.price_list.append(UnderlyingPx*PxM)
self.prob_list.append(PrM)
if CallOption:
self.value_list = [max(0, p - StrickPx) for p in self.price_list]
else:
self.value_list = [max(0, StrickPx - p) for p in self.price_list]
def option_value (self):
ExpValueList = [x*y for x, y in zip(self.value_list, self.prob_list)]
return sum(ExpValueList) * math.exp((-1.0)*self.InterestRate*(self.Days2Maturity/self.DoY))
| [
"math.sqrt",
"math.exp",
"numpy.asarray"
] | [((1044, 1080), 'numpy.asarray', 'np.asarray', (['[self.ulist, self.dlist]'], {}), '([self.ulist, self.dlist])\n', (1054, 1080), True, 'import numpy as np\n'), ((1108, 1165), 'numpy.asarray', 'np.asarray', (['[self.plist, [(1.0 - p) for p in self.plist]]'], {}), '([self.plist, [(1.0 - p) for p in self.plist]])\n', (1118, 1165), True, 'import numpy as np\n'), ((2012, 2080), 'math.exp', 'math.exp', (['(-1.0 * self.InterestRate * (self.Days2Maturity / self.DoY))'], {}), '(-1.0 * self.InterestRate * (self.Days2Maturity / self.DoY))\n', (2020, 2080), False, 'import math\n'), ((813, 836), 'math.sqrt', 'math.sqrt', (['self.delta_t'], {}), '(self.delta_t)\n', (822, 836), False, 'import math\n'), ((930, 967), 'math.exp', 'math.exp', (['(InterestRate * self.delta_t)'], {}), '(InterestRate * self.delta_t)\n', (938, 967), False, 'import math\n')] |
#!/usr/bin/env python
import os
import sys
sys.path.append('/home/bithika/src/House-Number-Detection')
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
from skimage import color
from skimage import io
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
#import torch
import argparse
import h5py
plt.rcParams['figure.figsize'] = (16.0, 4.0)
###############################################################################
###############################################################################
#device = torch.device("cpu")
from preprocess_utils import *
from plot_utils import *
###############################################################################
###############################################################################
# Argument Parsing
#
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('--base-dir', type=str,
default='/home/bithika/src/House-Number-Detection', help='Input base directory ')
parser.add_argument('--train-dir', type=str,
default='/home/bithika/src/House-Number-Detection/data/raw/train_32x32.mat', help='Input data directory')
parser.add_argument('--test-dir', type=str,
default='/home/bithika/src/House-Number-Detection/data/raw/test_32x32.mat', help='Input data directory')
parser.add_argument('--output-dir', type=str,
default='/home/bithika/src/House-Number-Detection/reports', help='Input data directory')
parser.add_argument('--processed-data-dir', type=str,
default='/home/bithika/src/House-Number-Detection/data/processed', help='processed data directory')
parser.add_argument('--validation-data-fraction', type=float,
default=0.1, help='validation dataset split fraction (default: 0.1)')
args = parser.parse_args()
###############################################################################
###############################################################################
# Load dataset
#
# Reading the .mat files
X_train, y_train = load_data(args.train_dir)
X_test, y_test = load_data(args.test_dir)
print("Training Set", X_train.shape, y_train.shape)
print("Test Set", X_test.shape, y_test.shape)
# Calculate the total number of images
num_images = X_train.shape[0] + X_test.shape[0]
print("Total Number of Images", num_images)
# Transpose image arrays
# (width, height, channels, size) -> (size, width, height, channels)
X_train, y_train = X_train.transpose((3,0,1,2)), y_train[:,0]
X_test, y_test = X_test.transpose((3,0,1,2)), y_test[:,0]
print("Training Set", X_train.shape)
print("Test Set", X_test.shape)
print('')
# Plot some training set images
plot_images(X_train, y_train, 2, 8, args.output_dir, 'train_images.png')
# Plot some test set images
plot_images(X_test, y_test, 2, 8, args.output_dir, 'test_images.png')
# check for unique labesl
print(np.unique(y_train))
# data distribution
plot_data_distribution(y_train, y_test, args.output_dir, 'class_distribution.png')
# distributions are skewed in the positive direction i.e lesser data on the higher values
convert_labels_10to0(y_train)
convert_labels_10to0(y_test)
# check for unique labesl
print(np.unique(y_train))
# split training data into train and validation
#X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.13, random_state=7, stratify = y_train)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=args.validation_data_fraction, random_state=7)
plot_data_distribution(y_train, y_val, args.output_dir, 'train_val_class_distribution.png')
print(y_train.shape, y_val.shape, y_test.shape)
# convert to float for numpy computation
train_greyscale = rgb2gray(X_train).astype(np.float32)
test_greyscale = rgb2gray(X_test).astype(np.float32)
val_greyscale = rgb2gray(X_val).astype(np.float32)
print("Training Set", train_greyscale.shape)
print("Validation Set", val_greyscale.shape)
print("Test Set", test_greyscale.shape)
print('')
# remove RGB train, test and val set from RAM
del X_train, X_val, X_test
plot_images(train_greyscale, y_train, 1, 10,args.output_dir, 'train_images_greyscale.png' )
# Normalisation
# Liang et al. 2015 report that the pre-processed the images by removing the per-pixel mean value calculated over
#the entire set.
#Goodfellow et al. 2013 report that they subtract the mean from every image.
train_greyscale_norm, test_greyscale_norm, val_greyscale_norm = normalize(train_greyscale, test_greyscale, val_greyscale)
plot_images(train_greyscale, y_train, 1, 10, args.output_dir, 'train_images_greyscale_norm.png' )
#one hot label encoding
y_train, y_test, y_val = one_hot_labels(y_train, y_test, y_val )
print("Training set", y_train.shape)
print("Validation set", y_val.shape)
print("Test set", y_test.shape)
store_data('SVHN_grey.h5',
args.processed_data_dir,
train_greyscale_norm, test_greyscale_norm, val_greyscale_norm,
y_train, y_test, y_val)
| [
"sklearn.model_selection.train_test_split",
"sys.path.append",
"numpy.unique",
"argparse.ArgumentParser"
] | [((44, 103), 'sys.path.append', 'sys.path.append', (['"""/home/bithika/src/House-Number-Detection"""'], {}), "('/home/bithika/src/House-Number-Detection')\n", (59, 103), False, 'import sys\n'), ((899, 949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model"""'}), "(description='Train model')\n", (922, 949), False, 'import argparse\n'), ((3537, 3632), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': 'args.validation_data_fraction', 'random_state': '(7)'}), '(X_train, y_train, test_size=args.validation_data_fraction,\n random_state=7)\n', (3553, 3632), False, 'from sklearn.model_selection import train_test_split\n'), ((3008, 3026), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (3017, 3026), True, 'import numpy as np\n'), ((3313, 3331), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (3322, 3331), True, 'import numpy as np\n')] |
import numpy as np
from os import path, mkdir, listdir, fsync
#### Concat all beta files
all_files = listdir('./')
all_res = []
for file in all_files:
print(file)
if 'beta' in file:
try:
all_res.append(np.load(file))
except:
print("File \"{}\" could not be loaded due to some reason".format(file))
continue
all_stacked = np.vstack(all_res)
np.save('beta_all', all_stacked)
print(all_stacked.shape)
#### Concat all eval files
all_files = listdir('../eval/')
all_res = []
for file in all_files:
if 'eval' in file:
print(file)
all_res.append(np.load(path.join('../eval/', file)))
all_stacked = np.hstack(all_res)
np.save('../eval/eval_all', all_stacked)
print(all_stacked.shape) | [
"os.listdir",
"numpy.hstack",
"os.path.join",
"numpy.vstack",
"numpy.load",
"numpy.save"
] | [((106, 119), 'os.listdir', 'listdir', (['"""./"""'], {}), "('./')\n", (113, 119), False, 'from os import path, mkdir, listdir, fsync\n'), ((387, 405), 'numpy.vstack', 'np.vstack', (['all_res'], {}), '(all_res)\n', (396, 405), True, 'import numpy as np\n'), ((406, 438), 'numpy.save', 'np.save', (['"""beta_all"""', 'all_stacked'], {}), "('beta_all', all_stacked)\n", (413, 438), True, 'import numpy as np\n'), ((506, 525), 'os.listdir', 'listdir', (['"""../eval/"""'], {}), "('../eval/')\n", (513, 525), False, 'from os import path, mkdir, listdir, fsync\n'), ((681, 699), 'numpy.hstack', 'np.hstack', (['all_res'], {}), '(all_res)\n', (690, 699), True, 'import numpy as np\n'), ((700, 740), 'numpy.save', 'np.save', (['"""../eval/eval_all"""', 'all_stacked'], {}), "('../eval/eval_all', all_stacked)\n", (707, 740), True, 'import numpy as np\n'), ((235, 248), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (242, 248), True, 'import numpy as np\n'), ((636, 663), 'os.path.join', 'path.join', (['"""../eval/"""', 'file'], {}), "('../eval/', file)\n", (645, 663), False, 'from os import path, mkdir, listdir, fsync\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 2021/1/31 9:47 PM
"""
Lightweight class to record training and dataset file info for later retrieval.
Experiment ID is generated from the exp_ID_config.csv file; 1 if file not exist.
Also includes data FalsePositiveCategorizer class for categorizing false negatives and false positives
"""
import os
from datetime import date
import pickle
from typing import Union, Dict, Tuple, List
import pandas as pd
from tensorflow.keras import Sequential
import numpy as np
from processing.marsdataloader import MARSDataLoader, generate_all_feat_df
from processing.extract_features import extract_destabilize
import consts as C
class Recorder():
def __init__(self,
loader: MARSDataLoader,
train_args: dict,
seq_y: bool,
verbose=True):
self.loader = loader
self.verbose = verbose
self.train_args = train_args
self.configID = self.train_args["configID"]
self.exp_date = date.today().strftime("%B %d, %Y")
self.using_seq_label = seq_y
# get unique experiment ID for current project folder
self.exp_ID = int(_find_next_exp_ID())
# unique experiment folder path
# i.e. fill in exp{}_{}win_{}ahead_conf{}_{}
self.exp_dir = C.EXP_FORMAT.format(self.exp_ID,
self.train_args["window"],
self.train_args["ahead"],
self.train_args["configID"],
self.train_args["model"])
# get prediction path
self.pred_path = C.PRED_PATH.format(self.exp_ID,
self.train_args["window"],
self.train_args["ahead"],
self.train_args["configID"],
self.train_args["model"])
self.model_path = os.path.join(self.exp_dir, C.MODEL_PATH) # path to model
self.recorder_path = os.path.join(self.exp_dir, C.REC_BASENAME)
self.norm_stats_path = os.path.join(self.exp_dir, C.NORM_STATS_PATH)
# to be recorded on record_experiment
self.history: dict = {} # hisotry dict from keras history object, if any passed
self.time_taken: str = "" # string of time taken in this experiment
self.average_epochs: float = 0
self.std_epochs: float = 0
self.best_split: int = -1 # index of the best performing split, 0-based
if self.verbose:
print("Now recording experiment #{}".format(self.exp_ID))
def record_experiment(self,
test_results: dict,
time_taken: str,
epoch_list: list,
best_split: int,
model: Sequential = None,
norm_stats: dict = None,
train_history: list = None,
save_model: bool = False):
"""record experiment configuration and statistics"""
# link references
if train_history:
self.history = train_history
self.average_epochs = float(np.mean(epoch_list))
self.std_epochs = float(np.std(epoch_list))
self.best_split = best_split
self.time_taken = time_taken
# create new path in results and experiment folders
if not os.path.exists(self.exp_dir):
os.mkdir(self.exp_dir)
if model is not None and save_model:
self.__save_model(model)
if norm_stats is not None:
self.__save_norm_stats(norm_stats)
# append test set metrics to results/exp_results_all.csv
self.__save_results(test_results)
# once all of the above done, append experiment info to results/exp_ID_config.csv
self.__save_exp_config()
# pickle this recorder to its path
pickle.dump(self, open(self.recorder_path, "wb"))
if self.verbose:
print("Experiment {} recorded successfully!".format(self.exp_ID))
def save_predictions(self,
test_inds: Union[list, np.ndarray],
y_pred: Union[list, np.ndarray],
true_preds_path: str="",
false_preds_path: str="",
custom_ahead: float=None,
save_lookahead_windows=False) -> None:
"""save prediction for specified rows; separate files will be generated if no sequence label used and true and
false pred paths are given."""
# generate test DataFrame
test_df = generate_all_feat_df(self.loader, self.configID, inds=test_inds)
# append predictions
if y_pred.ndim <=2:
test_df[C.PRED_COL] = y_pred
else:
# squeeze sequence labels to (num_samples, sampling_rate)
y_pred = y_pred.squeeze(-1)
# convert to list of arrays for DataFrame to correctly append new column
test_df[C.PRED_COL] = [y_pred[i, :] for i in range(y_pred.shape[0])]
# reorder so that false predictions come up first and label true and false predictions
if self.using_seq_label:
# compare seq predictions by row
test_df["pred_seq_is_correct"] = test_df.apply(lambda row: np.array_equal(row.seq_label, row[C.PRED_COL]), axis=1)
test_df.sort_values("pred_seq_is_correct", inplace=True)
else:
# show false negatives first
test_df.sort_values(["label", C.PRED_COL], ascending=[False, True], inplace=True)
# pop seq_label column since not needed
test_df.drop(["seq_label"], axis=1, inplace=True)
# save correct and incorrect predictions separately if both paths are given; otherwise, save in one file
if true_preds_path and false_preds_path and not self.using_seq_label:
pred_label_is_correct = test_df.apply(lambda row: np.array_equal(row.label, row[C.PRED_COL]), axis=1)
# categorize false negatives for non-sequential labels
if not self.using_seq_label:
print("now processing destab joystick in lookahead windows...")
test_df = append_lookahead_stats(test_df, self, custom_ahead, save_lookahead_windows=save_lookahead_windows)
grouped = test_df.groupby(pred_label_is_correct)
# find respective rows and save separately
true_df = grouped.get_group(True)
false_df = grouped.get_group(False)
true_df.to_csv(true_preds_path, index=False)
print(f"saved {len(true_df)} true/correct predictions to {true_preds_path}")
false_df.to_csv(false_preds_path, index=False)
print(f"saved {len(false_df)} true/correct predictions to {false_preds_path}")
print(f"accuracy (for debugging): {len(true_df)/(len(true_df) + len(false_df))}")
else:
test_df.to_csv(self.pred_path, index=False)
if self.verbose:
print("Model test set input and prediction saved successfully!")
def list_training_columns(self) -> list:
return C.CONFIG_SPECS[self.configID][C.COLS_USED]
def __save_model(self, model) -> None:
"""helper to save models"""
assert type(model) == Sequential, "Only Keras Sequential models are supported! " \
"Consider adding new code and updating model saving methods."
# append number to avoid collision, if needed
collision_n = 0
if os.path.exists(self.model_path):
while os.path.exists(self.model_path + "_" + str(collision_n)):
collision_n += 1
self.model_path = self.model_path + "_" + str(collision_n)
if collision_n:
print("Model path has been revised to {} to avoid collision. \n"
"In principal, this shouldn't happen since model path has unique experiment ID.".format(
self.model_path))
model.save(self.model_path)
def __save_norm_stats(self, norm_stats: dict):
"""helper to save normalization stats"""
pickle.dump(norm_stats, open(self.norm_stats_path, "wb"))
def __save_results(self, cv_results: Dict[str, list]) -> None:
"""calculate and append CV test results to results/exp_results_all.csv"""
# compute mean and std of CV results
calculated_results = {}
for metric_name in cv_results:
calculated_results[metric_name + C.MEAN_SUFFIX] = np.nanmean(cv_results[metric_name])
calculated_results[metric_name + C.STD_SUFFIX] = np.nanstd(cv_results[metric_name])
# add ID to current results
calculated_results[C.EXP_COL_CONV[C.EXP_ID_COL]] = self.exp_ID
# retrieve previous results
try:
results_df = pd.read_csv(C.ALL_RES_CSV_PATH)
except IOError:
results_df = pd.read_csv(C.TEMPLATE_ALL_RES)
# save current results
results_df = results_df.append(calculated_results, ignore_index=True)
results_df.to_csv(C.ALL_RES_CSV_PATH, index=False)
def __save_exp_config(self) -> None:
"""save current configuration to exp_ID_config.csv for easy retrieval"""
# load configuration file
if os.path.exists(C.EXP_ID_LOG):
config_df = pd.read_csv(C.EXP_ID_LOG, dtype={C.EXP_ID_COL: int})
else:
config_df = pd.read_csv(C.TEMPLATE_ID_LOG, dtype={C.EXP_ID_COL: int})
config_df = config_df.append(self.__compile_exp_dict(), ignore_index=True)
config_df.to_csv(C.EXP_ID_LOG, index=False)
def __compile_exp_dict(self) -> dict:
"""compile experiment configuration dictionary"""
# put together attributes for extraction
all_atts = {**vars(self), **vars(self.loader), **self.train_args}
# keep only savable atts--filter out lists, dicts, etc.
savable_atts = _filter_values(all_atts)
# convert the convertable columns, if possible, for output
output = {}
for (column, value) in savable_atts.items():
if column in C.EXP_COL_CONV:
output[C.EXP_COL_CONV[column]] = value
else:
output[column] = value
# Lastly, add info not included in class fields.
# text description of dataset configuration (e.g. basic triple)
output[C.CONFIG_DESC_COL_NAME] = C.CONFIG_SPECS[self.configID][C.CONFIG_OVERVIEW]
return output
def _find_next_exp_ID() -> int:
    """Return the next unique experiment ID from the on-disk counter.

    Reads the ID record file, defaulting to 1 when the file is missing
    or unreadable, then persists the incremented counter so subsequent
    experiments get fresh IDs. Fast operation to avoid ID collision.
    """
    try:
        with open(C.EXP_ID_RECORD, "r") as record:
            current_id = int(record.read())
    except IOError:
        current_id = 1
    # store the successor for the next caller
    with open(C.EXP_ID_RECORD, 'w') as record:
        record.write(str(current_id + 1))
    return current_id
def _filter_values(vars_dict: dict) -> dict:
    """Filter out dictionary entries whose values are not str, num or bool.

    Called before converting args to column names; also applies a few
    ad-hoc column fix-ups so the output matches the experiment log schema.

    :param vars_dict: merged attribute dictionary of an experiment
    :return: new dictionary containing only loggable values
    """
    output = {key: value for key, value in vars_dict.items() if type(value) in C.ACCEPTABLE_TYPES}
    # ad-hoc popping duplicate keys; same as using_seq_label in Recorder.
    # default=None avoids a KeyError when the key is absent
    output.pop("seq_label", None)
    # ad-hoc for adding layer sizes (only meaningful for CNN/MLP models)
    if vars_dict["model"] in {C.CNN, C.MLP}:
        output["layer_sizes"] = vars_dict["layer_sizes"]
    else:
        output["layer_sizes"] = "NA"
    # ad-hoc change filter_number to NA for non-CNN models
    if vars_dict["model"] != C.CNN:
        output["filter_number"] = "NA"
    return output
class TestSetProcessor:
def __init__(self,
recorder: Recorder,
current_ahead: float
):
if not os.path.exists(C.RAW_DATA_PATH):
raise FileNotFoundError("Raw data file cannot be found at {}".format(C.RAW_DATA_PATH))
# extract all needed columns
self.raw_data = pd.read_csv(C.RAW_DATA_PATH, usecols=C.ESSENTIAL_RAW_COLS)
# filter out non-human controls in data for faster processing
self.raw_data = self.raw_data[self.raw_data.trialPhase != 1]
# group by trials for easy locating
self.grouped = self.raw_data.groupby('peopleTrialKey')
# get data from recorder
self.window_size = recorder.loader.window
self.lookahead = current_ahead
self.velocity_col = "calculated_vel" if "velocity_cal" in recorder.list_training_columns() else "currentVelRoll"
def generate_categories(self, data_df: pd.DataFrame) -> Tuple[List[float], List[float], List[int], List[int], List[float], List[float]]:
"""append a new column containing entry stats"""
# apply categorization function to each data point to assign error type.
# ico = including carryover destabilizing joystick from input window (ie seen by machine); eco = exclude such
lookahead_avg_destab_mag_ico, lookahead_avg_destab_mag_eco = [], []
lookahead_total_destab_steps_ico, lookahead_total_destab_steps_eco = [], []
lookahead_destab_sustained_ico, lookahead_destab_sustained_eco = [], []
for _, row in data_df.iterrows():
avg_destab_mag_ico, avg_destab_mag_eco, \
total_destab_steps_ico, total_destab_steps_eco, \
destab_sustained_ico, destab_sustained_eco = self._extract_lookahead_stats(float(row.end_seconds),
self.grouped.get_group(row.trial_key))
lookahead_avg_destab_mag_ico.append(avg_destab_mag_ico)
lookahead_avg_destab_mag_eco.append(avg_destab_mag_eco)
lookahead_total_destab_steps_ico.append(total_destab_steps_ico)
lookahead_total_destab_steps_eco.append(total_destab_steps_eco)
lookahead_destab_sustained_ico.append(destab_sustained_ico)
lookahead_destab_sustained_eco.append(destab_sustained_eco)
return lookahead_avg_destab_mag_ico, lookahead_avg_destab_mag_eco, \
lookahead_total_destab_steps_ico, lookahead_total_destab_steps_eco, \
lookahead_destab_sustained_ico, lookahead_destab_sustained_eco
def save_lookahead_windows(self, data_df: pd.DataFrame) -> pd.DataFrame:
"""save lookahead windows of each entry in given DataFrame"""
# TODO where to put this? additional arg in predict.py?
# output: [trial key, window_end], vel, pos, joystick,
# locate lookahead sequences, note that first entry is last time step in input window
lookahead_df_dict = {key:[] for key in [# "trial_key", "window_end",
"lookahead_vel", "lookahead_pos", "lookahead_joy", "lookahead_times"]}
for _, row in data_df.iterrows():
end_sec = float(row.end_seconds)
trial_entries = self.grouped.get_group(row.trial_key)
lookahead_readings = trial_entries[
trial_entries.seconds.between(end_sec, end_sec + self.lookahead, inclusive="neither")]
# record data into df
# lookahead_df_dict["trial_key"].append(row.trial_key)
# lookahead_df_dict["window_end"].append(end_sec)
lookahead_df_dict["lookahead_vel"].append(lookahead_readings[self.velocity_col].to_numpy())
lookahead_df_dict["lookahead_pos"].append(lookahead_readings['currentPosRoll'].to_numpy())
lookahead_df_dict["lookahead_joy"].append(lookahead_readings['joystickX'].to_numpy())
lookahead_df_dict["lookahead_times"].append(lookahead_readings.seconds.to_numpy())
return data_df.assign(**lookahead_df_dict)
def _extract_lookahead_stats(self, end_sec: float,
trial_entries: pd.DataFrame) -> Tuple[float, float, int, int, float, float]:
"""for a single entry, return its avg destabilizing joystick magnitude, w/ or w/o carryover destabilizing
joystick, ie destab carried over from input window (i.e. "seen by machine", such as ...111 -> 1100);
if no such destab, return NaN. If no lookahead window (i.e. for end-of-trial neg samples), return NaN """
# locate lookahead sequences, note that first entry is last time step in input window
lookahead_readings = trial_entries[trial_entries.seconds.between(end_sec, end_sec + self.lookahead, inclusive="left")]
# see if deflection occurs
base_triples = lookahead_readings[[self.velocity_col, 'currentPosRoll', 'joystickX']].to_numpy()
# get an array of whether each reading in lookahead is destabilizing, (sampling_rate,)
has_deflections = extract_destabilize(base_triples, single_entry=True)
last_in_window_is_destab = has_deflections[0]
destab_ico = has_deflections[1:]
joystick_ico = lookahead_readings.joystickX.to_numpy()[1:]
assert destab_ico.shape == joystick_ico.shape, f"shape diff: {destab_ico.shape} vs {joystick_ico.shape}"
# time points for calculating length sustained
timepoints_ico = lookahead_readings.seconds.to_numpy()[1:]
# split lookahead destab into bool subarray chunks: 1110011000 -> 111 00 11 000
lookahead_destab_cutpoints = np.where(np.diff(destab_ico))[0] + 1
destab_chunks_ico = np.split(destab_ico, lookahead_destab_cutpoints) # destab chunks incl potential carryover destab
joystick_chunks_ico = np.split(joystick_ico, lookahead_destab_cutpoints) # corresponding joystick chunks incl potential carryover destab
timepoints_chunks_ico = np.split(timepoints_ico, lookahead_destab_cutpoints)
# take out carryover if any
destab_chunks_eco = destab_chunks_ico.copy() # destab chunks excl potential carryover destab
joystick_chunks_eco = joystick_chunks_ico.copy()
timepoints_chunks_eco = timepoints_chunks_ico.copy()
# note: end-of-trial neg samples do not have lookahead window
if destab_chunks_ico[0].shape[0] != 0 and destab_chunks_ico[0][0] == True:
if last_in_window_is_destab:
# if the first chunk is a carry over destab, pop it
destab_chunks_eco.pop(0)
joystick_chunks_eco.pop(0)
timepoints_chunks_eco.pop(0)
# piece the chunks back into one vector
assert len(destab_chunks_eco) == len(joystick_chunks_eco)
if not destab_chunks_eco:
# if chunk lists become empty after popping, assign empty arrays
destab_eco, joystick_eco = np.empty(0), np.empty(0)
else:
destab_eco, joystick_eco = np.hstack(destab_chunks_eco), np.hstack(joystick_chunks_eco)
lookahead_has_destab_ico = np.any(destab_ico)
lookahead_has_destab_eco = np.any(destab_eco) # returns False if empty
# Average Absolute magnitude of destabilizing joystick deflections: (dot product)/(# of destab, ie sum)
# avg is NaN iff lookahead has no such destab segment
avg_destab_magnitude_ico = destab_ico.dot(np.abs(joystick_ico)) / np.sum(destab_ico) if lookahead_has_destab_ico else np.nan
avg_destab_magnitude_eco = destab_eco.dot(np.abs(joystick_eco)) / np.sum(destab_eco) if lookahead_has_destab_eco else np.nan
# add up time diffs
destab_sustained_ico, destab_sustained_eco = 0, 0
for destab_chunk, time_chunk in zip(destab_chunks_ico, timepoints_chunks_ico):
if destab_chunk.size > 0 and destab_chunk[0] == True:
destab_sustained_ico += time_chunk[-1] - time_chunk[0]
for destab_chunk, time_chunk in zip(destab_chunks_eco, timepoints_chunks_eco):
if destab_chunk.size > 0 and destab_chunk[0] == True:
destab_sustained_eco += time_chunk[-1] - time_chunk[0]
return avg_destab_magnitude_ico, avg_destab_magnitude_eco, \
int(np.sum(destab_ico)), int(np.sum(destab_eco)), \
destab_sustained_ico, destab_sustained_eco
def append_lookahead_stats(dataset_df: pd.DataFrame,
recorder: Recorder,
current_ahead: float=None,
save_lookahead_windows=False) -> pd.DataFrame:
"""append prediction categories to given test set DataFrame"""
if not current_ahead:
current_ahead = recorder.loader.ahead
prc = TestSetProcessor(recorder, current_ahead)
lookahead_avg_destab_mag_ico, lookahead_avg_destab_mag_eco, \
lookahead_total_destab_steps_ico, lookahead_total_destab_steps_eco,\
lookahead_destab_sustained_ico, lookahead_destab_sustained_eco = prc.generate_categories(dataset_df)
new_appended_df = dataset_df.assign(lookahead_avg_destab_mag_ico=lookahead_avg_destab_mag_ico,
lookahead_avg_destab_mag_eco=lookahead_avg_destab_mag_eco,
lookahead_total_destab_steps_ico=lookahead_total_destab_steps_ico,
lookahead_total_destab_steps_eco=lookahead_total_destab_steps_eco,
lookahead_destab_sustained_ico=lookahead_destab_sustained_ico,
lookahead_destab_sustained_eco=lookahead_destab_sustained_eco)
if save_lookahead_windows:
new_appended_df = prc.save_lookahead_windows(new_appended_df)
return new_appended_df
if __name__ == "__main__":
# debugging categorization function
test_curr_ahead = 1.0
test_false_df = pd.read_csv("local/test_false_df.csv")
test_recorder = pickle.load(open("local/test_recorder.pkl", "rb"))
new_df = append_lookahead_stats(test_false_df, test_recorder, test_curr_ahead)
print("Done!")
| [
"consts.PRED_PATH.format",
"pandas.read_csv",
"numpy.hstack",
"numpy.nanmean",
"os.path.exists",
"numpy.mean",
"numpy.diff",
"numpy.empty",
"os.mkdir",
"numpy.abs",
"processing.marsdataloader.generate_all_feat_df",
"numpy.nanstd",
"numpy.any",
"numpy.std",
"processing.extract_features.ex... | [((21857, 21895), 'pandas.read_csv', 'pd.read_csv', (['"""local/test_false_df.csv"""'], {}), "('local/test_false_df.csv')\n", (21868, 21895), True, 'import pandas as pd\n'), ((1346, 1491), 'consts.EXP_FORMAT.format', 'C.EXP_FORMAT.format', (['self.exp_ID', "self.train_args['window']", "self.train_args['ahead']", "self.train_args['configID']", "self.train_args['model']"], {}), "(self.exp_ID, self.train_args['window'], self.train_args\n ['ahead'], self.train_args['configID'], self.train_args['model'])\n", (1365, 1491), True, 'import consts as C\n'), ((1715, 1859), 'consts.PRED_PATH.format', 'C.PRED_PATH.format', (['self.exp_ID', "self.train_args['window']", "self.train_args['ahead']", "self.train_args['configID']", "self.train_args['model']"], {}), "(self.exp_ID, self.train_args['window'], self.train_args[\n 'ahead'], self.train_args['configID'], self.train_args['model'])\n", (1733, 1859), True, 'import consts as C\n'), ((2059, 2099), 'os.path.join', 'os.path.join', (['self.exp_dir', 'C.MODEL_PATH'], {}), '(self.exp_dir, C.MODEL_PATH)\n', (2071, 2099), False, 'import os\n'), ((2146, 2188), 'os.path.join', 'os.path.join', (['self.exp_dir', 'C.REC_BASENAME'], {}), '(self.exp_dir, C.REC_BASENAME)\n', (2158, 2188), False, 'import os\n'), ((2220, 2265), 'os.path.join', 'os.path.join', (['self.exp_dir', 'C.NORM_STATS_PATH'], {}), '(self.exp_dir, C.NORM_STATS_PATH)\n', (2232, 2265), False, 'import os\n'), ((4814, 4878), 'processing.marsdataloader.generate_all_feat_df', 'generate_all_feat_df', (['self.loader', 'self.configID'], {'inds': 'test_inds'}), '(self.loader, self.configID, inds=test_inds)\n', (4834, 4878), False, 'from processing.marsdataloader import MARSDataLoader, generate_all_feat_df\n'), ((7765, 7796), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (7779, 7796), False, 'import os\n'), ((9516, 9544), 'os.path.exists', 'os.path.exists', (['C.EXP_ID_LOG'], {}), '(C.EXP_ID_LOG)\n', (9530, 
9544), False, 'import os\n'), ((12268, 12326), 'pandas.read_csv', 'pd.read_csv', (['C.RAW_DATA_PATH'], {'usecols': 'C.ESSENTIAL_RAW_COLS'}), '(C.RAW_DATA_PATH, usecols=C.ESSENTIAL_RAW_COLS)\n', (12279, 12326), True, 'import pandas as pd\n'), ((16987, 17039), 'processing.extract_features.extract_destabilize', 'extract_destabilize', (['base_triples'], {'single_entry': '(True)'}), '(base_triples, single_entry=True)\n', (17006, 17039), False, 'from processing.extract_features import extract_destabilize\n'), ((17629, 17677), 'numpy.split', 'np.split', (['destab_ico', 'lookahead_destab_cutpoints'], {}), '(destab_ico, lookahead_destab_cutpoints)\n', (17637, 17677), True, 'import numpy as np\n'), ((17761, 17811), 'numpy.split', 'np.split', (['joystick_ico', 'lookahead_destab_cutpoints'], {}), '(joystick_ico, lookahead_destab_cutpoints)\n', (17769, 17811), True, 'import numpy as np\n'), ((17909, 17961), 'numpy.split', 'np.split', (['timepoints_ico', 'lookahead_destab_cutpoints'], {}), '(timepoints_ico, lookahead_destab_cutpoints)\n', (17917, 17961), True, 'import numpy as np\n'), ((19052, 19070), 'numpy.any', 'np.any', (['destab_ico'], {}), '(destab_ico)\n', (19058, 19070), True, 'import numpy as np\n'), ((19106, 19124), 'numpy.any', 'np.any', (['destab_eco'], {}), '(destab_eco)\n', (19112, 19124), True, 'import numpy as np\n'), ((3345, 3364), 'numpy.mean', 'np.mean', (['epoch_list'], {}), '(epoch_list)\n', (3352, 3364), True, 'import numpy as np\n'), ((3398, 3416), 'numpy.std', 'np.std', (['epoch_list'], {}), '(epoch_list)\n', (3404, 3416), True, 'import numpy as np\n'), ((3568, 3596), 'os.path.exists', 'os.path.exists', (['self.exp_dir'], {}), '(self.exp_dir)\n', (3582, 3596), False, 'import os\n'), ((3610, 3632), 'os.mkdir', 'os.mkdir', (['self.exp_dir'], {}), '(self.exp_dir)\n', (3618, 3632), False, 'import os\n'), ((8751, 8786), 'numpy.nanmean', 'np.nanmean', (['cv_results[metric_name]'], {}), '(cv_results[metric_name])\n', (8761, 8786), True, 'import numpy as np\n'), 
((8848, 8882), 'numpy.nanstd', 'np.nanstd', (['cv_results[metric_name]'], {}), '(cv_results[metric_name])\n', (8857, 8882), True, 'import numpy as np\n'), ((9066, 9097), 'pandas.read_csv', 'pd.read_csv', (['C.ALL_RES_CSV_PATH'], {}), '(C.ALL_RES_CSV_PATH)\n', (9077, 9097), True, 'import pandas as pd\n'), ((9570, 9622), 'pandas.read_csv', 'pd.read_csv', (['C.EXP_ID_LOG'], {'dtype': '{C.EXP_ID_COL: int}'}), '(C.EXP_ID_LOG, dtype={C.EXP_ID_COL: int})\n', (9581, 9622), True, 'import pandas as pd\n'), ((9661, 9718), 'pandas.read_csv', 'pd.read_csv', (['C.TEMPLATE_ID_LOG'], {'dtype': '{C.EXP_ID_COL: int}'}), '(C.TEMPLATE_ID_LOG, dtype={C.EXP_ID_COL: int})\n', (9672, 9718), True, 'import pandas as pd\n'), ((12075, 12106), 'os.path.exists', 'os.path.exists', (['C.RAW_DATA_PATH'], {}), '(C.RAW_DATA_PATH)\n', (12089, 12106), False, 'import os\n'), ((1047, 1059), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1057, 1059), False, 'from datetime import date\n'), ((9147, 9178), 'pandas.read_csv', 'pd.read_csv', (['C.TEMPLATE_ALL_RES'], {}), '(C.TEMPLATE_ALL_RES)\n', (9158, 9178), True, 'import pandas as pd\n'), ((18878, 18889), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18886, 18889), True, 'import numpy as np\n'), ((18891, 18902), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18899, 18902), True, 'import numpy as np\n'), ((18956, 18984), 'numpy.hstack', 'np.hstack', (['destab_chunks_eco'], {}), '(destab_chunks_eco)\n', (18965, 18984), True, 'import numpy as np\n'), ((18986, 19016), 'numpy.hstack', 'np.hstack', (['joystick_chunks_eco'], {}), '(joystick_chunks_eco)\n', (18995, 19016), True, 'import numpy as np\n'), ((19401, 19419), 'numpy.sum', 'np.sum', (['destab_ico'], {}), '(destab_ico)\n', (19407, 19419), True, 'import numpy as np\n'), ((19534, 19552), 'numpy.sum', 'np.sum', (['destab_eco'], {}), '(destab_eco)\n', (19540, 19552), True, 'import numpy as np\n'), ((20217, 20235), 'numpy.sum', 'np.sum', (['destab_ico'], {}), '(destab_ico)\n', (20223, 
20235), True, 'import numpy as np\n'), ((20242, 20260), 'numpy.sum', 'np.sum', (['destab_eco'], {}), '(destab_eco)\n', (20248, 20260), True, 'import numpy as np\n'), ((5513, 5559), 'numpy.array_equal', 'np.array_equal', (['row.seq_label', 'row[C.PRED_COL]'], {}), '(row.seq_label, row[C.PRED_COL])\n', (5527, 5559), True, 'import numpy as np\n'), ((6155, 6197), 'numpy.array_equal', 'np.array_equal', (['row.label', 'row[C.PRED_COL]'], {}), '(row.label, row[C.PRED_COL])\n', (6169, 6197), True, 'import numpy as np\n'), ((17573, 17592), 'numpy.diff', 'np.diff', (['destab_ico'], {}), '(destab_ico)\n', (17580, 17592), True, 'import numpy as np\n'), ((19377, 19397), 'numpy.abs', 'np.abs', (['joystick_ico'], {}), '(joystick_ico)\n', (19383, 19397), True, 'import numpy as np\n'), ((19510, 19530), 'numpy.abs', 'np.abs', (['joystick_eco'], {}), '(joystick_eco)\n', (19516, 19530), True, 'import numpy as np\n')] |
import cv2
import os
import sys
import numpy as np
import datetime
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, QTimer, QDate, Qt
import csv
import face_recognition
class USER(QDialog):  # Dialog box for entering name and key of new dataset.
    """USER Dialog """

    def __init__(self):
        super(USER, self).__init__()
        # load the Qt Designer layout for the name/key form
        loadUi("user_info.ui", self)

    def get_name_key(self):
        """Return the (name, key) pair entered in the dialog; key is cast to int."""
        name = self.name_label.text()
        key = int(self.key_label.text())  # raises ValueError if the key field is not an integer
        return name, key
class AUFR(QMainWindow): # Main application
"""Main Class"""
def __init__(self):
    """Build the main window: load the UI, classifiers, state, and wire all signals."""
    super(AUFR, self).__init__()
    loadUi("mainwindow.ui", self)
    self.setWindowTitle("Hệ thống điểm danh dựa trên nhận diện khuôn mặt - Nhóm 05")
    # Classifiers, frontal face, eyes and smiles.
    self.face_classifier = cv2.CascadeClassifier("classifiers/haarcascade_frontalface_default.xml")
    self.eye_classifier = cv2.CascadeClassifier("classifiers/haarcascade_eye.xml")
    self.smile_classifier = cv2.CascadeClassifier("classifiers/haarcascade_smile.xml")
    # date and time shown in the header labels
    now = QDate.currentDate()
    current_date = now.toString('ddd dd MMMM yyyy')
    current_time = datetime.datetime.now().strftime("%I:%M %p")
    self.date_label.setText(current_date)
    self.time_label.setText(current_time)
    self.image = None
    # Variables
    self.camera_id = 0  # can also be a url of Video
    self.dataset_per_subject = 50  # images captured per new subject
    self.ret = False  # True while the camera returns valid frames
    self.trained_model = 0  # count of trained models, drives the train progress bar
    # placeholder image until the camera starts (overwrites the None above)
    self.image = cv2.imread("icon/app_icon.jpg", 1)
    self.modified_image = self.image.copy()
    self.display()
    # Actions: these buttons act as toggles
    self.generate_dataset_btn.setCheckable(True)
    self.train_model_btn.setCheckable(True)
    self.recognize_face_btn.setCheckable(True)
    # Menu
    self.about_menu = self.menu_bar.addAction("About")
    self.help_menu = self.menu_bar.addAction("Help")
    self.about_menu.triggered.connect(self.about_info)
    self.help_menu.triggered.connect(self.help_info)
    # Algorithms: re-select recognizer whenever the radio group changes
    self.algo_radio_group.buttonClicked.connect(self.algorithm_radio_changed)
    # Rectangle overlay: default to drawing face rectangles
    self.face_rect_radio.setChecked(True)
    self.eye_rect_radio.setChecked(False)
    self.smile_rect_radio.setChecked(False)
    # Events
    self.generate_dataset_btn.clicked.connect(self.generate)
    self.train_model_btn.clicked.connect(self.train)
    self.recognize_face_btn.clicked.connect(self.recognize)
    self.checkin_btn.clicked.connect(self.attendance)
    self.checkout_btn.clicked.connect(self.attendance)
    self.video_recording_btn.clicked.connect(self.save_video)
    # Recognizers: create the default recognizer and record its name
    self.update_recognizer()
    self.assign_algorithms()
def start_timer(self):  # start the timer for execution.
    """Open the camera at 640x480 and start the frame-update timer.

    The timer callback depends on which toggle button is active:
    dataset generation saves frames, recognition annotates them.
    """
    self.capture = cv2.VideoCapture(self.camera_id)
    self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    self.timer = QtCore.QTimer()
    path = 'ImagesAttendance'
    if not os.path.exists(path):
        os.mkdir(path)
    # known face encoding and known face name list
    # NOTE(review): `images` and `attendance_list` are assigned but never
    # used afterwards in this method -- candidates for cleanup
    images = []
    self.class_names = []
    self.encode_list = []
    self.TimeList1 = []  # check-in timestamps (filled by ElapseList)
    self.TimeList2 = []  # check-out timestamps
    attendance_list = os.listdir(path)
    if self.generate_dataset_btn.isChecked():
        self.timer.timeout.connect(self.save_dataset)
    elif self.recognize_face_btn.isChecked():
        self.timer.timeout.connect(self.update_image)
    self.timer.start(5)  # fire every 5 ms
def stop_timer(self):  # stop timer or come out of the loop.
    """Stop the frame-update timer and release the camera device."""
    self.timer.stop()
    self.ret = False  # no valid frame is held any more
    self.capture.release()
def update_image(self):  # update canvas every time according to time set in the timer.
    """Timer callback during recognition: grab, mirror, annotate and display a frame."""
    if self.recognize_face_btn.isChecked():
        self.ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)  # mirror for a natural selfie view
        faces = self.get_faces()
        self.draw_rectangle(faces)
    if self.video_recording_btn.isChecked():
        self.recording()  # also append the frame to the recording, if active
    self.display()
def attendance(self, roi_gray):
    """Handle check-in / check-out: recognize the face and append a row to Attendance.csv.

    NOTE(review): the `roi_gray` parameter is immediately overwritten inside the
    loop, so the value passed in (the clicked-button signal argument) is unused.
    NOTE(review): when the recognized name is 'Unknown' the button is disabled
    and never re-enabled on that path -- verify intended.
    """
    faces = self.get_faces()
    for (x, y, w, h) in faces:
        roi_gray_original = self.get_gray_image()[y:y + h, x:x + w]
        # recognizer was trained on 92x112 crops, so resize before predicting
        roi_gray = self.resize_image(roi_gray_original, 92, 112)
        roi_color = self.image[y:y + h, x:x + w]
        predicted, confidence = self.face_recognizer.predict(roi_gray)
        # map the predicted label id back to the subject name
        name = self.get_all_key_name_pairs().get(str(predicted))
        if self.checkin_btn.isChecked():
            self.checkin_btn.setEnabled(False)  # guard against double clicks
            with open('Attendance.csv', 'a') as f:
                if (name != 'Unknown'):
                    buttonReply = QMessageBox.question(self, 'Welcome', 'Are you Check In?',
                                                         QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                    if buttonReply == QMessageBox.Yes:
                        date_time_string = datetime.datetime.now().strftime("%y/%m/%d %H:%M:%S")
                        # NOTE(review): writes tag 'Checked In' while ElapseList
                        # searches for 'Clock In' -- confirm which tag is correct
                        f.writelines(f'\n {name},{date_time_string},Checked In')
                        self.checkin_btn.setChecked(False)
                        self.name_label.setText(str(name))
                        self.id_label.setText('Check In')
                        self.Time1 = datetime.datetime.now()
                        # print(self.Time1)
                        self.checkin_btn.setEnabled(True)
                    else:
                        print('Not clicked.')
                        self.checkin_btn.setEnabled(True)
        elif self.checkout_btn.isChecked():
            self.checkout_btn.setEnabled(False)
            with open('Attendance.csv', 'a') as f:
                if (name != 'Unknown'):
                    buttonReply = QMessageBox.question(self, 'Good Bye ', 'Are you Clocking Out?',
                                                         QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                    if buttonReply == QMessageBox.Yes:
                        date_time_string = datetime.datetime.now().strftime("%y/%m/%d %H:%M:%S")
                        f.writelines(f'\n{name},{date_time_string},Clock Out')
                        self.checkout_btn.setChecked(False)
                        self.name_label.setText(str(name))
                        self.id_label.setText('Checkin Out')
                        self.Time2 = datetime.datetime.now()
                        # print(self.Time2)
                        self.ElapseList(name)
                        self.TimeList2.append(datetime.datetime.now())
                        # elapsed time between the last check-in and this check-out
                        CheckInTime = self.Time1
                        CheckOutTime = self.Time2
                        self.ElapseHours = (CheckOutTime - CheckInTime)
                        self.min_label.setText(
                            "{:.0f}".format(abs(self.ElapseHours.total_seconds() / 60) % 60) + 'm')
                        self.hour_label.setText(
                            "{:.0f}".format(abs(self.ElapseHours.total_seconds() / 60 ** 2)) + 'h')
                        self.checkout_btn.setEnabled(True)
                    else:
                        print('Not clicked.')
                        self.checkout_btn.setEnabled(True)
def ElapseList(self, name):
    """Scan Attendance.csv and collect this person's clock-in/out timestamps.

    Appends parsed times to self.TimeList1 (in) and self.TimeList2 (out).
    NOTE(review): check-in rows are written with tag 'Checked In' (and a
    leading space before the name), so the 'Clock In' match here presumably
    never fires -- verify against the writer in attendance().
    NOTE(review): `line_count` is never used; `if field in row` is always
    True since `field` is drawn from `row`.
    """
    with open('Attendance.csv', "r") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 2
        Time1 = datetime.datetime.now()
        Time2 = datetime.datetime.now()
        for row in csv_reader:
            for field in row:
                if field in row:
                    if field == 'Clock In':
                        if row[0] == name:
                            # print(f'\t ROW 0 {row[0]} ROW 1 {row[1]} ROW2 {row[2]}.')
                            Time1 = (datetime.datetime.strptime(row[1], '%y/%m/%d %H:%M:%S'))
                            self.TimeList1.append(Time1)
                    if field == 'Clock Out':
                        if row[0] == name:
                            # print(f'\t ROW 0 {row[0]} ROW 1 {row[1]} ROW2 {row[2]}.')
                            Time2 = (datetime.datetime.strptime(row[1], '%y/%m/%d %H:%M:%S'))
                            self.TimeList2.append(Time2)
                            # print(Time2)
def save_dataset(self):  # Save images of new dataset generated using generate dataset button.
    """Timer callback during generation: capture one face crop per tick.

    Counts self.dataset_per_subject down to zero, then stops generation
    and resets the counter for the next subject.
    """
    location = os.path.join(self.current_path, str(self.dataset_per_subject) + ".jpg")
    if self.dataset_per_subject < 1:
        QMessageBox().about(self, "Dataset Generated",
                             "Your response is recorded now you can train the Model \n or Generate New Dataset.")
        self.generate_dataset_btn.setText("Generate Dataset")
        self.generate_dataset_btn.setChecked(False)
        self.stop_timer()
        self.dataset_per_subject = 50  # again setting max datasets
    if self.generate_dataset_btn.isChecked():
        self.ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        faces = self.get_faces()
        self.draw_rectangle(faces)
        # fix: compare values with != ; "is not" tests object identity and is
        # unreliable for int literals (SyntaxWarning since Python 3.8)
        if len(faces) != 1:
            self.draw_text("Only One Person at a time")
        else:
            for (x, y, w, h) in faces:
                # store the 92x112 grayscale crop the recognizers expect
                cv2.imwrite(location, self.resize_image(self.get_gray_image()[y:y + h, x:x + w], 92, 112))
                self.draw_text("/".join(location.split("/")[-3:]), 20, 20 + self.dataset_per_subject)
                self.dataset_per_subject -= 1
                self.progress_bar_generate.setValue(100 - self.dataset_per_subject * 2 % 100)
    if self.video_recording_btn.isChecked():
        self.recording()
    self.display()
def display(self):  # Display in the canvas, video feed.
    """Convert the current frame to a QPixmap and show it scaled in the feed widget."""
    pixImage = self.pix_image(self.image)
    self.video_feed.setPixmap(QtGui.QPixmap.fromImage(pixImage))
    self.video_feed.setScaledContents(True)
def pix_image(self, image):  # Converting image from OpenCv to PyQT compatible image.
    """Wrap an OpenCV (numpy) image in a QImage, swapping BGR to RGB.

    3-channel input is treated as RGB888; anything else as 8-bit indexed.
    """
    qformat = QtGui.QImage.Format_RGB888  # only RGB Image
    if len(image.shape) >= 3:
        r, c, ch = image.shape
    else:
        r, c = image.shape
        qformat = QtGui.QImage.Format_Indexed8
    pixImage = QtGui.QImage(image, c, r, image.strides[0], qformat)
    # OpenCV stores BGR; swap to RGB for Qt display
    return pixImage.rgbSwapped()
def generate(self):  # Invoke user dialog and enter name and key.
    """Prompt for a (name, key) pair and begin capturing a new dataset.

    Creates datasets/<key>-<name>/ -- the folder layout the recognizer
    expects -- and starts the capture timer. On invalid input the toggle
    is reset and the user is told the expected format.
    """
    if self.generate_dataset_btn.isChecked():
        try:
            user = USER()
            user.exec_()
            name, key = user.get_name_key()
            self.current_path = os.path.join(os.getcwd(), "datasets", str(key) + "-" + name)
            os.makedirs(self.current_path, exist_ok=True)
            self.start_timer()
            self.generate_dataset_btn.setText("Generating")
        except Exception:
            # fix: bare "except:" also swallowed SystemExit/KeyboardInterrupt
            msg = QMessageBox()
            msg.about(self, "User Information", '''Provide Information Please! \n name[string]\n key[integer]''')
            self.generate_dataset_btn.setChecked(False)
def algorithm_radio_changed(
        self):  # When radio button change, either model is training or recognizing in respective algorithm.
    """React to an algorithm radio change: refresh name, recognizer and trained data."""
    self.assign_algorithms()  # 1. update current radio button
    self.update_recognizer()  # 2. update face Recognizer
    self.read_model()  # 3. read trained data of recognizer set in step 2
    if self.train_model_btn.isChecked():
        self.train()  # retrain immediately if training is in progress
def update_recognizer(self):
    """Re-create the OpenCV face recognizer matching the selected algorithm.

    Must be invoked whenever the algorithm radio buttons change.
    """
    if self.eigen_algo_radio.isChecked():
        factory = cv2.face.EigenFaceRecognizer_create
    elif self.fisher_algo_radio.isChecked():
        factory = cv2.face.FisherFaceRecognizer_create
    else:
        factory = cv2.face.LBPHFaceRecognizer_create
    self.face_recognizer = factory()
def assign_algorithms(self):
    """Record the name of the currently selected algorithm ("EIGEN", "FISHER" or "LBPH")."""
    if self.eigen_algo_radio.isChecked():
        selected = "EIGEN"
    elif self.fisher_algo_radio.isChecked():
        selected = "FISHER"
    else:
        selected = "LBPH"
    self.algorithm = selected
def read_model(self):  # Reading trained model.
    """Load the trained model file for the current algorithm, if recognition is active.

    Needs to be invoked when the algorithm radio button changes.
    """
    if self.recognize_face_btn.isChecked():
        try:
            self.face_recognizer.read("training/" + self.algorithm.lower() + "_trained_model.yml")
        except Exception as e:
            self.print_custom_error("Unable to read Trained Model due to")
            print(e)
def save_model(self):  # Save anyone model.
    """Persist the current recognizer's training data and bump the progress bar."""
    try:
        self.face_recognizer.save("training/" + self.algorithm.lower() + "_trained_model.yml")
        msg = self.algorithm + " model trained, stop training or train another model"
        self.trained_model += 1
        self.progress_bar_train.setValue(self.trained_model)
        QMessageBox().about(self, "Training Completed", msg)
    except Exception as e:
        self.print_custom_error("Unable to save Trained Model due to")
        print(e)
def train(self):  # When train button is clicked.
    """Train the selected recognizer on all dataset images, or reset the UI when toggled off."""
    if self.train_model_btn.isChecked():
        # lock the active algorithm radio while its model trains
        button = self.algo_radio_group.checkedButton()
        button.setEnabled(False)
        self.train_model_btn.setText("Stop Training")
        os.makedirs("training", exist_ok=True)
        labels, faces = self.get_labels_and_faces()
        try:
            msg = self.algorithm + " model training started"
            QMessageBox().about(self, "Training Started", msg)
            self.face_recognizer.train(faces, np.array(labels))
            self.save_model()
        except Exception as e:
            self.print_custom_error("Unable To Train the Model Due to: ")
            print(e)
    else:
        # toggled off: re-enable every algorithm radio and restore the label
        self.eigen_algo_radio.setEnabled(True)
        self.fisher_algo_radio.setEnabled(True)
        self.lbph_algo_radio.setEnabled(True)
        self.train_model_btn.setChecked(False)
        self.train_model_btn.setText("Train Model")
def recognize(self):
    """Toggle live face recognition when the recognize button is pressed."""
    if not self.recognize_face_btn.isChecked():
        # button released: restore its label and stop the camera loop
        self.recognize_face_btn.setText("Recognize Face")
        self.stop_timer()
        return
    # button pressed: start the camera loop and load the trained model
    self.start_timer()
    self.recognize_face_btn.setText("Stop Recognition")
    self.read_model()
def get_all_key_name_pairs(self):
    """Return {key: name} for every dataset folder, which are named "<key>-<name>"."""
    pairs = {}
    datasets_root = os.path.join(os.getcwd(), "datasets")
    for _, folders, _ in os.walk(datasets_root):
        for subfolder in folders:
            key, name = subfolder.split('-')
            pairs[key] = name
    return pairs
def absolute_path_generator(self):
    """Yield (absolute_image_path, key) for every image under the datasets tree.

    Each dataset folder is named "<key>-<name>"; its key labels all images inside.
    """
    datasets_root = os.path.join(os.getcwd(), "datasets")
    for parent, subdirs, _ in os.walk(datasets_root):
        for subdir in subdirs:
            subject_dir = os.path.join(parent, subdir)
            key, _ = subdir.split("-")
            for image_name in os.listdir(subject_dir):
                yield os.path.join(subject_dir, image_name), key
def get_labels_and_faces(self):
    """Load every dataset image as grayscale; return parallel (labels, faces) lists."""
    labels = []
    faces = []
    for image_path, key in self.absolute_path_generator():
        gray = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
        faces.append(gray)
        labels.append(int(key))
    return labels, faces
def get_gray_image(self):  # Convert BGR image to GRAY image.
    """Return the current frame converted to a single-channel grayscale image."""
    return cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
def get_faces(self):
    """Detect all frontal faces in the current frame; returns (x, y, w, h) boxes."""
    # detector tuning: fine scale step, strict neighbor count, ignore tiny faces
    detect_params = {
        "scaleFactor": 1.1,
        "minNeighbors": 8,
        "minSize": (100, 100),
    }
    return self.face_classifier.detectMultiScale(self.get_gray_image(), **detect_params)
def get_smiles(self, roi_gray):
    """Detect smiles inside a grayscale face region; returns (x, y, w, h) boxes."""
    # coarse scale step with a high neighbor threshold to cut false positives
    detect_params = {
        "scaleFactor": 1.7,
        "minNeighbors": 22,
        "minSize": (25, 25),
    }
    return self.smile_classifier.detectMultiScale(roi_gray, **detect_params)
def get_eyes(self, roi_gray):
    """Detect eyes inside a grayscale face region; returns (x, y, w, h) boxes."""
    # NOTE: minSize is deliberately not passed (it was commented out in tuning)
    detect_params = {
        "scaleFactor": 1.1,
        "minNeighbors": 6,
    }
    return self.eye_classifier.detectMultiScale(roi_gray, **detect_params)
def draw_rectangle(self, faces):  # Draw rectangle either in face, eyes or smile.
    """Annotate the current frame: predicted name/confidence plus face, eye or smile boxes."""
    for (x, y, w, h) in faces:
        roi_gray_original = self.get_gray_image()[y:y + h, x:x + w]
        # recognizer was trained on 92x112 crops, so resize before predicting
        roi_gray = self.resize_image(roi_gray_original, 92, 112)
        roi_color = self.image[y:y + h, x:x + w]
        if self.recognize_face_btn.isChecked():
            try:
                predicted, confidence = self.face_recognizer.predict(roi_gray)
                name = self.get_all_key_name_pairs().get(str(predicted))
                self.draw_text("Recognizing using: " + self.algorithm, 70, 50)
                if self.lbph_algo_radio.isChecked():
                    # LBPH: lower distance = better; above 105 is treated as unknown
                    if confidence > 105:
                        msg = "Unknown"
                    else:
                        confidence = "{:.2f}".format(100 - confidence)
                        msg = name
                        self.progress_bar_recognize.setValue(float(confidence))
                else:
                    # Eigen/Fisher report raw distances; show them modulo 100
                    msg = name
                    self.progress_bar_recognize.setValue(int(confidence % 100))
                    confidence = "{:.2f}".format(confidence)
                self.draw_text(msg, x - 5, y - 5)
            except Exception as e:
                self.print_custom_error("Unable to Pridict due to")
                print(e)
        if self.eye_rect_radio.isChecked():  # If eye radio button is checked.
            eyes = self.get_eyes(roi_gray_original)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        elif self.smile_rect_radio.isChecked():  # If smile radio button is checked.
            smiles = self.get_smiles(roi_gray_original)
            for (sx, sy, sw, sh) in smiles:
                cv2.rectangle(roi_color, (sx, sy), (sx + sw, sy + sh), (0, 255, 0), 2)
        else:  # If face radio button is checked.
            cv2.rectangle(self.image, (x, y), (x + w, y + h), (0, 255, 0), 2)
def time(self):  # Get current time.
    """Return the current timestamp formatted for recording filenames.

    Fix: the module is imported as ``import datetime``, so the class must be
    referenced as ``datetime.datetime`` -- plain ``datetime.now()`` raised
    AttributeError (every other call site already uses the qualified form).
    """
    return datetime.datetime.now().strftime("%d-%b-%Y:%I-%M-%S")
def draw_text(self, text, x=20, y=20, font_size=2,
              color=(0, 255, 0)):  # Draw text in current image in particular color.
    """Overlay *text* on the current frame at (x, y).

    NOTE: ``font_size`` is passed as the thickness argument of cv2.putText;
    the font scale itself is fixed at 1.6.
    """
    origin = (x, y)
    cv2.putText(self.image, text, origin, cv2.FONT_HERSHEY_PLAIN, 1.6, color, font_size)
def resize_image(self, image, width=280, height=280):
    """Return *image* resized to (width, height) using cubic interpolation."""
    target_size = (width, height)
    return cv2.resize(image, target_size, interpolation=cv2.INTER_CUBIC)
def print_custom_error(self, msg):
    """Print *msg* to the console framed by separator rules."""
    ruler = "=" * 100
    print(ruler)
    print(msg)
    print(ruler)
def recording(self):  # Record Video when either recognizing or generating.
        """Append the current frame to the open video writer.

        Assumes ``self.video_output`` was opened by save_video() — TODO confirm.
        """
        if self.ret:
            self.video_output.write(self.image)
def save_video(self):  # Saving video.
        """Toggle video recording.

        When the record button is active and frames are flowing, open a new
        timestamped .avi writer under ./recordings; otherwise stop, reset the
        button and notify the user.
        """
        if self.video_recording_btn.isChecked() and self.ret:
            self.video_recording_btn.setText("Stop")
            try:
                # XVID codec, 20 fps, fixed 640x480 frame size.
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                output_file_name = self.time() + '.avi'
                path = os.path.join(os.getcwd(), "recordings")
                os.makedirs(path, exist_ok=True)
                self.video_output = cv2.VideoWriter(os.path.join(path, output_file_name), fourcc, 20.0, (640, 480))
            except Exception as e:
                self.print_custom_error("Unable to Record Video Due to")
                print(e)
        else:
            self.video_recording_btn.setText("Record")
            self.video_recording_btn.setChecked(False)
            if self.ret:
                QMessageBox().about(self, "Recording Complete",
                                    "Video clip successfully recorded into current recording folder")
            else:
                QMessageBox().about(self, "Information", '''Start either datasets generation or recognition First! ''')
# Main Menu
def about_info(self):  # Menu Information of info button of application.
        """Show the About dialog (course credits; text intentionally in Vietnamese)."""
        msg_box = QMessageBox()
        msg_box.setText('''
            Face Recognition Attendance System
            Cảm ơn sự giúp đỡ của thầy đã giúp chúng em hoàn thành bài tập này
            ''')
        msg_box.setInformativeText('''
            Học Viện Kỹ Thuật Mật Mã
            Thị giác máy tính trên nền nhúng - C2N02
            Giáo viên hướng dẫn: Lê Đức Thuận
            Nhóm 05:
            Hoàng Trung Kiên
            Nguyễn Vân Khanh
            Mạc Văn Nam
            ''')
        msg_box.setWindowTitle("About")
        msg_box.exec_()
def help_info(self):  # Menu Information of help button of application.
        """Show the Help dialog with usage steps (text intentionally in Vietnamese)."""
        msg_box = QMessageBox()
        msg_box.setText('''
            Phần mềm này có khả năng tạo tập dữ liệu, tạo mô hình, nhận diện các khuôn mặt.
            Phần mềm cũng bao gồm các chức năng nhận diện khuôn mặt, đôi mắt, khóe miệng cười.
            ''')
        msg_box.setInformativeText('''
            Làm theo các bước sau để sử dụng phần mềm
            1. Tạo ít nhất hai bộ dữ liệu.
            2. Huấn luyện tất cả các mô hình xử lý ảnh bằng cách sử dụng 3 nút radio đã cho.
            3. Nhận diện khuôn mặt.
            ''')
        msg_box.setWindowTitle("Help")
        msg_box.exec_()
if __name__ == "__main__":
    # Application entry point: build the Qt app, show the main window,
    # and hand control to the Qt event loop until the window closes.
    app = QApplication(sys.argv)
    ui = AUFR()  # Running application loop.
    ui.show()
    sys.exit(app.exec_())  # Exit application.
"cv2.rectangle",
"PyQt5.QtWidgets.QMessageBox",
"PyQt5.uic.loadUi",
"PyQt5.QtGui.QPixmap.fromImage",
"cv2.face.LBPHFaceRecognizer_create",
"PyQt5.QtGui.QImage",
"cv2.face.EigenFaceRecognizer_create",
"numpy.array",
"PyQt5.QtWidgets.QMessageBox.question",
"PyQt5.QtWidgets.QApplication",
"cv2.Casc... | [((23586, 23608), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (23598, 23608), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((482, 510), 'PyQt5.uic.loadUi', 'loadUi', (['"""user_info.ui"""', 'self'], {}), "('user_info.ui', self)\n", (488, 510), False, 'from PyQt5.uic import loadUi\n'), ((795, 824), 'PyQt5.uic.loadUi', 'loadUi', (['"""mainwindow.ui"""', 'self'], {}), "('mainwindow.ui', self)\n", (801, 824), False, 'from PyQt5.uic import loadUi\n'), ((1002, 1074), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""classifiers/haarcascade_frontalface_default.xml"""'], {}), "('classifiers/haarcascade_frontalface_default.xml')\n", (1023, 1074), False, 'import cv2\n'), ((1106, 1162), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""classifiers/haarcascade_eye.xml"""'], {}), "('classifiers/haarcascade_eye.xml')\n", (1127, 1162), False, 'import cv2\n'), ((1196, 1254), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""classifiers/haarcascade_smile.xml"""'], {}), "('classifiers/haarcascade_smile.xml')\n", (1217, 1254), False, 'import cv2\n'), ((1297, 1316), 'PyQt5.QtCore.QDate.currentDate', 'QDate.currentDate', ([], {}), '()\n', (1314, 1316), False, 'from PyQt5.QtCore import pyqtSlot, QTimer, QDate, Qt\n'), ((1768, 1802), 'cv2.imread', 'cv2.imread', (['"""icon/app_icon.jpg"""', '(1)'], {}), "('icon/app_icon.jpg', 1)\n", (1778, 1802), False, 'import cv2\n'), ((3145, 3177), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.camera_id'], {}), '(self.camera_id)\n', (3161, 3177), False, 'import cv2\n'), ((3315, 3330), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (3328, 3330), False, 'from PyQt5 import QtGui, QtCore\n'), ((3656, 3672), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3666, 3672), False, 'import os\n'), ((11225, 11277), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['image', 'c', 'r', 'image.strides[0]', 'qformat'], {}), '(image, c, r, 
image.strides[0], qformat)\n', (11237, 11277), False, 'from PyQt5 import QtGui, QtCore\n'), ((16821, 16865), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2GRAY'], {}), '(self.image, cv2.COLOR_BGR2GRAY)\n', (16833, 16865), False, 'import cv2\n'), ((20462, 20550), 'cv2.putText', 'cv2.putText', (['self.image', 'text', '(x, y)', 'cv2.FONT_HERSHEY_PLAIN', '(1.6)', 'color', 'font_size'], {}), '(self.image, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.6, color,\n font_size)\n', (20473, 20550), False, 'import cv2\n'), ((20656, 20721), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (width, height), interpolation=cv2.INTER_CUBIC)\n', (20666, 20721), False, 'import cv2\n'), ((22264, 22277), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (22275, 22277), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((22942, 22955), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (22953, 22955), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((3382, 3402), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3396, 3402), False, 'import os\n'), ((3417, 3431), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3425, 3431), False, 'import os\n'), ((4301, 4324), 'cv2.flip', 'cv2.flip', (['self.image', '(1)'], {}), '(self.image, 1)\n', (4309, 4324), False, 'import cv2\n'), ((8113, 8148), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (8123, 8148), False, 'import csv\n'), ((8200, 8223), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8221, 8223), False, 'import datetime\n'), ((8245, 8268), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8266, 8268), False, 'import datetime\n'), ((9910, 9933), 'cv2.flip', 'cv2.flip', (['self.image', '(1)'], {}), '(self.image, 1)\n', (9918, 9933), False, 'import cv2\n'), 
((10794, 10827), 'PyQt5.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['pixImage'], {}), '(pixImage)\n', (10817, 10827), False, 'from PyQt5 import QtGui, QtCore\n'), ((12696, 12733), 'cv2.face.EigenFaceRecognizer_create', 'cv2.face.EigenFaceRecognizer_create', ([], {}), '()\n', (12731, 12733), False, 'import cv2\n'), ((14523, 14561), 'os.makedirs', 'os.makedirs', (['"""training"""'], {'exist_ok': '(True)'}), "('training', exist_ok=True)\n", (14534, 14561), False, 'import os\n'), ((1398, 1421), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1419, 1421), False, 'import datetime\n'), ((11683, 11728), 'os.makedirs', 'os.makedirs', (['self.current_path'], {'exist_ok': '(True)'}), '(self.current_path, exist_ok=True)\n', (11694, 11728), False, 'import os\n'), ((12820, 12858), 'cv2.face.FisherFaceRecognizer_create', 'cv2.face.FisherFaceRecognizer_create', ([], {}), '()\n', (12856, 12858), False, 'import cv2\n'), ((12910, 12946), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (12944, 12946), False, 'import cv2\n'), ((16082, 16093), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16091, 16093), False, 'import os\n'), ((16180, 16211), 'os.path.join', 'os.path.join', (['folder', 'subfolder'], {}), '(folder, subfolder)\n', (16192, 16211), False, 'import os\n'), ((16295, 16319), 'os.listdir', 'os.listdir', (['subject_path'], {}), '(subject_path)\n', (16305, 16319), False, 'import os\n'), ((20260, 20274), 'datetime.now', 'datetime.now', ([], {}), '()\n', (20272, 20274), False, 'import datetime\n'), ((21228, 21259), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (21250, 21259), False, 'import cv2\n'), ((21398, 21430), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (21409, 21430), False, 'import os\n'), ((9382, 9395), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (9393, 9395), False, 'from PyQt5.QtWidgets 
import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((11618, 11629), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11627, 11629), False, 'import os\n'), ((11874, 11887), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (11885, 11887), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((14067, 14080), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (14078, 14080), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((14824, 14840), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (14832, 14840), True, 'import numpy as np\n'), ((16358, 16391), 'os.path.join', 'os.path.join', (['subject_path', 'image'], {}), '(subject_path, image)\n', (16370, 16391), False, 'import os\n'), ((16630, 16646), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (16640, 16646), False, 'import cv2\n'), ((19699, 19769), 'cv2.rectangle', 'cv2.rectangle', (['roi_color', '(ex, ey)', '(ex + ew, ey + eh)', '(0, 255, 0)', '(2)'], {}), '(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)\n', (19712, 19769), False, 'import cv2\n'), ((20134, 20199), 'cv2.rectangle', 'cv2.rectangle', (['self.image', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(self.image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (20147, 20199), False, 'import cv2\n'), ((21354, 21365), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21363, 21365), False, 'import os\n'), ((21484, 21520), 'os.path.join', 'os.path.join', (['path', 'output_file_name'], {}), '(path, output_file_name)\n', (21496, 21520), False, 'import os\n'), ((5245, 5357), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Welcome"""', '"""Are you Check In?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, 'Welcome', 'Are you Check In?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n", (5265, 5357), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, 
QMessageBox\n'), ((14720, 14733), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (14731, 14733), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((19991, 20061), 'cv2.rectangle', 'cv2.rectangle', (['roi_color', '(sx, sy)', '(sx + sw, sy + sh)', '(0, 255, 0)', '(2)'], {}), '(roi_color, (sx, sy), (sx + sw, sy + sh), (0, 255, 0), 2)\n', (20004, 20061), False, 'import cv2\n'), ((21854, 21867), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (21865, 21867), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((22041, 22054), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (22052, 22054), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((5897, 5920), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5918, 5920), False, 'import datetime\n'), ((6422, 6541), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Good Bye """', '"""Are you Clocking Out?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, 'Good Bye ', 'Are you Clocking Out?', \n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n", (6442, 6541), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox\n'), ((15850, 15861), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15859, 15861), False, 'import os\n'), ((7082, 7105), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7103, 7105), False, 'import datetime\n'), ((8611, 8666), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['row[1]', '"""%y/%m/%d %H:%M:%S"""'], {}), "(row[1], '%y/%m/%d %H:%M:%S')\n", (8637, 8666), False, 'import datetime\n'), ((8964, 9019), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['row[1]', '"""%y/%m/%d %H:%M:%S"""'], {}), "(row[1], '%y/%m/%d %H:%M:%S')\n", (8990, 9019), False, 'import datetime\n'), ((5522, 5545), 
'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5543, 5545), False, 'import datetime\n'), ((7259, 7282), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7280, 7282), False, 'import datetime\n'), ((6705, 6728), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6726, 6728), False, 'import datetime\n')] |
import numpy as np
import pytest
from src.gen_preference_matrix import PreferenceMatrix
class TestPreferenceMatrix:
    """Unit tests for PreferenceMatrix with 3 arms.

    NOTE(review): all tests share one class-level ``pm`` instance created in
    setup_class, so tests are order-coupled; several call reset_state()
    themselves to compensate.
    """

    @classmethod
    def setup_class(self):
        # pytest class-level fixture; the first parameter is really the class
        # object (conventionally named ``cls``).
        self.pm = PreferenceMatrix(3)

    def test_reset_state(self):
        # After a reset: zero data, zero observation counters, no winner.
        self.pm.reset_state()
        assert not np.any(self.pm.data)
        assert not np.any(self.pm.num_observations)
        assert self.pm.curr_condorcet_winner is None

    def test_set_matrix_wrong_size(self):
        # A non-3x3 matrix must be rejected.
        wrong_matrix = np.zeros((4, 5))
        with pytest.raises(Exception):
            self.pm.set_matrix_explicit(wrong_matrix)

    def test_set_matrix_wrong_type(self):
        # A plain nested list (not an ndarray) must be rejected.
        wrong_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        with pytest.raises(Exception):
            self.pm.set_matrix_explicit(wrong_matrix)

    def test_record_win(self):
        # Recording a duel updates the win cell and both arms' counters.
        winner = 0
        loser = 1
        self.pm.record_win(winner, loser)
        assert self.pm.data[0, 1] == 1
        assert self.pm.num_observations[0] == 1
        assert self.pm.num_observations[1] == 1

    def test_get_condorcet_winner_empty(self):
        # No duels recorded -> no Condorcet winner (-1).
        self.pm.reset_state()
        assert self.pm.condorcet_winner() == -1

    def test_get_condorcet_winner_not_exists(self):
        # Cyclic preferences -> no Condorcet winner (-1).
        self.pm.reset_state()
        no_winner = np.array([[0, 2, 1],
                              [1, 0, 2],
                              [2, 1, 0]])
        self.pm.set_matrix_explicit(no_winner)
        assert self.pm.condorcet_winner() == -1

    def test_get_condorcet_winner_exists(self):
        # Arm 1 beats both others pairwise -> Condorcet winner is 1.
        self.pm.reset_state()
        winner = np.array([[0, 186, 405],
                           [305, 0, 272],
                           [78, 105, 0]]) #example is from wikipedia (Condorcet criterion)
        self.pm.set_matrix_explicit(winner)
        print(self.pm.num_observations)
        assert self.pm.condorcet_winner() == 1
| [
"src.gen_preference_matrix.PreferenceMatrix",
"numpy.any",
"numpy.array",
"numpy.zeros",
"pytest.raises"
] | [((188, 207), 'src.gen_preference_matrix.PreferenceMatrix', 'PreferenceMatrix', (['(3)'], {}), '(3)\n', (204, 207), False, 'from src.gen_preference_matrix import PreferenceMatrix\n'), ((495, 511), 'numpy.zeros', 'np.zeros', (['(4, 5)'], {}), '((4, 5))\n', (503, 511), True, 'import numpy as np\n'), ((1308, 1351), 'numpy.array', 'np.array', (['[[0, 2, 1], [1, 0, 2], [2, 1, 0]]'], {}), '([[0, 2, 1], [1, 0, 2], [2, 1, 0]])\n', (1316, 1351), True, 'import numpy as np\n'), ((1575, 1629), 'numpy.array', 'np.array', (['[[0, 186, 405], [305, 0, 272], [78, 105, 0]]'], {}), '([[0, 186, 405], [305, 0, 272], [78, 105, 0]])\n', (1583, 1629), True, 'import numpy as np\n'), ((298, 318), 'numpy.any', 'np.any', (['self.pm.data'], {}), '(self.pm.data)\n', (304, 318), True, 'import numpy as np\n'), ((339, 371), 'numpy.any', 'np.any', (['self.pm.num_observations'], {}), '(self.pm.num_observations)\n', (345, 371), True, 'import numpy as np\n'), ((526, 550), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (539, 550), False, 'import pytest\n'), ((728, 752), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (741, 752), False, 'import pytest\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import igraph as ig
class BasicNode:  # should be interfaced to from a graph object
    """A graph vertex that tracks its incident edges.

    A node keeps three sets of edges: all incident edges, edges for which it
    is the source, and edges for which it is the target.  It carries at most
    one label at a time.
    """

    def __init__(self, content=None, labels=None):
        # ``labels`` is accepted for interface compatibility but a node only
        # ever holds a single label (set via add_label).
        self.content = content
        self.incident_edges = set()
        self.incident_outward_edges = set()
        self.incident_inward_edges = set()
        self.label = None

    def add_edge(self, Edge):
        """Register *Edge* as incident, classifying it as outward or inward."""
        self.incident_edges.add(Edge)
        if Edge.source() == self:
            self.incident_outward_edges.add(Edge)
        else:
            self.incident_inward_edges.add(Edge)

    def remove_edge(self, Edge):
        """Forget *Edge* (no error if it was not registered)."""
        self.incident_edges.discard(Edge)

    def get_neighbors(self):
        """Return the set of adjacent vertices.

        The node itself is included only when it has a loop edge.
        Bug fixes vs. the original: ``reduce`` was the Python-2-only builtin
        (NameError on Python 3), and a vertex with no incident edges crashed
        (reduce over an empty sequence with no initializer).
        """
        neighbors = set()
        has_loop = False
        for edge in self.incident_edges:
            neighbors.update(edge.ends)
            if edge.ends == [self, self]:
                has_loop = True
        if not has_loop:
            neighbors.discard(self)
        return neighbors

    def get_targets(self):
        """Return the set of vertices this node points to."""
        return {edge.target() for edge in self.incident_outward_edges}

    def get_sources(self):
        """Return the set of vertices pointing to this node."""
        return {edge.source() for edge in self.incident_inward_edges}

    def add_label(self, label):
        """Attach *label* to this node (replaces any previous label)."""
        self.label = label

    def remove_label(self, label):
        """Detach the current label."""
        self.label = None
class BasicEdge:  # should be interfaced to from a graph object
    """An edge with two endpoints (``ends[0]`` = source, ``ends[1]`` = target)
    and a set of labels."""

    def __init__(self, content=None, ends=None, labels=None):
        self.content = content
        # Bug fix: the original used the mutable default ``ends=[]``, so all
        # default-constructed edges shared one endpoint list.
        self.ends = [] if ends is None else ends
        if labels is None:
            self.labels = set()
        else:
            self.labels = labels

    def source(self):
        """Return the tail endpoint."""
        return self.ends[0]

    def target(self):
        """Return the head endpoint."""
        return self.ends[1]

    def add_label(self, label):
        """Attach *label* to this edge."""
        self.labels.add(label)

    def remove_label(self, label):
        """Detach *label* (no error if absent)."""
        self.labels.discard(label)
def update_up(class_method):
    """Decorator: run *class_method*, then replay the same call on every
    graph in ``self.supergraphs``.

    Bug fix: ``class_method.func_name`` is Python-2-only; on Python 3 it
    raises AttributeError.  Use ``__name__`` instead.
    """
    def inner(self, *args, **kwargs):
        method_name = class_method.__name__
        class_method(self, *args, **kwargs)
        for supergraph in self.supergraphs:
            getattr(supergraph, method_name)(*args, **kwargs)
    return inner
def update_up_down(class_method):
    """Decorator: run *class_method*; when it returns truthy, replay the call
    on every supergraph.  The call is always replayed on every subgraph.

    Bug fix: ``class_method.func_name`` is Python-2-only; on Python 3 it
    raises AttributeError.  Use ``__name__`` instead.
    """
    def inner(self, *args, **kwargs):
        method_name = class_method.__name__
        if class_method(self, *args, **kwargs):
            for supergraph in self.supergraphs:
                getattr(supergraph, method_name)(*args, **kwargs)
        # Note: subgraphs are updated unconditionally (as in the original).
        for subgraph in self.subgraphs:
            getattr(subgraph, method_name)(*args, **kwargs)
    return inner
class Graph(object):
    """Generic graph: a list of vertices plus a set of edges.

    Concrete vertex/edge behaviour is pluggable via the ``Vertex`` and
    ``Edge`` constructor arguments (duck-typed to BasicNode/BasicEdge).
    Labels map names to vertices/edges through ``vertex_dict``/``edge_dict``.
    """

    def __init__(self, vertices=None, edges=None, Vertex=BasicNode, Edge=BasicEdge):
        if edges is None:
            edges = []
        self.edges = set(edges)
        if vertices is None:
            vertices = []
        self.vertices = vertices
        self.vertex_dict = {}
        # Bug fix: the label helpers below read ``self.edge_dict`` but the
        # original __init__ only created ``self.edges_dict`` (AttributeError
        # on first use).  Bind both names to the same dict for compatibility.
        self.edge_dict = self.edges_dict = {}
        self.Vertex = Vertex
        self.Edge = Edge

    def create_vertex(self, *args, **kwargs):
        """Construct a new vertex and append it to the graph."""
        self.vertices.append(self.Vertex(*args, **kwargs))

    def create_vertices(self, no_create):
        """Create *no_create* fresh vertices."""
        for _ in range(no_create):
            self.create_vertex()

    def add_vertex(self, Vertex):
        """Append an existing vertex object to the graph."""
        self.vertices.append(Vertex)

    def create_edge(self, ends):
        """Create an edge between the two vertices in *ends* and register it
        on both endpoints."""
        NewEdge = self.Edge(ends=ends)
        self.edges.add(NewEdge)
        for Vertex in ends:
            Vertex.add_edge(NewEdge)

    def remove_edge(self, Edge):
        """Drop *Edge* from the graph (no error if absent)."""
        self.edges.discard(Edge)

    def get_incident_edges(self, Vertex):
        """Return the set of edges touching *Vertex*."""
        return Vertex.incident_edges

    def remove_vertex(self, Vertex):
        """Remove *Vertex* and drop every edge incident to it."""
        edges_to_remove = self.get_incident_edges(Vertex)
        for Edge in edges_to_remove:
            self.remove_edge(Edge)
        self.vertices.remove(Vertex)

    def get_vertex_neighbors(self, Vertex):
        """Return the set of vertices adjacent to *Vertex*."""
        return Vertex.get_neighbors()

    def get_degree(self, Vertex):
        """Return the number of edges incident to *Vertex*."""
        return len(self.get_incident_edges(Vertex))

    def get_number_vertices(self):
        return len(self.vertices)

    def get_number_edges(self):
        return len(self.edges)

    def get_adjacency_matrix(self):
        """Return the adjacency matrix as a numpy array; row order follows
        ``self.vertices``."""
        adj_list = [self.get_adjacency_list_of_vertex(v) for v in self.vertices]
        return np.array(adj_list)

    def get_adjacency_matrix_as_list(self):
        """Return the adjacency matrix as nested Python lists."""
        return self.get_adjacency_matrix().tolist()

    def set_adjacency_list(self, adj_list):
        # NOTE(review): stub — clears the graph but never reads adj_list.
        self.vertices = []
        self.edges = []

    def is_in(self, vertex_or_edge):
        """True when the argument is a vertex or an edge of this graph."""
        return vertex_or_edge in self.edges or vertex_or_edge in self.vertices

    def get_incident_outward_edges(self, Vertex):
        return Vertex.incident_outward_edges

    def get_incident_inward_edges(self, Vertex):
        return Vertex.incident_inward_edges

    def get_vertex_targets(self, Vertex):
        """Return the vertices that *Vertex* points to."""
        return Vertex.get_targets()

    def get_vertex_sources(self, Vertex):
        """Return the vertices that point to *Vertex*."""
        return Vertex.get_sources()

    def add_vertex_label(self, vertex, label):
        """Register *vertex* under *label* and tag the vertex."""
        self.vertex_dict[label] = vertex
        vertex.add_label(label)

    def get_vertex(self, label):
        """Return the vertex registered under *label*, or None."""
        if label in self.vertex_dict:
            return self.vertex_dict[label]

    def get_vertex_label(self, vertex):
        """Return the labels under which *vertex* is registered.

        Bug fixes: the original subscripted the bound method
        (``self.get_vertex[x]``, a TypeError) and never returned the result.
        NOTE(review): relies on ``vertex.get_labels()``, which BasicNode does
        not define — confirm the intended Vertex interface.
        """
        labels = vertex.get_labels()
        labels = labels & self.vertex_dict.keys()
        return [label for label in labels if self.get_vertex(label) == vertex]

    def remove_vertex_label(self, label):
        """Unregister *label*; silently ignore unknown labels."""
        vertex = self.vertex_dict.pop(label, 'Not Found')
        if vertex == 'Not Found':
            return
        vertex.remove_label(label)

    def add_edge_label(self, edge, label):
        """Register *edge* under *label* and tag the edge."""
        self.edge_dict[label] = edge
        edge.add_label(label)

    def get_edge(self, label):
        """Return the edge registered under *label*, or None."""
        if label in self.edge_dict:
            return self.edge_dict[label]
        return None

    def get_edge_label(self, edge):
        """Return the labels under which *edge* is registered.

        Bug fixes mirror get_vertex_label (method subscript, missing return).
        """
        labels = edge.get_labels()
        return [label for label in labels if self.get_edge(label) == edge]

    def remove_edge_label(self, label):
        """Unregister *label*; silently ignore unknown labels."""
        edge = self.edge_dict.pop(label, 'Not Found')
        if edge == 'Not Found':
            return
        edge.remove_label(label)
class UnDirGraph(Graph, object):
    """Undirected graph: adjacency is symmetric; the matrix is rebuilt from
    the lower triangle and plotting uses igraph's undirected mode."""

    def get_adjacency_list_of_vertex(self, Vertex):
        """Return *Vertex*'s row of the adjacency matrix (edge multiplicity)."""
        N = self.get_number_vertices()
        adj_list = [0 for x in range(N)]
        incident_edges = self.get_incident_edges(Vertex)
        for Edge in incident_edges:
            ends = Edge.ends
            # Count the endpoint that is not *Vertex*; a loop edge falls into
            # the else-branch and increments the diagonal entry once.
            if ends[0] != Vertex:
                index = self.vertices.index(ends[0])
            else:
                index = self.vertices.index(ends[1])
            adj_list[index] += 1
        return adj_list

    def set_adjacency_matrix(self, adj_mat):
        """Rebuild vertices and edges from a square adjacency matrix.

        Only the lower triangle (including the diagonal) is read, since an
        undirected adjacency matrix is symmetric.  Entry values are treated
        as edge multiplicities.
        """
        shape = np.shape(adj_mat)
        if shape[0] != shape[1]:
            print('Wrong shape, expecting square matrix.')
            return
        n = shape[0]
        self.vertices = []
        self.edges = set([])
        self.create_vertices(n)
        for row in range(n):
            Source = self.vertices[row]
            for col in range(row + 1):
                no_edges = adj_mat[row, col]
                Target = self.vertices[col]
                for Edge in range(no_edges):
                    self.create_edge(ends=[Source, Target])

    def plot(self):
        """Render the graph with python-igraph (opens the default plot target)."""
        A = self.get_adjacency_matrix_as_list()
        g = ig.Graph.Adjacency(A, mode='undirected')
        # Copy any vertex labels onto the igraph vertices by position.
        for vertex in self.vertices:
            if vertex.label !=None:
                # print vertex.label
                index = self.vertices.index(vertex)
                g.vs[index]['label'] = vertex.label
        # layout = g.layout("rt", root=0)
        visual_style = {}
        visual_style["vertex_size"] = 20
        visual_style["vertex_label_angle"] = 0
        visual_style["vertex_label_dist"] = 1
        # layout.rotate(180)
        ig.plot(g, margin=60, **visual_style)
class DirGraph(Graph):
    """Directed graph: adjacency rows count outward edges only."""

    def get_adjacency_list_of_vertex(self, Vertex):
        """Return *Vertex*'s row of the adjacency matrix (outgoing edges only)."""
        N = self.get_number_vertices()
        adj_list = [0 for x in range(N)]
        incident_edges = self.get_incident_outward_edges(Vertex)
        for Edge in incident_edges:
            target = Edge.target()
            index = self.vertices.index(target)
            adj_list[index] += 1
        return adj_list

    def set_adjacency_matrix(self, adj_mat):
        """Rebuild vertices and edges from a full square adjacency matrix.

        Entry (row, col) gives the number of edges from vertex ``row`` to
        vertex ``col``.
        """
        shape = np.shape(adj_mat)
        if shape[0] != shape[1]:
            print('Wrong shape, expecting square matrix.')
            return
        n = shape[0]
        self.vertices = []
        self.edges = set([])
        self.create_vertices(n)
        for row in range(n):
            for col in range(n):
                no_edges = adj_mat[row, col]
                Source = self.vertices[row]
                Target = self.vertices[col]
                for Edge in range(no_edges):
                    self.create_edge(ends=[Source, Target])

    def get_vertex_targets(self, Vertex):
        """Return the vertices that *Vertex* points to."""
        targets = Vertex.get_targets()
        return targets

    def get_vertex_sources(self, Vertex):
        """Return the vertices pointing to *Vertex*."""
        sources = Vertex.get_sources()
        return sources

    def plot(self):
        """Render the directed graph with python-igraph."""
        A = self.get_adjacency_matrix_as_list()
        g = ig.Graph.Adjacency(A)
        for vertex in self.vertices:
            if vertex.label != None:
                index = self.vertices.index(vertex)
                g.vs[index]['label'] = vertex.label
                g.vs[index]['label_dist'] = 2
        # NOTE(review): layout is computed but never passed to ig.plot.
        layout = g.layout("circle")
        ig.plot(g)
#This is a wrapper to a class definition, deciding whether to inherit
#from DirGraph or UnDirGraph at runtime. It can be initialised by
#number of vertices or the number of edges.
def return_linear_class(directed=False):
    """Build and return a Linear (path-graph) class over the requested base.

    The returned class inherits DirGraph when *directed* is true, otherwise
    UnDirGraph, and populates itself as a simple path on construction.
    """
    base = DirGraph if directed else UnDirGraph

    class Linear(base, object):
        def __init__(self, number_vertices=0, number_edges=0, **kwargs):
            super(Linear, self).__init__(**kwargs)
            self.linear_generate(number_vertices, number_edges)

        def linear_generate(self, number_vertices, number_edges):
            """Populate the graph as a path; counts must satisfy V == E + 1."""
            if number_edges != 0 and number_vertices != 0:
                if number_vertices != number_edges + 1:
                    print('Number of edges and vertices incompatible!')
                    return
                self.number_vertices = number_vertices
            elif number_edges != 0:
                self.number_vertices = number_edges + 1
            else:
                self.number_vertices = number_vertices
            self.create_vertices(self.number_vertices)
            # Chain consecutive vertices with one edge each.
            for start, end in zip(self.vertices, self.vertices[1:]):
                self.create_edge([start, end])

    return Linear
#instantiates the Linear class
def create_linear(directed=False, number_vertices=0, number_edges=0, **kwargs):
    """Instantiate and return a Linear graph of the requested direction/size."""
    linear_cls = return_linear_class(directed)
    return linear_cls(number_vertices, number_edges, **kwargs)
#Class definition wrapper to dynamically inherti from DirGraph or UnDirGraph.
#Also has a composition from Linear, to create a cycle it joins the ends
#of a linear graph.
def return_cycle_class(directed=False):
    """Build and return a Cycle graph class over the requested base.

    A cycle is constructed by reusing a Linear (path) graph and then joining
    its last vertex back to its first.
    """
    base = DirGraph if directed else UnDirGraph

    class Cycle(base, object):
        def __init__(self, number_vertices=0, number_edges=0, **kwargs):
            super(Cycle, self).__init__(**kwargs)
            if number_edges != 0 and number_vertices != 0:
                if number_edges != number_vertices:
                    print('Numbers of edges and vertices incompatible!')
                    return
            elif number_edges != 0:
                number_vertices = number_edges
            # Build the open path first, then close it into a ring.
            path = create_linear()
            path.linear_generate(number_vertices, number_edges - 1)
            self.vertices = path.vertices
            self.edges = path.edges
            self.cycle_generate(number_vertices)

        def cycle_generate(self, number_vertices):
            """Add the closing edge from the last vertex back to the first."""
            first = self.vertices[0]
            last = self.vertices[number_vertices - 1]
            self.create_edge(ends=[last, first])

    return Cycle
def create_cycle(directed=False, number_vertices=0, number_edges=0, **kwargs):
    """Instantiate and return a Cycle graph of the requested direction/size."""
    cycle_cls = return_cycle_class(directed)
    return cycle_cls(number_vertices, number_edges, **kwargs)
class Complete(UnDirGraph, object):
    """Undirected complete graph K_n: exactly one edge joins every unordered
    pair of distinct vertices."""

    def __init__(self, number_vertices=0, **kwargs):
        super(Complete, self).__init__(**kwargs)
        self.create_vertices(no_create=number_vertices)
        # Pair each vertex only with the vertices after it, so every
        # unordered pair yields one edge and no loops are created.
        for position, source in enumerate(self.vertices):
            for target in self.vertices[position + 1:]:
                self.create_edge(ends=[source, target])
def return_tree_class(directed=False):
    """Build and return a Tree class over the directed/undirected base.

    A Tree tracks its leaf set and supports labelling one vertex as 'Root'.
    """
    base = DirGraph if directed else UnDirGraph

    class Tree(base, object):
        def __init__(self, **kwargs):
            super(Tree, self).__init__(**kwargs)
            self.leaves = set()
            self.find_leaves()

        def is_leaf(self, vertex):
            """A vertex is a leaf when it has degree 1, or when it is the
            only vertex in the tree."""
            return self.get_degree(vertex) == 1 or self.get_number_vertices() == 1

        def set_root(self, vertex):
            """Label *vertex* as the root (replacing any previous root)."""
            if vertex not in self.vertices:
                return
            self.remove_vertex_label('Root')
            self.add_vertex_label(vertex, label='Root')

        def get_root(self):
            """Return the vertex labelled 'Root', or None."""
            return self.get_vertex('Root')

        def find_leaves(self):
            """Recompute and cache the leaf set; also return it as a list."""
            self.leaves = {v for v in self.vertices if self.is_leaf(v)}
            return list(self.leaves)

    return Tree
def create_tree(directed=False, **kwargs):
    """Instantiate and return a Tree graph of the requested direction."""
    tree_cls = return_tree_class(directed)
    return tree_cls(**kwargs)
def return_nary_tree_class(directed=False):
    """Build and return an N-ary rooted tree class over the Tree base."""
    base = return_tree_class(directed)

    class NaryRootedTree(base, object):
        def __init__(self, N=0, **kwargs):
            super(NaryRootedTree, self).__init__(**kwargs)
            self.N = N  # branching factor

        def split_vertex(self, vertex):
            """Expand leaf *vertex* by attaching N fresh children."""
            if vertex not in self.leaves:
                return
            children = [self.Vertex() for _ in range(self.N)]
            vertex.children = children
            self.leaves.discard(vertex)
            for child in children:
                self.add_vertex(child)
                self.leaves.add(child)
                child.parent = vertex
                self.create_edge(ends=[vertex, child])

        def fuse_vertex(self, vertex):
            """Recursively remove *vertex*'s descendants, turning it back
            into a leaf."""
            self.leaves.add(vertex)
            children = getattr(vertex, 'children', None)
            if children is None:
                return
            for child in children:
                self.fuse_vertex(child)
                self.leaves.discard(child)
                self.remove_vertex(child)
                child.parent = None
            vertex.children = None

        def create_full_n_level(self, n):
            """Rebuild the tree as a complete N-ary tree of depth *n*."""
            self.vertices = []
            self.edges = set()
            self.create_vertex()
            self.set_root(self.vertices[0])
            for _ in range(n):
                for leaf in self.find_leaves():
                    self.split_vertex(leaf)

        def get_descendants(self, node, desc=None):
            """Return the set of all descendants of *node*.

            Bug fixes: the original used the mutable default ``desc=set({})``
            and raised NameError (unbound local ``children``) whenever *node*
            had no ``children`` attribute.
            """
            if desc is None:
                desc = set()
            children = getattr(node, 'children', None)
            if children is None:
                node.children = None  # normalise missing attribute, as before
                return desc
            for child in children:
                desc = desc.union({child})
                desc = desc.union(self.get_descendants(child))
            return desc

    return NaryRootedTree
def create_nary_tree(directed=False, N=0, **kwargs):
    """Instantiate and return an N-ary rooted tree of the requested direction."""
    tree_cls = return_nary_tree_class(directed)
    return tree_cls(N, **kwargs)
| [
"numpy.array",
"numpy.shape",
"igraph.Graph.Adjacency",
"igraph.plot"
] | [((4606, 4624), 'numpy.array', 'np.array', (['adj_list'], {}), '(adj_list)\n', (4614, 4624), True, 'import numpy as np\n'), ((7433, 7450), 'numpy.shape', 'np.shape', (['adj_mat'], {}), '(adj_mat)\n', (7441, 7450), True, 'import numpy as np\n'), ((8074, 8114), 'igraph.Graph.Adjacency', 'ig.Graph.Adjacency', (['A'], {'mode': '"""undirected"""'}), "(A, mode='undirected')\n", (8092, 8114), True, 'import igraph as ig\n'), ((8568, 8605), 'igraph.plot', 'ig.plot', (['g'], {'margin': '(60)'}), '(g, margin=60, **visual_style)\n', (8575, 8605), True, 'import igraph as ig\n'), ((9077, 9094), 'numpy.shape', 'np.shape', (['adj_mat'], {}), '(adj_mat)\n', (9085, 9094), True, 'import numpy as np\n'), ((9923, 9944), 'igraph.Graph.Adjacency', 'ig.Graph.Adjacency', (['A'], {}), '(A)\n', (9941, 9944), True, 'import igraph as ig\n'), ((10222, 10232), 'igraph.plot', 'ig.plot', (['g'], {}), '(g)\n', (10229, 10232), True, 'import igraph as ig\n')] |
# -*- coding: utf-8 -*-
import sys
import os
from os.path import join, dirname, abspath, exists
sys.path.insert(0, dirname(dirname(abspath(__file__))))
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
import utils.config_loader as config
from utils.config_loader import logger, path_parser
import summ.rank_sent as rank_sent
import utils.tools as tools
import tools.tfidf_tools as tfidf_tools
import tools.general_tools as general_tools
import frame.ir.ir_tools as ir_tools
from tqdm import tqdm
import shutil
import frame.ir.ir_config as ir_config
import summ.compute_rouge as rouge
from frame.ir.ir_tools import load_retrieved_passages
import numpy as np
if config.grain != 'passage':
raise ValueError('Invalid grain: {}'.format(config.grain))
def _rank(cid, query):
    """Score every passage of cluster *cid* against *query* with TF relevance
    and return the sorted rank records (no sentence payload)."""
    passage_scores = tfidf_tools.build_rel_scores_tf_passage(cid, query)
    ordered_ids = rank_sent.sort_sid2score(passage_scores)
    return rank_sent.get_rank_records(ordered_ids, sents=None)
def rank_e2e():
    """End-to-end TF ranking: score passages for every test cluster and dump
    one ranking file per cluster id under summary_rank/IR_MODEL_NAME_TF.

    :raises ValueError: if the output directory already exists (protects
        previous runs from being overwritten).
    """
    rank_dp = join(path_parser.summary_rank, ir_config.IR_MODEL_NAME_TF)
    test_cid_query_dicts = general_tools.build_test_cid_query_dicts(tokenize_narr=False,
                                                                  concat_title_narr=ir_config.CONCAT_TITLE_NARR,
                                                                  query_type=ir_config.QUERY_TYPE)
    if exists(rank_dp):
        raise ValueError('rank_dp exists: {}'.format(rank_dp))
    os.mkdir(rank_dp)
    for cid_query_dict in tqdm(test_cid_query_dicts):
        # Each dict carries the cid plus the query fields _rank expects.
        params = {
            **cid_query_dict,
        }
        rank_records = _rank(**params)
        rank_sent.dump_rank_records(rank_records, out_fp=join(rank_dp, params['cid']), with_rank_idx=False)
    logger.info('Successfully dumped rankings to: {}'.format(rank_dp))
def ir_rank2records():
    """Convert the dumped TF rankings into filtered retrieval records, one
    file per test cluster, under summary_rank/IR_RECORDS_DIR_NAME_TF.

    :raises ValueError: if the output directory already exists.
    """
    ir_rec_dp = join(path_parser.summary_rank, ir_config.IR_RECORDS_DIR_NAME_TF)
    if exists(ir_rec_dp):
        # Bug fix: the message previously referenced 'qa_rec_dp' (copy-paste
        # from another module) while the directory checked is ir_rec_dp.
        raise ValueError('ir_rec_dp exists: {}'.format(ir_rec_dp))
    os.mkdir(ir_rec_dp)
    cids = tools.get_test_cc_ids()
    for cid in tqdm(cids):
        retrieval_params = {
            'model_name': ir_config.IR_MODEL_NAME_TF,
            'cid': cid,
            'filter_var': ir_config.FILTER_VAR,
            'filter': ir_config.FILTER,
            'deduplicate': ir_config.DEDUPLICATE,
            'prune': True,
        }
        retrieved_items = ir_tools.retrieve(**retrieval_params)
        ir_tools.dump_retrieval(fp=join(ir_rec_dp, cid), retrieved_items=retrieved_items)
def tune():
    """
    Tune IR confidence / compression rate based on Recall Rouge 2.

    Sweeps the filter variable (confidence in [0.05, 1.0] or a word/compression
    count in steps of 10 up to 500), rebuilds the retrieved summaries for every
    test cluster at each setting, scores them with ROUGE, and appends one
    result row per setting to a TSV report file.
    """
    if ir_config.FILTER == 'conf':
        # Confidence threshold sweep: 0.05, 0.10, ..., 1.00.
        tune_range = np.arange(0.05, 1.05, 0.05)
    else:
        interval = 10
        tune_range = range(interval, 500 + interval, interval)
    ir_tune_dp = join(path_parser.summary_rank, ir_config.IR_TUNE_DIR_NAME_TF)
    ir_tune_result_fp = join(path_parser.tune, ir_config.IR_TUNE_DIR_NAME_TF)
    # Append-mode: an existing report file keeps rows from earlier runs.
    with open(ir_tune_result_fp, mode='a', encoding='utf-8') as out_f:
        headline = 'Filter\tRecall\tF1\n'
        out_f.write(headline)
    cids = tools.get_test_cc_ids()
    for filter_var in tune_range:
        if exists(ir_tune_dp):  # remove previous output
            shutil.rmtree(ir_tune_dp)
        os.mkdir(ir_tune_dp)
        for cid in tqdm(cids):
            retrieval_params = {
                'model_name': ir_config.IR_MODEL_NAME_TF,
                'cid': cid,
                'filter_var': filter_var,
                'filter': ir_config.FILTER,
                'deduplicate': ir_config.DEDUPLICATE,
                'prune': True,
            }
            retrieved_items = ir_tools.retrieve(**retrieval_params)  # pid, score
            passage_ids = [item[0] for item in retrieved_items]
            original_passages, _, _ = load_retrieved_passages(cid=cid,
                                                            get_sents=True,
                                                            passage_ids=passage_ids)
            # One newline-joined block per passage, then join passages into a summary.
            passages = ['\n'.join(sents) for sents in original_passages]
            summary = '\n'.join(passages)
            print(summary)
            # print(summary)
            with open(join(ir_tune_dp, cid), mode='a', encoding='utf-8') as out_f:
                out_f.write(summary)
        # Score all summaries built at this filter setting in one ROUGE pass.
        performance = rouge.compute_rouge_for_dev(ir_tune_dp, tune_centrality=False)
        with open(ir_tune_result_fp, mode='a', encoding='utf-8') as out_f:
            if ir_config.FILTER == 'conf':
                rec = '{0:.2f}\t{1}\n'.format(filter_var, performance)
            else:
                rec = '{0}\t{1}\n'.format(filter_var, performance)
            out_f.write(rec)
if __name__ == '__main__':
    # Build the TF rankings first, then sweep the retrieval filter on ROUGE.
    rank_e2e()
    # ir_rank2records()
    tune()
| [
"os.path.exists",
"summ.compute_rouge.compute_rouge_for_dev",
"summ.rank_sent.get_rank_records",
"tools.general_tools.build_test_cid_query_dicts",
"os.path.join",
"tqdm.tqdm",
"frame.ir.ir_tools.retrieve",
"frame.ir.ir_tools.load_retrieved_passages",
"os.mkdir",
"shutil.rmtree",
"os.path.abspath... | [((812, 863), 'tools.tfidf_tools.build_rel_scores_tf_passage', 'tfidf_tools.build_rel_scores_tf_passage', (['cid', 'query'], {}), '(cid, query)\n', (851, 863), True, 'import tools.tfidf_tools as tfidf_tools\n'), ((903, 938), 'summ.rank_sent.sort_sid2score', 'rank_sent.sort_sid2score', (['pid2score'], {}), '(pid2score)\n', (927, 938), True, 'import summ.rank_sent as rank_sent\n'), ((993, 1047), 'summ.rank_sent.get_rank_records', 'rank_sent.get_rank_records', (['sid_score_list'], {'sents': 'None'}), '(sid_score_list, sents=None)\n', (1019, 1047), True, 'import summ.rank_sent as rank_sent\n'), ((1263, 1321), 'os.path.join', 'join', (['path_parser.summary_rank', 'ir_config.IR_MODEL_NAME_TF'], {}), '(path_parser.summary_rank, ir_config.IR_MODEL_NAME_TF)\n', (1267, 1321), False, 'from os.path import join, dirname, abspath, exists\n'), ((1349, 1499), 'tools.general_tools.build_test_cid_query_dicts', 'general_tools.build_test_cid_query_dicts', ([], {'tokenize_narr': '(False)', 'concat_title_narr': 'ir_config.CONCAT_TITLE_NARR', 'query_type': 'ir_config.QUERY_TYPE'}), '(tokenize_narr=False,\n concat_title_narr=ir_config.CONCAT_TITLE_NARR, query_type=ir_config.\n QUERY_TYPE)\n', (1389, 1499), True, 'import tools.general_tools as general_tools\n'), ((1635, 1650), 'os.path.exists', 'exists', (['rank_dp'], {}), '(rank_dp)\n', (1641, 1650), False, 'from os.path import join, dirname, abspath, exists\n'), ((1719, 1736), 'os.mkdir', 'os.mkdir', (['rank_dp'], {}), '(rank_dp)\n', (1727, 1736), False, 'import os\n'), ((1764, 1790), 'tqdm.tqdm', 'tqdm', (['test_cid_query_dicts'], {}), '(test_cid_query_dicts)\n', (1768, 1790), False, 'from tqdm import tqdm\n'), ((2111, 2175), 'os.path.join', 'join', (['path_parser.summary_rank', 'ir_config.IR_RECORDS_DIR_NAME_TF'], {}), '(path_parser.summary_rank, ir_config.IR_RECORDS_DIR_NAME_TF)\n', (2115, 2175), False, 'from os.path import join, dirname, abspath, exists\n'), ((2184, 2201), 'os.path.exists', 'exists', 
(['ir_rec_dp'], {}), '(ir_rec_dp)\n', (2190, 2201), False, 'from os.path import join, dirname, abspath, exists\n'), ((2274, 2293), 'os.mkdir', 'os.mkdir', (['ir_rec_dp'], {}), '(ir_rec_dp)\n', (2282, 2293), False, 'import os\n'), ((2306, 2329), 'utils.tools.get_test_cc_ids', 'tools.get_test_cc_ids', ([], {}), '()\n', (2327, 2329), True, 'import utils.tools as tools\n'), ((2345, 2355), 'tqdm.tqdm', 'tqdm', (['cids'], {}), '(cids)\n', (2349, 2355), False, 'from tqdm import tqdm\n'), ((3105, 3166), 'os.path.join', 'join', (['path_parser.summary_rank', 'ir_config.IR_TUNE_DIR_NAME_TF'], {}), '(path_parser.summary_rank, ir_config.IR_TUNE_DIR_NAME_TF)\n', (3109, 3166), False, 'from os.path import join, dirname, abspath, exists\n'), ((3191, 3244), 'os.path.join', 'join', (['path_parser.tune', 'ir_config.IR_TUNE_DIR_NAME_TF'], {}), '(path_parser.tune, ir_config.IR_TUNE_DIR_NAME_TF)\n', (3195, 3244), False, 'from os.path import join, dirname, abspath, exists\n'), ((3400, 3423), 'utils.tools.get_test_cc_ids', 'tools.get_test_cc_ids', ([], {}), '()\n', (3421, 3423), True, 'import utils.tools as tools\n'), ((2666, 2703), 'frame.ir.ir_tools.retrieve', 'ir_tools.retrieve', ([], {}), '(**retrieval_params)\n', (2683, 2703), True, 'import frame.ir.ir_tools as ir_tools\n'), ((2964, 2991), 'numpy.arange', 'np.arange', (['(0.05)', '(1.05)', '(0.05)'], {}), '(0.05, 1.05, 0.05)\n', (2973, 2991), True, 'import numpy as np\n'), ((3469, 3487), 'os.path.exists', 'exists', (['ir_tune_dp'], {}), '(ir_tune_dp)\n', (3475, 3487), False, 'from os.path import join, dirname, abspath, exists\n'), ((3561, 3581), 'os.mkdir', 'os.mkdir', (['ir_tune_dp'], {}), '(ir_tune_dp)\n', (3569, 3581), False, 'import os\n'), ((3602, 3612), 'tqdm.tqdm', 'tqdm', (['cids'], {}), '(cids)\n', (3606, 3612), False, 'from tqdm import tqdm\n'), ((4616, 4678), 'summ.compute_rouge.compute_rouge_for_dev', 'rouge.compute_rouge_for_dev', (['ir_tune_dp'], {'tune_centrality': '(False)'}), '(ir_tune_dp, tune_centrality=False)\n', 
(4643, 4678), True, 'import summ.compute_rouge as rouge\n'), ((133, 150), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'from os.path import join, dirname, abspath, exists\n'), ((3527, 3552), 'shutil.rmtree', 'shutil.rmtree', (['ir_tune_dp'], {}), '(ir_tune_dp)\n', (3540, 3552), False, 'import shutil\n'), ((3949, 3986), 'frame.ir.ir_tools.retrieve', 'ir_tools.retrieve', ([], {}), '(**retrieval_params)\n', (3966, 3986), True, 'import frame.ir.ir_tools as ir_tools\n'), ((4104, 4177), 'frame.ir.ir_tools.load_retrieved_passages', 'load_retrieved_passages', ([], {'cid': 'cid', 'get_sents': '(True)', 'passage_ids': 'passage_ids'}), '(cid=cid, get_sents=True, passage_ids=passage_ids)\n', (4127, 4177), False, 'from frame.ir.ir_tools import load_retrieved_passages\n'), ((197, 214), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (204, 214), False, 'from os.path import join, dirname, abspath, exists\n'), ((1947, 1975), 'os.path.join', 'join', (['rank_dp', "params['cid']"], {}), "(rank_dp, params['cid'])\n", (1951, 1975), False, 'from os.path import join, dirname, abspath, exists\n'), ((2739, 2759), 'os.path.join', 'join', (['ir_rec_dp', 'cid'], {}), '(ir_rec_dp, cid)\n', (2743, 2759), False, 'from os.path import join, dirname, abspath, exists\n'), ((4495, 4516), 'os.path.join', 'join', (['ir_tune_dp', 'cid'], {}), '(ir_tune_dp, cid)\n', (4499, 4516), False, 'from os.path import join, dirname, abspath, exists\n')] |
"""Node that calls the MaskRCNN module for object detection."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import copy
import os
from typing import Dict
import cv2
import cv_bridge
from execution_interfaces.msg import TaskStatus
from geometry_msgs.msg import TransformStamped
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import object_detection.maskRCNN as maskRCNN
import object_detection.mask_centroid_heuristic as heuristic
from object_recognition_interfaces.msg import BoolList, BoolMask, DetectedObject
from object_recognition_interfaces.srv import GetObjects, UpdateObject
from perception_utils.homogeneous_matrix import homogeneous_matrix
import perception_utils.pinhole_camera as camera_utils
import perception_utils.process_detection as detect_utils
from perception_utils.transformations import translation_matrix
from rcl_interfaces.msg import ParameterDescriptor
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import CameraInfo, Image
from tf2_ros import TransformBroadcaster
from tf2_ros.buffer import Buffer
from tf2_ros.transform_listener import TransformListener
_cv_bridge = cv_bridge.CvBridge()
class MaskRCNNDetection(Node):
    """
    ROS 2 node that performs MaskRCNN object detection and publishes object TF frames.

    The node subscribes to aligned color/depth images and the robot task status,
    runs detection on a timer, publishes a masked debug image, and (unless
    configured as detection-only) broadcasts one TF frame per relevant object
    using a mask-centroid heuristic. It also exposes two services: one to get
    the currently detected objects and one to update an object's information
    (e.g. via the disambiguation framework, changing its <unique> value, which
    also updates the published frame names accordingly).
    """
    def __init__(self):
        """Declare parameters, initialize MaskRCNN, and wire up ROS interfaces."""
        super().__init__('object_publisher')
        # Parameter description
        objects_ = ParameterDescriptor(
            description='List of objects to detect.')
        freq_ = ParameterDescriptor(
            description='Timer frequence for the object detection.')
        detect_ = ParameterDescriptor(
            description='Perform detection only.',
            additional_constraints='If false, a frame is attached to every object.'
        )
        rotation_ = ParameterDescriptor(
            description='Use the same rotation as the base frame.',
            additional_constraints='If false, rotation is given from the heuristic.'
        )
        # Declare and read parameters
        self.declare_parameter('base_frame', 'base_link')
        self.declare_parameter('camera_frame', 'camera_link')
        self.declare_parameter('objects', ['banana'], objects_)
        self.declare_parameter('detection_freq', 1.0, freq_)
        self.declare_parameter('only_detect', True, detect_)
        self.declare_parameter('simple_rotation', True, rotation_)
        self.base_frame = self.get_parameter('base_frame').get_parameter_value().string_value
        self.camera_frame = self.get_parameter('camera_frame').get_parameter_value().string_value
        self.objects = self.get_parameter('objects').get_parameter_value().string_array_value
        self.frequence = self.get_parameter('detection_freq').get_parameter_value().double_value
        self.only_detect = self.get_parameter('only_detect').get_parameter_value().bool_value
        self.simple_rot = self.get_parameter('simple_rotation').get_parameter_value().bool_value
        # Camera intrinsics, filled by camera_info_callback.
        self.camera_proj_matrix = None
        self.camera_model = None
        # Latest sensor data, filled by the image callbacks.
        self.latest_color_ros_image = None
        self.latest_depth_ros_image = None
        self.depth_image_avg = None
        self.image_saved = False
        # Robot task state, filled by task_callback; used to pause detection.
        self.task = None
        self.status = None
        self.trigger_place = False
        self.standby_detection_counter = 0
        self.first_detection = True
        # Detection state: dict keyed by object frame name, plus a flat name list.
        self.detected_objects = {}
        self.objects_list = []
        # Initialize MaskRCNN
        self.get_logger().warn('Initializing detection.')
        self.detectron = maskRCNN.MaskRCNN_detectron()
        # ---------------- SUBSCRIBERS
        self._camera_info_subscriber = self.create_subscription(
            CameraInfo,
            '~/camera_info',
            self.camera_info_callback,
            qos_profile=rclpy.qos.qos_profile_sensor_data
        )
        self._color_subscriber = self.create_subscription(
            Image,
            '~/rgb_to_depth/image_rect',
            self.color_callback,
            qos_profile=rclpy.qos.qos_profile_sensor_data,
        )
        self._depth_subscriber = self.create_subscription(
            Image,
            '~/depth/image_rect',
            self.depth_callback,
            qos_profile=rclpy.qos.qos_profile_sensor_data,
        )
        self._task_subscriber = self.create_subscription(
            TaskStatus,
            '~/robot/task_status',
            self.task_callback,
            qos_profile=rclpy.qos.qos_profile_sensor_data,
        )
        # ---------------- PUBLISHER
        self._masked_image_publisher = self.create_publisher(
            Image,
            '~/rgb_to_depth/image_masked',
            10
        )
        # ---------------- TF
        # Initialize the transform broadcaster
        self.br = TransformBroadcaster(self)
        # Initialize the transform listener
        # Make the buffer retain past transforms for 5 seconds
        self.tf_buffer = Buffer(rclpy.duration.Duration(seconds=5))
        self.tf_listener = TransformListener(self.tf_buffer, self)
        timer_period = 1.0/self.frequence  # seconds
        self.timer = self.create_timer(timer_period, self.maskRCNN_detection)
        # ---------------- SERVICES
        self.get_obj_srv = self.create_service(
            GetObjects, '~/detection_srv', self.get_objects_callback)
        self.update_obj_srv = self.create_service(
            UpdateObject, '~/update_objects_srv', self.update_object_callback)
    # --------------------------------------------------------------------------------------------
    # ---------------------------------------- SUBSCRIBERS ---------------------------------------
    # --------------------------------------------------------------------------------------------
    def camera_info_callback(self, camera_info: CameraInfo) -> None:
        """
        Store the projection matrix for the camera.

        Args
        ----
        camera_info:
            the camera info for the color image and depth image;
            depth and color are assumed to have the same camera info.

        """
        self.get_logger().debug('Received camera info.')
        # Projection matrix P is a flat 12-vector in the message; reshape to 3x4.
        self.camera_proj_matrix = np.array(camera_info.p).reshape((3, 4))
        self.camera_model = camera_utils.Camera(
            width=camera_info.width,
            height=camera_info.height,
            fx=self.camera_proj_matrix[0, 0],
            fy=self.camera_proj_matrix[1, 1],
            cx=self.camera_proj_matrix[0, 2],
            cy=self.camera_proj_matrix[1, 2],
            pixel_center=0.0,  # not sure what this is in ROS
        )
    def color_callback(self, color_ros_image: Image):
        """
        Store the most recent color image.

        Also writes the first received color image to disk once, for later
        offline testing with the disambiguation framework.

        Args:
        ----
        color_ros_image: the color image to store.
        """
        self.get_logger().debug('Received color image.')
        self.latest_color_ros_image = color_ros_image
        # Save the first color image to disk once (comment previously said
        # "depth image" — it is the color image that is saved here).
        if not self.image_saved:
            # NOTE(review): hard-coded workstation path; breaks on other hosts.
            root_path = '/home/wasp/abb/ros2/core_ws/src'
            repo_path = 'behavior-tree-learning/hri/disambiguate/disambiguate/data'
            save_path = os.path.join(root_path, repo_path)
            cv_image = _cv_bridge.imgmsg_to_cv2(color_ros_image, 'bgr8')
            cv2.imwrite(os.path.join(save_path, 'color_img.jpg'), cv_image)
            self.image_saved = True
    def depth_callback(self, depth_ros_image: Image):
        """
        Store the most recent aligned depth image.

        Args:
        ----
        depth_ros_image: the aligned depth image to store.
        """
        self.get_logger().debug('Received depth image.')
        self.latest_depth_ros_image = depth_ros_image
    def task_callback(self, task_status: TaskStatus):
        """Retrieve the task status and save it locally."""
        # 'IDLE' / 'INVALID' are treated as "no update": keep the last known values.
        if task_status.current_task != 'IDLE':
            self.task = task_status.current_task
        if task_status.execution_status != 'INVALID':
            self.status = task_status.execution_status
    # --------------------------------------------------------------------------------------------
    # ---------------------------------------- PUBLISHERS ----------------------------------------
    # --------------------------------------------------------------------------------------------
    def maskRCNN_detection(self):
        """Detect and process image with MaskRCNN detectron.

        Timer callback: runs detection on the latest color image, publishes the
        masked debug image, and (unless only_detect) updates the detected-object
        dictionary and broadcasts TF frames. Detection updates are paused while
        the robot is placing or picking to avoid tracking a moving scene.
        """
        # convert ROS image to numpy array
        color_img_np = camera_utils.ros_img_to_np_img(self.latest_color_ros_image, 'rgb8')
        # predict instances
        detection_result = self.detectron.detect(color_img_np)
        # Detect frisbee as bowl (MaskRCNN often mislabels the bowl; remap
        # the class name and COCO class id in place).
        if 'frisbee' in detection_result['names']:
            idx = detection_result['names'].index('frisbee')
            detection_result['names'][idx] = 'bowl'
            detection_result['class_ids'][idx] = 46
        results = copy(detection_result)
        np_image = copy(color_img_np)
        # [:, :, ::-1] flips RGB -> BGR for OpenCV-based visualization.
        self.__publish_masks(results, np_image[:, :, ::-1])
        if not self.only_detect:
            self.get_logger().warn(f'Executing {self.task}, with status {self.status}.')
            # Pause updates during Place (while RUNNING) or during Pick (unless
            # it FAILED), but for at most 5 consecutive timer ticks.
            if (self.task == 'Place' and self.status == 'RUNNING') or\
               ((self.task == 'Pick' and self.status != 'FAILURE') and
                    self.standby_detection_counter < 5):
                self.get_logger().warn('Pausing object detection while Manipulating.')
                self.trigger_place = True
                self.standby_detection_counter += 1
            else:
                # After a manipulation pause, only_update keeps existing object
                # identities instead of re-creating them from scratch.
                self.detected_objects, self.objects_list, self.first_detection =\
                    detect_utils.process_maskRCNN_results(
                        detection_result,
                        self.detected_objects,
                        self.objects_list,
                        self.first_detection,
                        only_update=self.trigger_place
                    )
                self.get_logger().warn(f'Detected: {self.objects_list}.')
                self.get_logger().warn(f'Keys: {self.detected_objects.keys()}.')
                self.get_logger().debug('Publishing transforms.')
                self.__publish_transformations(self.detected_objects)
                self.trigger_place = False
                self.standby_detection_counter = 0
    # --------------------------------------------------------------------------------------------
    # ----------------------------------------- SERVICES -----------------------------------------
    # --------------------------------------------------------------------------------------------
    def get_objects_callback(self, request, response):
        """Get the current detected objects.

        Fills the response with one DetectedObject message per entry in
        self.detected_objects (category, id, bounding box, boolean mask, and
        disambiguation flag). Fails with a message if nothing is detected.
        """
        if not self.detected_objects:
            # The dictionary is empty!
            response.success = False
            response.message = 'No object detected!'
            response.objects = []
        else:
            response.success = True
            response.message = ''
            for i, key in enumerate(self.detected_objects.keys()):
                object_item = DetectedObject()
                object_item.header.stamp = self.get_clock().now().to_msg()
                object_item.header.frame_id = key
                object_item.category_str = self.detected_objects[key]['category']
                object_item.object_id = self.detected_objects[key]['id']
                object_item.bounding_box = self.detected_objects[key]['bounding_box'].tolist()
                mask_list = self.detected_objects[key]['mask'].tolist()
                mask_msg = BoolMask()
                row_msg = BoolList()
                # NOTE(review): the same row_msg instance is reused and appended
                # for every row, and the inner loop shadows the outer `i` —
                # verify the mask rows arrive intact on the client side.
                for i, row in enumerate(mask_list):
                    row_msg.list = row
                    mask_msg.bool_list.append(row_msg)
                object_item.mask = mask_msg
                object_item.disambiguated = self.detected_objects[key]['unique']
                response.objects.append(object_item)
        self.get_logger().warn('Finished creating response message.')
        return response
    def update_object_callback(self, request, response):
        """Update the dictionary of detected objects with the incoming data.

        The request identifies an existing (ambiguous) object by frame name;
        if its bounding box overlaps the stored one, the object is re-entered
        under its plain category name with id 0 and the old ambiguous entry is
        removed.
        """
        object_name = request.object.header.frame_id
        try:
            object_data = self.detected_objects[object_name]
        except KeyError:
            response.message = 'Something is wrong, trying to update the wrong object!'
            response.success = False
            return response
        # Now we can create a new entry on the dictionary of detected objects.
        # Sanity check: the incoming bounding box must overlap the stored one.
        area, _ = detect_utils.boundingbox_intersection(
            request.object.bounding_box, object_data.__getitem__('bounding_box'))
        if not area > 0:
            response.message = 'Something is wrong, trying to update the wrong object!'
            response.success = False
            return response
        # Sanity check, is the object already disambiguated?
        if object_data.__getitem__('unique') is False:
            new_object = detect_utils.ObjectData(copy(detect_utils.TEMPLATE_OBJ_DICT))
            new_object.__setitem__('category', request.object.category_str)
            new_object.__setitem__('bounding_box', copy(object_data.__getitem__('bounding_box')))
            new_object.__setitem__('mask', copy(object_data.__getitem__('mask')))
            # The object is unique!
            new_object.__setitem__('id', 0)
            new_object.__setitem__('unique', request.object.disambiguated)
            new_object_name = request.object.category_str
            self.detected_objects[new_object_name] = new_object
            # Since this object is not ambiguous anymore, we can delete from the dictionary
            # the element corresponding to 'class_id'.
            del self.detected_objects[object_name]
        response.success = True
        response.message = f'Updated infrormation for object {request.object.category_str}.'
        return response
    # --------------------------------------------------------------------------------------------
    # ----------------------------------------- AUXILIARY ----------------------------------------
    # --------------------------------------------------------------------------------------------
    def __publish_transformations(self, detected_objects: Dict):
        """Publish a TF frame for every detected object of a relevant category.

        For each object: compute the mask-centroid pixel via the heuristic,
        back-project it to 3D with the latest depth image, transform it into
        the base frame, and broadcast the resulting transform.
        """
        now = rclpy.time.Time()
        heuristic_params = heuristic.Parameters()
        heuristic_params.use_max = True
        # Echo the TF between camera and base frame
        constant_tf = self.tf_buffer.lookup_transform(self.base_frame, self.camera_frame, now)
        base_T_camera = homogeneous_matrix(
            constant_tf.transform.translation, constant_tf.transform.rotation)
        # Iterate the detected objects and compute the transform
        for key in detected_objects.keys():
            if self.detected_objects[key]['category'] not in self.objects:
                self.get_logger().debug(f'Key {key} not relevant, ignoring.')
                continue
            t = TransformStamped()
            t.header.stamp = self.get_clock().now().to_msg()
            t.header.frame_id = self.base_frame
            t.child_frame_id = str(key)
            # Compute centroid of the bounding box
            try:
                self.get_logger().debug(
                    f'Mask: {self.detected_objects[key]["mask"].shape}')
                self.get_logger().debug(
                    f'Bounding box: {self.detected_objects[key]["bounding_box"]}')
                point, orientation = heuristic.compute_mask_frame(
                    self.objects,
                    self.detected_objects[key]['category'],
                    self.detected_objects[key]['bounding_box'],
                    self.detected_objects[key]['mask'],
                    heuristic_params
                )
            except ValueError:
                # Heuristic could not produce a frame for this object; skip it.
                continue
            # Transform the centroid in a 3D point
            self.get_logger().debug(f'{key} 2D point: {point}.')
            np_depth_image = camera_utils.ros_img_to_np_img(self.latest_depth_ros_image, 'mono16')
            if self.get_logger().get_effective_level() == 'DEBUG':
                # For debugging:
                plt.imshow(np_depth_image)
                plt.show()
            point_3D = camera_utils.pixel_to_point_transform(
                point, np_depth_image, self.camera_model)
            self.get_logger().debug(f'{key} 3D point: {point_3D}.')
            # Centroid transform matrix
            camera_T_obj = translation_matrix(point_3D)
            self.get_logger().debug(f'Translation:\n {camera_T_obj}.')
            base_T_obj = base_T_camera@camera_T_obj
            self.get_logger().debug(f'Transform:\n {base_T_obj}.')
            # Build the TF message
            t.transform.translation.x = base_T_obj[0, 3]
            t.transform.translation.y = base_T_obj[1, 3]
            t.transform.translation.z = base_T_obj[2, 3]
            # Use same orientation as the base frame
            t.transform.rotation.x = 0.0
            t.transform.rotation.y = 0.0
            t.transform.rotation.z = 0.0
            t.transform.rotation.w = 1.0
            if not self.simple_rot:
                # Get rotation from heuristic
                try:
                    t.transform.rotation.x = orientation[0]
                    t.transform.rotation.y = orientation[1]
                    t.transform.rotation.z = orientation[2]
                    t.transform.rotation.w = orientation[3]
                except AssertionError:
                    # Keep the identity rotation set above when the heuristic
                    # orientation is unusable.
                    self.get_logger().warn(f'Orientation error for item {key}.')
                    self.get_logger().warn(f'Got value: {orientation}.')
            # Send the transformation
            self.br.sendTransform(t)
    def __publish_masks(self, result: Dict, np_image: np.array):
        """Publish an Image with the detected masks."""
        masked_image = self.__visualize(result, np_image)
        cv_result = np.zeros(shape=masked_image.shape, dtype=np.uint8)
        cv2.convertScaleAbs(masked_image, cv_result)
        image_msg = _cv_bridge.cv2_to_imgmsg(cv_result, 'bgr8')
        # Reuse the source image header so the overlay stays time-synchronized.
        image_msg.header = self.latest_color_ros_image.header
        self._masked_image_publisher.publish(image_msg)
    def __visualize(self, result: Dict, image: np.ndarray):
        """Render the detection overlay into an RGB numpy array via matplotlib."""
        fig = Figure()
        canvas = FigureCanvasAgg(fig)
        axes = fig.gca()
        self.detectron.print_results_on_image(image, result, axes=axes)
        fig.tight_layout()
        canvas.draw()
        # NOTE(review): np.fromstring is deprecated in NumPy;
        # np.frombuffer(canvas.tostring_rgb(), ...) is the modern equivalent.
        result = np.fromstring(canvas.tostring_rgb(), dtype='uint8')
        _, _, w, h = fig.bbox.bounds
        result = result.reshape((int(h), int(w), 3))
        return result
def main(args=None):
    """Entry point: initialize rclpy, spin the detection node, then shut down."""
    rclpy.init(args=args)
    node = MaskRCNNDetection()
    rclpy.spin(node)
    rclpy.shutdown()
# Allow running the node as a standalone script (e.g. `python3 this_file.py`).
if __name__ == '__main__':
    main()
| [
"geometry_msgs.msg.TransformStamped",
"cv2.convertScaleAbs",
"perception_utils.transformations.translation_matrix",
"tf2_ros.transform_listener.TransformListener",
"numpy.array",
"rclpy.init",
"copy.copy",
"rclpy.time.Time",
"matplotlib.pyplot.imshow",
"object_recognition_interfaces.msg.BoolMask",... | [((2732, 2752), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (2750, 2752), False, 'import cv_bridge\n'), ((20753, 20774), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (20763, 20774), False, 'import rclpy\n'), ((20826, 20856), 'rclpy.spin', 'rclpy.spin', (['maskRCNN_detection'], {}), '(maskRCNN_detection)\n', (20836, 20856), False, 'import rclpy\n'), ((20862, 20878), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (20876, 20878), False, 'import rclpy\n'), ((3270, 3331), 'rcl_interfaces.msg.ParameterDescriptor', 'ParameterDescriptor', ([], {'description': '"""List of objects to detect."""'}), "(description='List of objects to detect.')\n", (3289, 3331), False, 'from rcl_interfaces.msg import ParameterDescriptor\n'), ((3361, 3437), 'rcl_interfaces.msg.ParameterDescriptor', 'ParameterDescriptor', ([], {'description': '"""Timer frequence for the object detection."""'}), "(description='Timer frequence for the object detection.')\n", (3380, 3437), False, 'from rcl_interfaces.msg import ParameterDescriptor\n'), ((3469, 3604), 'rcl_interfaces.msg.ParameterDescriptor', 'ParameterDescriptor', ([], {'description': '"""Perform detection only."""', 'additional_constraints': '"""If false, a frame is attached to every object."""'}), "(description='Perform detection only.',\n additional_constraints='If false, a frame is attached to every object.')\n", (3488, 3604), False, 'from rcl_interfaces.msg import ParameterDescriptor\n'), ((3655, 3808), 'rcl_interfaces.msg.ParameterDescriptor', 'ParameterDescriptor', ([], {'description': '"""Use the same rotation as the base frame."""', 'additional_constraints': '"""If false, rotation is given from the heuristic."""'}), "(description='Use the same rotation as the base frame.',\n additional_constraints='If false, rotation is given from the heuristic.')\n", (3674, 3808), False, 'from rcl_interfaces.msg import ParameterDescriptor\n'), ((5401, 5430), 
'object_detection.maskRCNN.MaskRCNN_detectron', 'maskRCNN.MaskRCNN_detectron', ([], {}), '()\n', (5428, 5430), True, 'import object_detection.maskRCNN as maskRCNN\n'), ((6632, 6658), 'tf2_ros.TransformBroadcaster', 'TransformBroadcaster', (['self'], {}), '(self)\n', (6652, 6658), False, 'from tf2_ros import TransformBroadcaster\n'), ((6861, 6900), 'tf2_ros.transform_listener.TransformListener', 'TransformListener', (['self.tf_buffer', 'self'], {}), '(self.tf_buffer, self)\n', (6878, 6900), False, 'from tf2_ros.transform_listener import TransformListener\n'), ((8116, 8355), 'perception_utils.pinhole_camera.Camera', 'camera_utils.Camera', ([], {'width': 'camera_info.width', 'height': 'camera_info.height', 'fx': 'self.camera_proj_matrix[0, 0]', 'fy': 'self.camera_proj_matrix[1, 1]', 'cx': 'self.camera_proj_matrix[0, 2]', 'cy': 'self.camera_proj_matrix[1, 2]', 'pixel_center': '(0.0)'}), '(width=camera_info.width, height=camera_info.height, fx=\n self.camera_proj_matrix[0, 0], fy=self.camera_proj_matrix[1, 1], cx=\n self.camera_proj_matrix[0, 2], cy=self.camera_proj_matrix[1, 2],\n pixel_center=0.0)\n', (8135, 8355), True, 'import perception_utils.pinhole_camera as camera_utils\n'), ((10386, 10453), 'perception_utils.pinhole_camera.ros_img_to_np_img', 'camera_utils.ros_img_to_np_img', (['self.latest_color_ros_image', '"""rgb8"""'], {}), "(self.latest_color_ros_image, 'rgb8')\n", (10416, 10453), True, 'import perception_utils.pinhole_camera as camera_utils\n'), ((10814, 10836), 'copy.copy', 'copy', (['detection_result'], {}), '(detection_result)\n', (10818, 10836), False, 'from copy import copy\n'), ((10856, 10874), 'copy.copy', 'copy', (['color_img_np'], {}), '(color_img_np)\n', (10860, 10874), False, 'from copy import copy\n'), ((16345, 16362), 'rclpy.time.Time', 'rclpy.time.Time', ([], {}), '()\n', (16360, 16362), False, 'import rclpy\n'), ((16390, 16412), 'object_detection.mask_centroid_heuristic.Parameters', 'heuristic.Parameters', ([], {}), '()\n', (16410, 16412), 
True, 'import object_detection.mask_centroid_heuristic as heuristic\n'), ((16625, 16715), 'perception_utils.homogeneous_matrix.homogeneous_matrix', 'homogeneous_matrix', (['constant_tf.transform.translation', 'constant_tf.transform.rotation'], {}), '(constant_tf.transform.translation, constant_tf.transform\n .rotation)\n', (16643, 16715), False, 'from perception_utils.homogeneous_matrix import homogeneous_matrix\n'), ((19991, 20041), 'numpy.zeros', 'np.zeros', ([], {'shape': 'masked_image.shape', 'dtype': 'np.uint8'}), '(shape=masked_image.shape, dtype=np.uint8)\n', (19999, 20041), True, 'import numpy as np\n'), ((20050, 20094), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['masked_image', 'cv_result'], {}), '(masked_image, cv_result)\n', (20069, 20094), False, 'import cv2\n'), ((20352, 20360), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (20358, 20360), False, 'from matplotlib.figure import Figure\n'), ((20378, 20398), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (20393, 20398), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((6798, 6832), 'rclpy.duration.Duration', 'rclpy.duration.Duration', ([], {'seconds': '(5)'}), '(seconds=5)\n', (6821, 6832), False, 'import rclpy\n'), ((9051, 9085), 'os.path.join', 'os.path.join', (['root_path', 'repo_path'], {}), '(root_path, repo_path)\n', (9063, 9085), False, 'import os\n'), ((17028, 17046), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (17044, 17046), False, 'from geometry_msgs.msg import TransformStamped\n'), ((18039, 18108), 'perception_utils.pinhole_camera.ros_img_to_np_img', 'camera_utils.ros_img_to_np_img', (['self.latest_depth_ros_image', '"""mono16"""'], {}), "(self.latest_depth_ros_image, 'mono16')\n", (18069, 18108), True, 'import perception_utils.pinhole_camera as camera_utils\n'), ((18303, 18382), 'perception_utils.pinhole_camera.pixel_to_point_transform', 
'camera_utils.pixel_to_point_transform', (['point', 'np_depth_image', 'self.camera_model'], {}), '(point, np_depth_image, self.camera_model)\n', (18340, 18382), True, 'import perception_utils.pinhole_camera as camera_utils\n'), ((18536, 18564), 'perception_utils.transformations.translation_matrix', 'translation_matrix', (['point_3D'], {}), '(point_3D)\n', (18554, 18564), False, 'from perception_utils.transformations import translation_matrix\n'), ((8048, 8071), 'numpy.array', 'np.array', (['camera_info.p'], {}), '(camera_info.p)\n', (8056, 8071), True, 'import numpy as np\n'), ((9183, 9223), 'os.path.join', 'os.path.join', (['save_path', '"""color_img.jpg"""'], {}), "(save_path, 'color_img.jpg')\n", (9195, 9223), False, 'import os\n'), ((11553, 11714), 'perception_utils.process_detection.process_maskRCNN_results', 'detect_utils.process_maskRCNN_results', (['detection_result', 'self.detected_objects', 'self.objects_list', 'self.first_detection'], {'only_update': 'self.trigger_place'}), '(detection_result, self.\n detected_objects, self.objects_list, self.first_detection, only_update=\n self.trigger_place)\n', (11590, 11714), True, 'import perception_utils.process_detection as detect_utils\n'), ((13016, 13032), 'object_recognition_interfaces.msg.DetectedObject', 'DetectedObject', ([], {}), '()\n', (13030, 13032), False, 'from object_recognition_interfaces.msg import BoolList, BoolMask, DetectedObject\n'), ((13507, 13517), 'object_recognition_interfaces.msg.BoolMask', 'BoolMask', ([], {}), '()\n', (13515, 13517), False, 'from object_recognition_interfaces.msg import BoolList, BoolMask, DetectedObject\n'), ((13544, 13554), 'object_recognition_interfaces.msg.BoolList', 'BoolList', ([], {}), '()\n', (13552, 13554), False, 'from object_recognition_interfaces.msg import BoolList, BoolMask, DetectedObject\n'), ((14983, 15019), 'copy.copy', 'copy', (['detect_utils.TEMPLATE_OBJ_DICT'], {}), '(detect_utils.TEMPLATE_OBJ_DICT)\n', (14987, 15019), False, 'from copy import 
copy\n'), ((17539, 17729), 'object_detection.mask_centroid_heuristic.compute_mask_frame', 'heuristic.compute_mask_frame', (['self.objects', "self.detected_objects[key]['category']", "self.detected_objects[key]['bounding_box']", "self.detected_objects[key]['mask']", 'heuristic_params'], {}), "(self.objects, self.detected_objects[key][\n 'category'], self.detected_objects[key]['bounding_box'], self.\n detected_objects[key]['mask'], heuristic_params)\n", (17567, 17729), True, 'import object_detection.mask_centroid_heuristic as heuristic\n'), ((18225, 18251), 'matplotlib.pyplot.imshow', 'plt.imshow', (['np_depth_image'], {}), '(np_depth_image)\n', (18235, 18251), True, 'import matplotlib.pyplot as plt\n'), ((18268, 18278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18276, 18278), True, 'import matplotlib.pyplot as plt\n')] |
import pickle
import os
import numpy as np
import tensorflow as tf
def get_model_predictions(original_image_features_pickle_file):
    """Aggregate per-tile features into one vector per (x, y) tile position.

    The pickle file is expected to hold ``[filenames, features]`` where each
    filename ends in ``..._<level>_<x>_<y>.<ext>`` and ``features[i]`` is the
    corresponding tile's spatial feature map.  Each map is averaged over its
    two spatial axes and the resulting vectors for the three pyramid levels
    of the same (x, y) position are concatenated channel-wise.

    Parameters
    ----------
    original_image_features_pickle_file : str or Path
        Path to the pickled ``[filenames, features]`` pair.

    Returns
    -------
    list of numpy.ndarray
        One vector of length ``3 * num_feature`` per distinct (x, y) tile
        position, in first-seen order.
    """
    with open(original_image_features_pickle_file, "rb") as input_file:
        filenames, features = pickle.load(input_file)

    # Parse "<name>_<level>_<x>_<y>.<ext>" into integer (level, x, y) triples.
    # (The previous version also tracked max_x/max_y here, but never used them.)
    index_level_x_y = []
    for filename in filenames:
        tile_name = os.path.split(filename)[-1].split(".")[0]
        parts = tile_name.split("_")
        index_level_x_y.append([int(parts[-3]), int(parts[-2]), int(parts[-1])])

    num_feature = np.array(features[0]).shape[-1]
    feature_map = {}
    for i, feature in enumerate(features):
        # Average the tile's feature map over both spatial axes.
        averaged = np.reshape(np.mean(np.mean(feature, axis=0), axis=0), num_feature)
        level, x, y = index_level_x_y[i]
        # Each (x, y) position accumulates the three pyramid levels,
        # concatenated channel-wise at level-dependent offsets.
        slot = feature_map.setdefault((x, y), np.zeros(3 * num_feature))
        slot[num_feature * level:num_feature * (level + 1)] = averaged
    return list(feature_map.values())
def pca_transform_tensors(pca, num_feature, tensor_width, tensor_height, tensor_3D):
    """Project every pixel vector of each sample through a fitted PCA.

    For each sample (a 2-D grid of feature vectors), each cell is transformed
    with ``pca`` into ``num_feature`` channels, and the resulting map is
    center-cropped/padded to ``tensor_width`` x ``tensor_height``.

    Returns a numpy array stacking the processed samples.
    """
    padded_samples = []
    for sample in tensor_3D:
        rows = len(sample)
        cols = len(sample[0])
        projected = np.zeros((rows, cols, num_feature))
        # pca.transform expects a batch; feed one pixel vector at a time.
        for r in range(rows):
            for c in range(cols):
                projected[r, c, :] = pca.transform([sample[r][c]])[0]
        padded_samples.append(
            tf.image.resize_with_crop_or_pad(projected, tensor_width, tensor_height))
    return np.asarray(padded_samples)
def create_tensors(original_image_features_pickle_file, pca, pca_num_feature, tensors_size, saving_folder):
    """Build a PCA-reduced, square feature tensor for one image and pickle it.

    Loads the pickled ``[filenames, features]`` pair, places the spatially
    averaged tile features on an (x, y) grid with the three pyramid levels
    concatenated channel-wise, reduces every grid cell with ``pca`` to
    ``pca_num_feature`` channels, center-crops/pads the grid to
    ``tensors_size`` x ``tensors_size``, and pickles the result under the
    same file name inside ``saving_folder``.
    """
    with open(original_image_features_pickle_file, "rb") as input_file:
        filenames, features = pickle.load(input_file)

    # Filename pattern: <image_name>_<level>_<x>_<y>.png
    max_x = -1
    max_y = -1
    index_level_x_y = []
    for filename in filenames:
        stem = os.path.split(filename)[-1].split(".")[0]
        pieces = stem.split("_")
        level, x, y = int(pieces[-3]), int(pieces[-2]), int(pieces[-1])
        index_level_x_y.append([level, x, y])
        max_x = max(max_x, x)
        max_y = max(max_y, y)

    num_feature = np.array(features[0]).shape[-1]
    # Grid of concatenated per-level feature vectors, indexed by (x, y).
    feature_map = np.zeros((max_x + 1, max_y + 1, num_feature * 3))
    for idx, feature in enumerate(features):
        averaged = np.reshape(np.mean(np.mean(feature, axis=0), axis=0), num_feature)
        level, x, y = index_level_x_y[idx]
        feature_map[x, y, num_feature * level:num_feature * (level + 1)] = averaged

    # Reduce each grid cell with PCA, one vector at a time.
    grid_w = len(feature_map)
    grid_h = len(feature_map[0])
    reduced = np.zeros((grid_w, grid_h, pca_num_feature))
    for i in range(grid_w):
        for j in range(grid_h):
            reduced[i, j, :] = pca.transform([feature_map[i][j]])[0]

    reduced = tf.image.resize_with_crop_or_pad(reduced, tensors_size, tensors_size)

    out_name = os.path.split(original_image_features_pickle_file)[-1]
    with open(os.path.join(saving_folder, out_name), 'wb') as handle:
        pickle.dump(reduced, handle)
# return resized_tensor
# for i in range(len(features)):
# feature_i = np.mean(np.mean(features[i],axis=0),axis=0)
# feature_i = np.reshape(feature_i,(num_feature))
# start_channel = num_feature*index_level_x_y[i][0]
# end_channel = num_feature*(index_level_x_y[i][0]+1)
# x = index_level_x_y[i][1]
# y = index_level_x_y[i][2]
# feature_map[x,y,start_channel:end_channel] = feature_i
# return np.amax(np.amax(feature_map,axis=0),axis=0)
# num_feature = np.array(features[0]).shape[-1]
# feature_map = np.zeros(num_feature)
# for i in range(len(features)):
# feature_i = np.mean(np.mean(features[i],axis=0),axis=0)
# feature_i = np.reshape(feature_i,(num_feature))
# if index_level_x_y[i][0]==1:
# feature_map = feature_map+feature_i
# return feature_map/(len(features)/3.0)
# feature_map = np.zeros((42,42,num_feature))
# for i in range(len(features)):
# if index_level_x_y[i][0]==0:
# feature_i = np.mean(np.mean(features[i],axis=0),axis=0)
# feature_i = np.reshape(feature_i,(num_feature))
# x = index_level_x_y[i][1]
# y = index_level_x_y[i][2]
# feature_map[x,y,:] = feature_i
# return np.amax(np.amax(feature_map,axis=0),axis=0)
# def create_tensors(original_image_features_pickle_file):
# filenames_and_features = []
# with open(original_image_features_pickle_file, "rb") as input_file:
# filenames_and_features = pickle.load(input_file)
# filenames = filenames_and_features[0]
# features = filenames_and_features[1]
# ### find the height and width of input tensor using filenames_and_features
# ### filename pattern tile_path = os.path.join(tile_folder,image_name+"_"+str(i)+"_"+str(x)+"_"+str(y)+".png")
# max_x = -1
# max_y = -1
# index_level_x_y = []
# for filename in filenames:
# tile_name = os.path.split(filename)[-1]
# tile_name = tile_name.split(".")[0]
# splited_tile_name = tile_name.split("_")
# tile_level = int(splited_tile_name[-3])
# tile_x = int(splited_tile_name[-2])
# tile_y = int(splited_tile_name[-1])
# index_level_x_y.append([tile_level,tile_x,tile_y])
# if max_x<tile_x:
# max_x = tile_x
# if max_y<tile_y:
# max_y = tile_y
# # for i in range(len(features)):
# # feature_i = np.mean(np.mean(features[i],axis=0),axis=0)
# # feature_i = np.reshape(feature_i,(num_feature))
# # start_channel = num_feature*index_level_x_y[i][0]
# # end_channel = num_feature*(index_level_x_y[i][0]+1)
# # x = index_level_x_y[i][1]
# # y = index_level_x_y[i][2]
# # feature_map[x,y,start_channel:end_channel] = feature_i
# # return np.amax(np.amax(feature_map,axis=0),axis=0)
#
# num_feature = np.array(features[0]).shape[-1]
# feature_map = np.zeros(num_feature)
# for i in range(len(features)):
# feature_i = np.mean(np.mean(features[i],axis=0),axis=0)
# feature_i = np.reshape(feature_i,(num_feature))
# if index_level_x_y[i][0]==1:
# feature_map = feature_map+feature_i
# return feature_map/(len(features)/3.0)
#
# # feature_map = np.zeros((42,42,num_feature))
# # for i in range(len(features)):
# # if index_level_x_y[i][0]==0:
# # feature_i = np.mean(np.mean(features[i],axis=0),axis=0)
# # feature_i = np.reshape(feature_i,(num_feature))
# # x = index_level_x_y[i][1]
# # y = index_level_x_y[i][2]
# # feature_map[x,y,:] = feature_i
# # return np.amax(np.amax(feature_map,axis=0),axis=0)
| [
"tensorflow.image.resize_with_crop_or_pad",
"numpy.mean",
"numpy.reshape",
"pickle.dump",
"pickle.load",
"numpy.asarray",
"os.path.join",
"os.path.split",
"numpy.array",
"numpy.zeros"
] | [((2496, 2522), 'numpy.asarray', 'np.asarray', (['resized_tensor'], {}), '(resized_tensor)\n', (2506, 2522), True, 'import numpy as np\n'), ((3716, 3765), 'numpy.zeros', 'np.zeros', (['(max_x + 1, max_y + 1, num_feature * 3)'], {}), '((max_x + 1, max_y + 1, num_feature * 3))\n', (3724, 3765), True, 'import numpy as np\n'), ((4253, 4286), 'numpy.zeros', 'np.zeros', (['(w, h, pca_num_feature)'], {}), '((w, h, pca_num_feature))\n', (4261, 4286), True, 'import numpy as np\n'), ((4536, 4612), 'tensorflow.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['resized_tensor', 'tensors_size', 'tensors_size'], {}), '(resized_tensor, tensors_size, tensors_size)\n', (4568, 4612), True, 'import tensorflow as tf\n'), ((277, 300), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (288, 300), False, 'import pickle\n'), ((1152, 1186), 'numpy.reshape', 'np.reshape', (['feature_i', 'num_feature'], {}), '(feature_i, num_feature)\n', (1162, 1186), True, 'import numpy as np\n'), ((2087, 2116), 'numpy.zeros', 'np.zeros', (['(w, h, num_feature)'], {}), '((w, h, num_feature))\n', (2095, 2116), True, 'import numpy as np\n'), ((2801, 2824), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (2812, 2824), False, 'import pickle\n'), ((3884, 3918), 'numpy.reshape', 'np.reshape', (['feature_i', 'num_feature'], {}), '(feature_i, num_feature)\n', (3894, 3918), True, 'import numpy as np\n'), ((4661, 4711), 'os.path.split', 'os.path.split', (['original_image_features_pickle_file'], {}), '(original_image_features_pickle_file)\n', (4674, 4711), False, 'import os\n'), ((4882, 4917), 'pickle.dump', 'pickle.dump', (['resized_tensor', 'handle'], {}), '(resized_tensor, handle)\n', (4893, 4917), False, 'import pickle\n'), ((497, 520), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (510, 520), False, 'import os\n'), ((946, 967), 'numpy.array', 'np.array', (['features[0]'], {}), '(features[0])\n', (954, 967), True, 'import 
numpy as np\n'), ((1095, 1123), 'numpy.mean', 'np.mean', (['features[i]'], {'axis': '(0)'}), '(features[i], axis=0)\n', (1102, 1123), True, 'import numpy as np\n'), ((1614, 1639), 'numpy.zeros', 'np.zeros', (['(3 * num_feature)'], {}), '(3 * num_feature)\n', (1622, 1639), True, 'import numpy as np\n'), ((2398, 2472), 'tensorflow.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['feature_map', 'tensor_width', 'tensor_height'], {}), '(feature_map, tensor_width, tensor_height)\n', (2430, 2472), True, 'import tensorflow as tf\n'), ((3216, 3239), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (3229, 3239), False, 'import os\n'), ((3665, 3686), 'numpy.array', 'np.array', (['features[0]'], {}), '(features[0])\n', (3673, 3686), True, 'import numpy as np\n'), ((3827, 3855), 'numpy.mean', 'np.mean', (['features[i]'], {'axis': '(0)'}), '(features[i], axis=0)\n', (3834, 3855), True, 'import numpy as np\n'), ((4786, 4855), 'os.path.join', 'os.path.join', (['saving_folder', 'original_image_features_pickle_file_name'], {}), '(saving_folder, original_image_features_pickle_file_name)\n', (4798, 4855), False, 'import os\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import astropy.units as u
from astropy.utils.misc import isiterable
__all__ = ['calc_total_error']
def calc_total_error(data, bkg_error, effective_gain):
    """
    Combine a background-only error array with source Poisson noise into
    one total-error array.

    Parameters
    ----------
    data : array_like or `~astropy.units.Quantity`
        The (background-subtracted) data array.
    bkg_error : array_like or `~astropy.units.Quantity`
        Pixel-wise Gaussian 1-sigma background-only errors, with the same
        shape as ``data``; includes all "background" error sources but
        excludes the Poisson error of the sources themselves.
    effective_gain : float, array-like, or `~astropy.units.Quantity`
        Ratio of counts (e.g. electrons or photons) to the units of
        ``data``; a scalar or an array of the same shape as ``data``.

    Returns
    -------
    total_error : `~numpy.ndarray` or `~astropy.units.Quantity`
        ``sqrt(bkg_error**2 + max(data / effective_gain, 0))``.  A Quantity
        is returned only when all three inputs are Quantity objects.

    Notes
    -----
    Pixels where ``data`` is negative contribute no Poisson term.  Either
    all or none of the inputs may carry units; with units,
    ``data * effective_gain`` must come out in count units (electron or
    photon).  ``effective_gain`` must be strictly positive everywhere.
    """
    data = np.asanyarray(data)
    bkg_error = np.asanyarray(bkg_error)

    # Units must be used consistently: on all three inputs or on none.
    unit_flags = [hasattr(value, 'unit')
                  for value in (data, bkg_error, effective_gain)]
    use_units = all(unit_flags)
    if any(unit_flags) and not use_units:
        raise ValueError('If any of data, bkg_error, or effective_gain has '
                         'units, then they all must all have units.')

    if use_units:
        # The Poisson term only makes sense in count units.
        datagain_unit = (data * effective_gain).unit
        if datagain_unit not in (u.electron, u.photon):
            raise u.UnitsError('(data * effective_gain) has units of "{0}", '
                             'but it must have count units (u.electron '
                             'or u.photon).'.format(datagain_unit))

    # Broadcast a scalar gain to the data shape; validate an array gain.
    if not isiterable(effective_gain):
        effective_gain = np.zeros(data.shape) + effective_gain
    else:
        effective_gain = np.asanyarray(effective_gain)
        if effective_gain.shape != data.shape:
            raise ValueError('If input effective_gain is 2D, then it must '
                             'have the same shape as the input data.')

    if np.any(effective_gain <= 0):
        raise ValueError('effective_gain must be strictly positive '
                         'everywhere.')

    # Negative data pixels contribute no Poisson variance.
    if use_units:
        source_variance = np.maximum((data * data.unit) /
                                    effective_gain.value,
                                    0. * bkg_error.unit**2)
    else:
        source_variance = np.maximum(data / effective_gain, 0)

    return np.sqrt(bkg_error**2 + source_variance)
| [
"numpy.sqrt",
"astropy.utils.misc.isiterable",
"numpy.any",
"numpy.asanyarray",
"numpy.zeros",
"numpy.maximum"
] | [((3306, 3325), 'numpy.asanyarray', 'np.asanyarray', (['data'], {}), '(data)\n', (3319, 3325), True, 'import numpy as np\n'), ((3342, 3366), 'numpy.asanyarray', 'np.asanyarray', (['bkg_error'], {}), '(bkg_error)\n', (3355, 3366), True, 'import numpy as np\n'), ((4442, 4469), 'numpy.any', 'np.any', (['(effective_gain <= 0)'], {}), '(effective_gain <= 0)\n', (4448, 4469), True, 'import numpy as np\n'), ((4862, 4903), 'numpy.sqrt', 'np.sqrt', (['(bkg_error ** 2 + source_variance)'], {}), '(bkg_error ** 2 + source_variance)\n', (4869, 4903), True, 'import numpy as np\n'), ((4085, 4111), 'astropy.utils.misc.isiterable', 'isiterable', (['effective_gain'], {}), '(effective_gain)\n', (4095, 4111), False, 'from astropy.utils.misc import isiterable\n'), ((4211, 4240), 'numpy.asanyarray', 'np.asanyarray', (['effective_gain'], {}), '(effective_gain)\n', (4224, 4240), True, 'import numpy as np\n'), ((4625, 4703), 'numpy.maximum', 'np.maximum', (['(data * data.unit / effective_gain.value)', '(0.0 * bkg_error.unit ** 2)'], {}), '(data * data.unit / effective_gain.value, 0.0 * bkg_error.unit ** 2)\n', (4635, 4703), True, 'import numpy as np\n'), ((4813, 4849), 'numpy.maximum', 'np.maximum', (['(data / effective_gain)', '(0)'], {}), '(data / effective_gain, 0)\n', (4823, 4849), True, 'import numpy as np\n'), ((4138, 4158), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (4146, 4158), True, 'import numpy as np\n')] |
import time
import argparse
import json
import numpy as np
from itertools import chain
from mlir.dialects import linalg
from mlir.dialects.linalg.opdsl.lang import OperandKind
from mlir.runtime import *
from ..core.compilation import numpy_type
from ..core.deprecated_compilation import compile_and_callback
from ..core.search_vars import collect_variables
from ..core import experts
def parse_args(argv):
  """Build the CLI parser and parse ``argv`` (``argv[0]`` is skipped)."""
  parser = argparse.ArgumentParser(description='Command-line directed search.')
  # (flag, options) table; registered in order below.
  arguments = [
      ('--op', dict(type=str, required=True,
                    help='Name of the linalg op to instantiate.')),
      ('--expert', dict(type=str, required=True,
                        help='Name of the expert to use for compilation.')),
      ('--assign',
       dict(type=str,
            help='A json dictionary of key-value pairs to specify op or expert variables.'
           )),
      ('--iters', dict(type=int, default=100,
                       help='Number of iterations of the MLIR loop.')),
      ('--runs',
       dict(type=int, default=10,
            help='Number of times the MLIR program is run to measure runtime.')),
  ]
  for flag, options in arguments:
    parser.add_argument(flag, **options)
  return parser.parse_args(argv[1:])
def validate_args(args):
  """Validate parsed CLI arguments.

  Checks that the requested op and expert exist, that every variable the
  op/expert requires has an assignment, and that the iteration/run counts
  are non-negative.  Each failure is printed.

  Returns a ``(op, expert, assignments, iters, runs)`` tuple on success,
  or ``None`` when any check failed.
  """
  no_errors = True

  def error(msg):
    nonlocal no_errors
    no_errors = False
    print(msg)

  if not hasattr(linalg, args.op):
    error(f'Unknown op: {args.op}.')
  if not hasattr(experts, args.expert):
    error(f'Unknown expert name: {args.expert}')
  # BUG FIX: previously getattr() was called unconditionally, so an unknown
  # op/expert crashed with AttributeError instead of reporting the error.
  if not no_errors:
    return None
  op = getattr(linalg, args.op)
  expert = getattr(experts, args.expert)

  variables = collect_variables(op)
  variables.update(expert.variables)
  # BUG FIX: the old loop iterated over `assignments` and tested membership
  # in itself (always true), so missing assignments were never reported (and
  # it referenced an undefined name `variable`).  Iterate the required
  # variables instead.  Also tolerate an omitted --assign flag.
  assignments = json.loads(args.assign) if args.assign else {}
  for var_name in variables:
    if var_name not in assignments:
      error(f'Variable {var_name} was not assigned.')

  iters = args.iters
  if iters < 0:
    error(f'Number of iterations must be non-negative.')
  runs = args.runs
  if runs < 0:
    error(f'Number of runs must be non-negative.')

  if no_errors:
    return (op, expert, assignments, iters, runs)
  else:
    return None
def invoke(op, expert, assignments, iters, runs):
  """Compile `op` with `expert` under `assignments`, print the module, and
  (when both `iters` and `runs` are positive) benchmark the compiled code."""

  def section(name):
    # Small header separating the output sections.
    print(f'--- {name}')

  def timed(callback, *args):
    # Wall-clock duration of `callback(*args)` in seconds.
    # NOTE(review): currently unused within this function.
    start = time.time()
    callback(*args)
    end = time.time()
    return end - start

  def random_array_inputs():
    # Build one random numpy array per registered operand, with dtype and
    # shape taken from the `assignments` mapping.
    results = []
    for odef in op.model.registered_operands.values():
      assert (odef.kind == OperandKind.InputTensor or
              odef.kind == OperandKind.OutputTensor)
      np_type = numpy_type(assignments[odef.type_var.name])
      shape = [assignments[sym.symname] for sym in odef.size_exprs]
      arr0 = np.random.rand(*shape)
      arr = arr0.astype(np_type)
      results.append(arr)
    return results

  def to_memref_ptr(arr):
    # Wrap a numpy array as a pointer-to-pointer to a ranked memref
    # descriptor — the calling convention the execution engine expects.
    memref_descr = get_ranked_memref_descriptor(arr)
    return ctypes.pointer(ctypes.pointer(memref_descr))

  def measure_runtime(execution_engine):
    # Run the compiled 'main' entry point `runs` times (each performing
    # `iters` internal iterations) and report the best per-run time.
    array_inputs = random_array_inputs()
    memref_inputs = list(map(to_memref_ptr, array_inputs))

    # Out-parameter: a single int64 the callee fills with nanoseconds.
    index_ptr_t = ctypes.c_longlong * 1

    def invoke(iters):
      timing_data = (index_ptr_t)()
      execution_engine.invoke(
          'main', *memref_inputs, index_ptr_t(iters),
          ctypes.cast(timing_data, ctypes.POINTER(index_ptr_t)))
      # Benchmarking function returns time in nanoseconds.
      return timing_data[0] / 1.e9

    # Dry-run.
    invoke(iters=1)

    # Measure.
    times = []
    for _ in range(runs):
      times.append(invoke(iters=iters))

    # Report best of the runs.
    return min(times)

  def callback(module, execution_engine):
    # Called by compile_and_callback once the module is compiled.
    section('mlir')
    print(module)

    if iters > 0 and runs > 0:
      elapsed_time = measure_runtime(execution_engine)
      section('runtime')
      print(f'time: {elapsed_time}')
      print(f'iters: {iters}')
      print(f'throughput: {iters/elapsed_time}')

  compile_and_callback(
      op, expert('matmul_on_tensors', 'linalg.' + op.op_name, **assignments),
      callback, **assignments).print_ir(after_all=True)
def main(argv):
  """CLI entry point: parse, validate, then run the directed search."""
  validated = validate_args(parse_args(argv))
  if validated is not None:
    invoke(*validated)


if __name__ == '__main__':
  import sys
  main(sys.argv)
| [
"json.loads",
"time.time",
"numpy.random.rand",
"argparse.ArgumentParser"
] | [((421, 489), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command-line directed search."""'}), "(description='Command-line directed search.')\n", (444, 489), False, 'import argparse\n'), ((1660, 1683), 'json.loads', 'json.loads', (['args.assign'], {}), '(args.assign)\n', (1670, 1683), False, 'import json\n'), ((2231, 2242), 'time.time', 'time.time', ([], {}), '()\n', (2240, 2242), False, 'import time\n'), ((2273, 2284), 'time.time', 'time.time', ([], {}), '()\n', (2282, 2284), False, 'import time\n'), ((2658, 2680), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (2672, 2680), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core import Data
from glue.external.echo import delay_callback
from glue.viewers.matplotlib.state import (MatplotlibDataViewerState,
MatplotlibLayerState,
DeferredDrawCallbackProperty as DDCProperty,
DeferredDrawSelectionCallbackProperty as DDSCProperty)
from glue.core.state_objects import (StateAttributeLimitsHelper,
StateAttributeHistogramHelper)
from glue.core.exceptions import IncompatibleAttribute
from glue.core.data_combo_helper import ComponentIDComboHelper
from glue.utils import defer_draw
__all__ = ['HistogramViewerState', 'HistogramLayerState']
class HistogramViewerState(MatplotlibDataViewerState):
    """
    A state class that includes all the attributes for a histogram viewer.
    """

    x_att = DDSCProperty(docstring='The attribute to compute the histograms for')

    cumulative = DDCProperty(False, docstring='Whether to show the histogram as '
                                        'a cumulative histogram')
    normalize = DDCProperty(False, docstring='Whether to normalize the histogram '
                                       '(based on the total sum)')

    hist_x_min = DDCProperty(docstring='The minimum value used to compute the '
                                      'histogram')
    hist_x_max = DDCProperty(docstring='The maxumum value used to compute the '
                                      'histogram')
    hist_n_bin = DDCProperty(docstring='The number of bins in the histogram')

    common_n_bin = DDCProperty(True, docstring='The number of bins to use for '
                                          'all numerical components')

    def __init__(self, **kwargs):
        """Set up the helpers that keep limits/bins in sync with ``x_att``."""

        super(HistogramViewerState, self).__init__()

        # Keeps hist_x_min/hist_x_max/hist_n_bin consistent with the selected
        # attribute (optionally sharing n_bin across numerical components).
        self.hist_helper = StateAttributeHistogramHelper(self, 'x_att', lower='hist_x_min',
                                                      upper='hist_x_max', n_bin='hist_n_bin',
                                                      common_n_bin='common_n_bin')

        # Ties the x-axis view limits (x_min/x_max/x_log) to x_att.
        self.x_lim_helper = StateAttributeLimitsHelper(self, 'x_att', lower='x_min',
                                                    upper='x_max', log='x_log')

        self.add_callback('layers', self._layers_changed)

        # Populates the x_att selection with the components of current layers.
        self.x_att_helper = ComponentIDComboHelper(self, 'x_att')

        self.update_from_dict(kwargs)

    def _update_priority(self, name):
        # Order in which deferred state updates are applied: layers first,
        # then log flags, then limits/bin counts, then everything else.
        if name == 'layers':
            return 2
        elif name.endswith('_log'):
            return 0.5
        elif name.endswith(('_min', '_max', '_bin')):
            return 0
        else:
            return 1

    def flip_x(self):
        """
        Flip the x_min/x_max limits.
        """
        self.x_lim_helper.flip_limits()

    def update_bins_to_view(self):
        """
        Update the bins to match the current view.
        """
        # Delay callbacks so listeners see min/max change atomically; handle
        # flipped (descending) axes by swapping the bounds.
        with delay_callback(self, 'hist_x_min', 'hist_x_max'):
            if self.x_max > self.x_min:
                self.hist_x_min = self.x_min
                self.hist_x_max = self.x_max
            else:
                self.hist_x_min = self.x_max
                self.hist_x_max = self.x_min

    def _get_x_components(self):
        """Return the ``x_att`` component of every layer that provides it."""

        if self.x_att is None:
            return []

        # Construct list of components over all layers
        components = []

        for layer_state in self.layers:
            if isinstance(layer_state.layer, Data):
                layer = layer_state.layer
            else:
                layer = layer_state.layer.data
            try:
                components.append(layer.get_component(self.x_att))
            except IncompatibleAttribute:
                # Layers without this attribute simply don't contribute.
                pass

        return components

    @property
    def bins(self):
        """
        The position of the bins for the histogram based on the current state.
        """
        # n_bin + 1 edges; log-spaced when the x axis is logarithmic.
        if self.x_log:
            return np.logspace(np.log10(self.hist_x_min),
                               np.log10(self.hist_x_max),
                               self.hist_n_bin + 1)
        else:
            return np.linspace(self.hist_x_min, self.hist_x_max,
                               self.hist_n_bin + 1)

    @defer_draw
    def _layers_changed(self, *args):
        # Refresh the attribute choices whenever layers are added or removed.
        self.x_att_helper.set_multiple_data(self.layers_data)
class HistogramLayerState(MatplotlibLayerState):
    """
    A state class that includes all the attributes for layers in a histogram plot.
    """
    # Nothing histogram-specific is added here; all attributes come from the
    # MatplotlibLayerState base class.
| [
"glue.core.state_objects.StateAttributeLimitsHelper",
"numpy.log10",
"glue.external.echo.delay_callback",
"glue.viewers.matplotlib.state.DeferredDrawSelectionCallbackProperty",
"glue.core.state_objects.StateAttributeHistogramHelper",
"numpy.linspace",
"glue.viewers.matplotlib.state.DeferredDrawCallbackP... | [((986, 1055), 'glue.viewers.matplotlib.state.DeferredDrawSelectionCallbackProperty', 'DDSCProperty', ([], {'docstring': '"""The attribute to compute the histograms for"""'}), "(docstring='The attribute to compute the histograms for')\n", (998, 1055), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1074, 1166), 'glue.viewers.matplotlib.state.DeferredDrawCallbackProperty', 'DDCProperty', (['(False)'], {'docstring': '"""Whether to show the histogram as a cumulative histogram"""'}), "(False, docstring=\n 'Whether to show the histogram as a cumulative histogram')\n", (1085, 1166), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1227, 1323), 'glue.viewers.matplotlib.state.DeferredDrawCallbackProperty', 'DDCProperty', (['(False)'], {'docstring': '"""Whether to normalize the histogram (based on the total sum)"""'}), "(False, docstring=\n 'Whether to normalize the histogram (based on the total sum)')\n", (1238, 1323), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1385, 1457), 'glue.viewers.matplotlib.state.DeferredDrawCallbackProperty', 'DDCProperty', ([], {'docstring': '"""The minimum value used to compute the histogram"""'}), "(docstring='The minimum value used to compute the histogram')\n", (1396, 1457), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1517, 1589), 
'glue.viewers.matplotlib.state.DeferredDrawCallbackProperty', 'DDCProperty', ([], {'docstring': '"""The maxumum value used to compute the histogram"""'}), "(docstring='The maxumum value used to compute the histogram')\n", (1528, 1589), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1649, 1709), 'glue.viewers.matplotlib.state.DeferredDrawCallbackProperty', 'DDCProperty', ([], {'docstring': '"""The number of bins in the histogram"""'}), "(docstring='The number of bins in the histogram')\n", (1660, 1709), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1730, 1820), 'glue.viewers.matplotlib.state.DeferredDrawCallbackProperty', 'DDCProperty', (['(True)'], {'docstring': '"""The number of bins to use for all numerical components"""'}), "(True, docstring=\n 'The number of bins to use for all numerical components')\n", (1741, 1820), True, 'from glue.viewers.matplotlib.state import MatplotlibDataViewerState, MatplotlibLayerState, DeferredDrawCallbackProperty as DDCProperty, DeferredDrawSelectionCallbackProperty as DDSCProperty\n'), ((1983, 2121), 'glue.core.state_objects.StateAttributeHistogramHelper', 'StateAttributeHistogramHelper', (['self', '"""x_att"""'], {'lower': '"""hist_x_min"""', 'upper': '"""hist_x_max"""', 'n_bin': '"""hist_n_bin"""', 'common_n_bin': '"""common_n_bin"""'}), "(self, 'x_att', lower='hist_x_min', upper=\n 'hist_x_max', n_bin='hist_n_bin', common_n_bin='common_n_bin')\n", (2012, 2121), False, 'from glue.core.state_objects import StateAttributeLimitsHelper, StateAttributeHistogramHelper\n'), ((2260, 2349), 'glue.core.state_objects.StateAttributeLimitsHelper', 'StateAttributeLimitsHelper', (['self', '"""x_att"""'], {'lower': '"""x_min"""', 
'upper': '"""x_max"""', 'log': '"""x_log"""'}), "(self, 'x_att', lower='x_min', upper='x_max', log\n ='x_log')\n", (2286, 2349), False, 'from glue.core.state_objects import StateAttributeLimitsHelper, StateAttributeHistogramHelper\n'), ((2488, 2525), 'glue.core.data_combo_helper.ComponentIDComboHelper', 'ComponentIDComboHelper', (['self', '"""x_att"""'], {}), "(self, 'x_att')\n", (2510, 2525), False, 'from glue.core.data_combo_helper import ComponentIDComboHelper\n'), ((3071, 3119), 'glue.external.echo.delay_callback', 'delay_callback', (['self', '"""hist_x_min"""', '"""hist_x_max"""'], {}), "(self, 'hist_x_min', 'hist_x_max')\n", (3085, 3119), False, 'from glue.external.echo import delay_callback\n'), ((4266, 4332), 'numpy.linspace', 'np.linspace', (['self.hist_x_min', 'self.hist_x_max', '(self.hist_n_bin + 1)'], {}), '(self.hist_x_min, self.hist_x_max, self.hist_n_bin + 1)\n', (4277, 4332), True, 'import numpy as np\n'), ((4096, 4121), 'numpy.log10', 'np.log10', (['self.hist_x_min'], {}), '(self.hist_x_min)\n', (4104, 4121), True, 'import numpy as np\n'), ((4154, 4179), 'numpy.log10', 'np.log10', (['self.hist_x_max'], {}), '(self.hist_x_max)\n', (4162, 4179), True, 'import numpy as np\n')] |
from __future__ import annotations
from pathlib import Path
from typing import Optional
import numpy as np
import skimage.io
import skimage.measure
import bsmu.retinal_fundus.models.temp as temp
from bsmu.retinal_fundus.models.unet import trainer
from bsmu.retinal_fundus.models.unet.config_optic_disk_cup import OpticDiskCupModelTrainerConfig
from bsmu.retinal_fundus.models.utils import debug as debug_utils
def predict_on_dir_images(model_trainer: ModelTrainer, image_dir: Path, save_dir: Path,
                          zero_all_except_foreground_largest_connected_component: bool):
    """Run the model on every image file in ``image_dir`` and save the masks.

    Each predicted mask is resized back to the source image size
    (``resize_mask_to_image=True``); when the flag is set, everything except
    the largest foreground connected component (threshold 0.5) is zeroed
    before saving under the same file name in ``save_dir``.
    """
    for path in (p for p in image_dir.iterdir() if p.is_file()):
        source_image = skimage.io.imread(str(path))
        mask = model_trainer.predict_on_images(
            images=[source_image], resize_mask_to_image=True, save=False)[0]
        if zero_all_except_foreground_largest_connected_component:
            mask = temp.zero_all_except_foreground_largest_connected_component(
                mask, threshold=0.5)
        skimage.io.imsave(str(save_dir / path.name), mask)
class AreaProperties:
    """An axis-aligned square window defined by a center point and a radius.

    The window spans ``[x_start, x_stop)`` x ``[y_start, y_stop)`` where each
    bound is ``centroid +/- radius`` along the corresponding axis.  Bounds may
    fall outside the image; callers pad the image accordingly.
    """

    def __init__(self, centroid, radius):
        # centroid: indexable (row, col) pair; radius: half-size of the window.
        self.centroid = centroid
        self.radius = radius

    def __repr__(self):
        # Added for easier debugging/logging of computed regions.
        return f'{type(self).__name__}(centroid={self.centroid!r}, radius={self.radius!r})'

    @property
    def x_start(self):
        """First row of the window (may be negative)."""
        return self.centroid[0] - self.radius

    @property
    def x_stop(self):
        """Exclusive upper row bound of the window."""
        return self.centroid[0] + self.radius

    @property
    def y_start(self):
        """First column of the window (may be negative)."""
        return self.centroid[1] - self.radius

    @property
    def y_stop(self):
        """Exclusive upper column bound of the window."""
        return self.centroid[1] + self.radius
def object_area_properties(object_mask, threshold=128, diameter_factor=1.5) -> Optional[AreaProperties]:
    """Binarize ``object_mask`` in place and describe its single region.

    Pixels below ``threshold`` are zeroed and the remaining ones set to 255
    (NOTE: this mutates the caller's array).  The mask must then yield exactly
    one region, otherwise ``None`` is returned.

    Returns an ``AreaProperties`` centered on the region's centroid with a
    radius of ``equivalent_diameter * diameter_factor``.
    """
    # In-place binarization (side effect on the caller's array).
    object_mask[object_mask < threshold] = 0
    object_mask[object_mask > 0] = 255

    # NOTE(review): regionprops treats the 0/255 array as a *label* image, so
    # all foreground pixels share the single label 255 regardless of
    # connectivity; if per-connected-component regions are intended, run
    # skimage.measure.label() first — confirm with the callers.
    properties = skimage.measure.regionprops(object_mask)
    print('number of connected components', len(properties))
    if len(properties) != 1:
        return None

    object_properties = properties[0]
    radius = int(object_properties.equivalent_diameter * diameter_factor)
    # BUG FIX: ``np.int`` was deprecated and removed in NumPy 1.24; the
    # builtin ``int`` is the documented replacement.
    centroid = np.round(object_properties.centroid).astype(int)
    return AreaProperties(centroid, radius)
def calculate_pad_width_for_image_to_cut_area(image, area_properties: AreaProperties):
    """Padding widths needed so that ``area_properties`` fits inside ``image``.

    Returns a per-axis (before, after) tuple suitable for ``numpy.pad``:
    any part of the area that sticks out of the image bounds becomes padding.
    A trailing (0, 0) entry is appended for the channel axis of 3-D images.
    """
    before_x = -area_properties.x_start if area_properties.x_start < 0 else 0
    after_x = max(0, area_properties.x_stop - image.shape[0])
    before_y = -area_properties.y_start if area_properties.y_start < 0 else 0
    after_y = max(0, area_properties.y_stop - image.shape[1])

    pad_width = ((before_x, after_x), (before_y, after_y))
    if len(image.shape) == 3:
        pad_width += ((0, 0),)  # do not pad the channel axis
    return pad_width
def cut_out_area(image, object_mask):
    """Crop the square region around the single object in ``object_mask``.

    Returns None when the mask does not contain exactly one connected
    component.  The image is padded first, so the crop is well-defined even
    when the area extends past the image borders.
    """
    area = object_area_properties(object_mask)
    if area is None:
        return None

    pad_width = calculate_pad_width_for_image_to_cut_area(image, area)
    padded = np.pad(image, pad_width)

    # Shift the crop window into the padded image's coordinate system.
    x_shift = pad_width[0][0]
    y_shift = pad_width[1][0]
    x0 = area.x_start + x_shift
    x1 = area.x_stop + x_shift
    y0 = area.y_start + y_shift
    y1 = area.y_stop + y_shift
    return padded[x0:x1, y0:y1]
# def predict_optic_cup_on_dir_images(
# model_trainer: ModelTrainer,
# image_dir: Path,
# optic_disk_mask_dir: Path,
# save_dir: Path):
# for image_path in image_dir.iterdir():
# if not image_path.is_file():
# continue
#
# image = skimage.io.imread(str(image_path))
#
#
# cut_out_disk_area
if __name__ == '__main__':
    # One-off experiment: crop the optic-disk area out of a single fundus
    # image, predict the optic-cup mask on the crop and report the cup/disk
    # pixel ratio.  Paths and the file name are hard-coded.
    model_trainer = trainer.UnetModelTrainer(OpticDiskCupModelTrainerConfig)

    file_name = 'MS-ParhimovichAlexander19940101(70700).jpg'
    image = skimage.io.imread(str(Path(r'D:\Projects\retinal-fundus-models\databases\OUR_IMAGES\normalizedImages') / file_name))
    # b = image[..., 2]
    # image[..., 0] = b
    # image[..., 1] = b
    # skimage.io.imsave(str(Path(r'D:\Temp\crop-tests') / f'{Path(file_name).stem}-RGB-B.png'), image)
    # Previously generated optic-disk mask for the same image.
    disk_mask = skimage.io.imread(str(Path(r'D:\Projects\retinal-fundus-models\databases\OUR_IMAGES\normalizedImages-opticDisksMasks-largestCC-v3-3db') / file_name))
    cropped_image = cut_out_area(image, disk_mask)
    skimage.io.imsave(str(Path(r'D:\Temp\crop-tests\new') / f'{Path(file_name).stem}-CROPPED-DISK.png'), cropped_image)

    # Predict the optic-cup mask on the disk crop and save it for inspection.
    predicted_masks = model_trainer.predict_on_images(images=[cropped_image], resize_mask_to_image=True, save=False)
    predicted_mask = predicted_masks[0]
    skimage.io.imsave(str(Path(r'D:\Temp\crop-tests\new') / f'{Path(file_name).stem}-CUP-MASK.png'), predicted_mask)

    debug_utils.print_info(disk_mask, 'disk_mask')
    debug_utils.print_info(predicted_mask, 'predicted_mask')

    # Cup/disk area ratio estimated from nonzero pixel counts of both masks.
    disk_number_of_pixels = np.count_nonzero(disk_mask)
    cup_number_of_pixels = np.count_nonzero(np.round(predicted_mask))
    print('disk_number_of_pixels', disk_number_of_pixels)
    print('cup_number_of_pixels', cup_number_of_pixels)
    print('cup/disk ratio', cup_number_of_pixels / disk_number_of_pixels)

    # NOTE: everything below is unreachable while this early exit() stays in.
    exit()

    model_trainer = trainer.UnetModelTrainer(OpticDiskCupModelTrainerConfig)
    # model_trainer = trainer.UnetModelTrainer(UnetModelTrainerConfig)
    predict_on_dir_images(model_trainer,
                          Path(r'D:\Projects\retinal-fundus-models\databases\OUR_IMAGES\normalizedImages'),
                          Path(r'D:\Projects\retinal-fundus-models\databases\OUR_IMAGES\normalizedImages-opticDisksMasks-largestCC-v3-3db'),
                          zero_all_except_foreground_largest_connected_component=True)
    # predict_optic_cup_on_dir_images(
    #     model_trainer,
    #     Path(r'D:\Projects\retinal-fundus-models\databases\OUR_IMAGES\normalizedImages'),
    #     Path(r'D:\Projects\retinal-fundus-models\databases\OUR_IMAGES\normalizedImages-opticDisksMasks-largestCC-v2'),
    # )
| [
"pathlib.Path",
"numpy.count_nonzero",
"bsmu.retinal_fundus.models.unet.trainer.UnetModelTrainer",
"bsmu.retinal_fundus.models.utils.debug.print_info",
"numpy.pad",
"numpy.round",
"bsmu.retinal_fundus.models.temp.zero_all_except_foreground_largest_connected_component"
] | [((3344, 3368), 'numpy.pad', 'np.pad', (['image', 'pad_width'], {}), '(image, pad_width)\n', (3350, 3368), True, 'import numpy as np\n'), ((4230, 4286), 'bsmu.retinal_fundus.models.unet.trainer.UnetModelTrainer', 'trainer.UnetModelTrainer', (['OpticDiskCupModelTrainerConfig'], {}), '(OpticDiskCupModelTrainerConfig)\n', (4254, 4286), False, 'from bsmu.retinal_fundus.models.unet import trainer\n'), ((5270, 5316), 'bsmu.retinal_fundus.models.utils.debug.print_info', 'debug_utils.print_info', (['disk_mask', '"""disk_mask"""'], {}), "(disk_mask, 'disk_mask')\n", (5292, 5316), True, 'from bsmu.retinal_fundus.models.utils import debug as debug_utils\n'), ((5321, 5377), 'bsmu.retinal_fundus.models.utils.debug.print_info', 'debug_utils.print_info', (['predicted_mask', '"""predicted_mask"""'], {}), "(predicted_mask, 'predicted_mask')\n", (5343, 5377), True, 'from bsmu.retinal_fundus.models.utils import debug as debug_utils\n'), ((5407, 5434), 'numpy.count_nonzero', 'np.count_nonzero', (['disk_mask'], {}), '(disk_mask)\n', (5423, 5434), True, 'import numpy as np\n'), ((5727, 5783), 'bsmu.retinal_fundus.models.unet.trainer.UnetModelTrainer', 'trainer.UnetModelTrainer', (['OpticDiskCupModelTrainerConfig'], {}), '(OpticDiskCupModelTrainerConfig)\n', (5751, 5783), False, 'from bsmu.retinal_fundus.models.unet import trainer\n'), ((5479, 5503), 'numpy.round', 'np.round', (['predicted_mask'], {}), '(predicted_mask)\n', (5487, 5503), True, 'import numpy as np\n'), ((5923, 6017), 'pathlib.Path', 'Path', (['"""D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages"""'], {}), "(\n 'D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages'\n )\n", (5927, 6017), False, 'from pathlib import Path\n'), ((6031, 6158), 'pathlib.Path', 'Path', (['"""D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages-opticDisksMasks-largestCC-v3-3db"""'], {}), "(\n 
'D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages-opticDisksMasks-largestCC-v3-3db'\n )\n", (6035, 6158), False, 'from pathlib import Path\n'), ((1249, 1343), 'bsmu.retinal_fundus.models.temp.zero_all_except_foreground_largest_connected_component', 'temp.zero_all_except_foreground_largest_connected_component', (['predicted_mask'], {'threshold': '(0.5)'}), '(predicted_mask,\n threshold=0.5)\n', (1308, 1343), True, 'import bsmu.retinal_fundus.models.temp as temp\n'), ((2365, 2401), 'numpy.round', 'np.round', (['object_properties.centroid'], {}), '(object_properties.centroid)\n', (2373, 2401), True, 'import numpy as np\n'), ((4383, 4477), 'pathlib.Path', 'Path', (['"""D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages"""'], {}), "(\n 'D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages'\n )\n", (4387, 4477), False, 'from pathlib import Path\n'), ((4692, 4819), 'pathlib.Path', 'Path', (['"""D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages-opticDisksMasks-largestCC-v3-3db"""'], {}), "(\n 'D:\\\\Projects\\\\retinal-fundus-models\\\\databases\\\\OUR_IMAGES\\\\normalizedImages-opticDisksMasks-largestCC-v3-3db'\n )\n", (4696, 4819), False, 'from pathlib import Path\n'), ((4897, 4930), 'pathlib.Path', 'Path', (['"""D:\\\\Temp\\\\crop-tests\\\\new"""'], {}), "('D:\\\\Temp\\\\crop-tests\\\\new')\n", (4901, 4930), False, 'from pathlib import Path\n'), ((5174, 5207), 'pathlib.Path', 'Path', (['"""D:\\\\Temp\\\\crop-tests\\\\new"""'], {}), "('D:\\\\Temp\\\\crop-tests\\\\new')\n", (5178, 5207), False, 'from pathlib import Path\n'), ((4934, 4949), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (4938, 4949), False, 'from pathlib import Path\n'), ((5211, 5226), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (5215, 5226), False, 'from pathlib import Path\n')] |
import torch
import numpy as np
from numba import jit
import torch_geometric.utils as pyg_utils
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.core import eval_np, np_ify, torch_ify
class TrafficGraphBuilder(torch.nn.Module):
    """Builds graph inputs (node features and edge index) from traffic
    observations.

    Node 0 is tagged with ``ego_init`` and all remaining nodes with
    ``other_init``; these tag vectors are appended to each node's raw
    observation features.
    """

    def __init__(self,
                 input_dim,
                 node_num,
                 ego_init=np.array([0.,1.]),
                 other_init=np.array([1.,0.]),
                 ):
        # input_dim: number of raw observation features per node.
        # node_num: number of nodes (vehicles) per sample.
        # ego_init / other_init: constant tag vectors appended to node 0
        #   and to every other node, respectively (read-only defaults).
        super(TrafficGraphBuilder, self).__init__()

        self.input_dim = input_dim
        self.node_num = node_num

        self.ego_init = torch_ify(ego_init)
        self.other_init = torch_ify(other_init)

        # Per-node output feature size: raw features plus the tag vector.
        self.output_dim = input_dim + self.ego_init.shape[0]

    def forward(self, obs, valid_musk=None):
        """Build node features and the edge index for a batch of observations.

        :param obs: tensor of shape (batch, node_num, obs_dim).
        :param valid_musk: unused; kept for interface compatibility.
        :return: tuple (x, edge_index) where
            x: (batch*num_node) x output_dim
            edge_index: 2 x node_edge
            messages from nodes in edge_index[0] are sent to nodes in edge_index[1]
        """
        batch_size, node_num, obs_dim = obs.shape

        # Append the ego/other tag vectors to the raw observations, then
        # flatten the batch and node dimensions into one node axis.
        x = torch.zeros(batch_size,self.node_num, self.output_dim).to(ptu.device)
        x[:,:,:self.input_dim] = obs
        x[:,0,self.input_dim:] = self.ego_init[None,:]
        x[:,1:,self.input_dim:] = self.other_init[None,None,:]
        x = x.reshape(int(batch_size*self.node_num),self.output_dim)

        # xs = obs[:,:,0]
        # ys = obs[:,:,1]
        # upper_indices = torch.where(ys > 4.)
        # lower_indices = torch.where((ys > 0.) and (ys <= 4.))

        # Edge construction runs in numpy/numba on the CPU.
        obs = np_ify(obs)
        edge_index = get_edge_index(obs) #batch x 2 x max_edge_num
        # Flatten the per-batch edge lists into one 2 x E array, drop
        # duplicate columns (np.unique also sorts them) and self-loops.
        edge_index = np.swapaxes(edge_index,0,1).reshape(2,-1)
        edge_index = np.unique(edge_index, axis=1)
        edge_index = torch_ify(edge_index).long()
        edge_index = pyg_utils.remove_self_loops(edge_index)[0]

        return x, edge_index

    def get_valid_node_mask(self, obs):
        """Return a boolean mask (batch x node_num) of all valid nodes.

        A node is considered valid when its feature at index 1 (its y
        coordinate) is nonzero.
        """
        valid_musk = (obs[:,:,1] != 0) # y!= 0
        return valid_musk
@jit(nopython=True)
def get_edge_index(obs):
    """Build per-sample edge lists connecting vehicles in nearby lanes.

    Vehicles are bucketed into three lanes by their y coordinate (thirds of
    (0, 1) — apparently a normalized coordinate; confirm against the
    observation encoding) and sorted by x.  Edges are added:
      * between node 0 and every lane-1 vehicle (both directions),
      * between x-neighbors within lane 1 and within lane 2,
      * between x-neighbors in the union of lanes 1 and 2.
    Node indices are offset by ``i * node_num`` so all batch samples share
    one flat node numbering.

    NOTE(review): lane0 vehicles only contribute via the lane-0/1 union-free
    ``lane01_edges`` built from *lane1* indices (node 0 <-> lane-1) — confirm
    this asymmetry is intended.

    :param obs: array of shape (batch, node_num, obs_dim); feature 0 is x,
        feature 1 is y.
    :return: float array of shape (batch, 2, node_num * 7); unused trailing
        slots stay zero.
    """
    batch_size, node_num, obs_dim = obs.shape
    Xs = obs[:,:,0]
    Ys = obs[:,:,1]
    # 3 + 4 edge slots per node is the assumed upper bound on edges.
    Edges = np.zeros((batch_size,2,node_num*(3+4)))
    for i in range(batch_size):
        xs = Xs[i]
        ys = Ys[i]
        # Sort nodes by x; all lane masks below index into this ordering.
        sort_index = np.argsort(xs)
        sort_y = ys[sort_index]
        # Lane membership by y thirds (boolean products = logical AND,
        # which numba's nopython mode supports).
        lane0_sort_mask = ((sort_y<=1./3.) * (sort_y>0.))
        lane1_sort_mask = ((sort_y>1./3.) * (sort_y<=2./3.))
        lane2_sort_mask = ((sort_y>=2./3.) * (sort_y<1.))
        lane0_sort_index = sort_index[lane0_sort_mask]
        lane1_sort_index = sort_index[lane1_sort_mask]
        lane2_sort_index = sort_index[lane2_sort_mask]
        lane01_sort_index = sort_index[(lane0_sort_mask | lane1_sort_mask)]
        lane12_sort_index = sort_index[(lane1_sort_mask | lane2_sort_mask)]
        # Node 0 <-> each lane-1 vehicle, in both directions.
        lane01_edges = np.concatenate((np.expand_dims(np.concatenate((np.zeros(len(lane1_sort_index)),lane1_sort_index)),axis=0),
                        np.expand_dims(np.concatenate((lane1_sort_index,np.zeros(len(lane1_sort_index)))),axis=0)),axis=0)
        # Adjacent-in-x pairs within lane 1, both directions.
        lane1_edges = np.concatenate((np.expand_dims(np.concatenate((lane1_sort_index[:-1],lane1_sort_index[1:])),axis=0),
                        np.expand_dims(np.concatenate((lane1_sort_index[1:],lane1_sort_index[:-1])),axis=0)),axis=0)
        # Adjacent-in-x pairs within lane 2, both directions.
        lane2_edges = np.concatenate((np.expand_dims(np.concatenate((lane2_sort_index[:-1],lane2_sort_index[1:])),axis=0),
                        np.expand_dims(np.concatenate((lane2_sort_index[1:],lane2_sort_index[:-1])),axis=0)),axis=0)
        # Adjacent-in-x pairs across the lane 1+2 union, both directions.
        lane12_edges = np.concatenate((np.expand_dims(np.concatenate((lane12_sort_index[:-1],lane12_sort_index[1:])),axis=0),
                        np.expand_dims(np.concatenate((lane12_sort_index[1:],lane12_sort_index[:-1])),axis=0)),axis=0)
        # Shift node ids into this sample's slice of the flat numbering.
        edges = np.concatenate((lane01_edges, lane1_edges, lane2_edges, lane12_edges),axis=-1)+i*node_num
        Edges[i,:,:edges.shape[1]] = edges
    return Edges
| [
"numpy.unique",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numba.jit",
"torch_geometric.utils.remove_self_loops",
"numpy.swapaxes",
"rlkit.torch.core.torch_ify",
"numpy.concatenate",
"rlkit.torch.core.np_ify",
"torch.zeros"
] | [((1982, 2000), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1985, 2000), False, 'from numba import jit\n'), ((2124, 2169), 'numpy.zeros', 'np.zeros', (['(batch_size, 2, node_num * (3 + 4))'], {}), '((batch_size, 2, node_num * (3 + 4)))\n', (2132, 2169), True, 'import numpy as np\n'), ((342, 362), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (350, 362), True, 'import numpy as np\n'), ((388, 408), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (396, 408), True, 'import numpy as np\n'), ((571, 590), 'rlkit.torch.core.torch_ify', 'torch_ify', (['ego_init'], {}), '(ego_init)\n', (580, 590), False, 'from rlkit.torch.core import eval_np, np_ify, torch_ify\n'), ((617, 638), 'rlkit.torch.core.torch_ify', 'torch_ify', (['other_init'], {}), '(other_init)\n', (626, 638), False, 'from rlkit.torch.core import eval_np, np_ify, torch_ify\n'), ((1452, 1463), 'rlkit.torch.core.np_ify', 'np_ify', (['obs'], {}), '(obs)\n', (1458, 1463), False, 'from rlkit.torch.core import eval_np, np_ify, torch_ify\n'), ((1615, 1644), 'numpy.unique', 'np.unique', (['edge_index'], {'axis': '(1)'}), '(edge_index, axis=1)\n', (1624, 1644), True, 'import numpy as np\n'), ((2255, 2269), 'numpy.argsort', 'np.argsort', (['xs'], {}), '(xs)\n', (2265, 2269), True, 'import numpy as np\n'), ((1716, 1755), 'torch_geometric.utils.remove_self_loops', 'pyg_utils.remove_self_loops', (['edge_index'], {}), '(edge_index)\n', (1743, 1755), True, 'import torch_geometric.utils as pyg_utils\n'), ((3826, 3905), 'numpy.concatenate', 'np.concatenate', (['(lane01_edges, lane1_edges, lane2_edges, lane12_edges)'], {'axis': '(-1)'}), '((lane01_edges, lane1_edges, lane2_edges, lane12_edges), axis=-1)\n', (3840, 3905), True, 'import numpy as np\n'), ((980, 1035), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.node_num', 'self.output_dim'], {}), '(batch_size, self.node_num, self.output_dim)\n', (991, 1035), False, 'import torch\n'), ((1552, 1581), 
'numpy.swapaxes', 'np.swapaxes', (['edge_index', '(0)', '(1)'], {}), '(edge_index, 0, 1)\n', (1563, 1581), True, 'import numpy as np\n'), ((1666, 1687), 'rlkit.torch.core.torch_ify', 'torch_ify', (['edge_index'], {}), '(edge_index)\n', (1675, 1687), False, 'from rlkit.torch.core import eval_np, np_ify, torch_ify\n'), ((3113, 3174), 'numpy.concatenate', 'np.concatenate', (['(lane1_sort_index[:-1], lane1_sort_index[1:])'], {}), '((lane1_sort_index[:-1], lane1_sort_index[1:]))\n', (3127, 3174), True, 'import numpy as np\n'), ((3230, 3291), 'numpy.concatenate', 'np.concatenate', (['(lane1_sort_index[1:], lane1_sort_index[:-1])'], {}), '((lane1_sort_index[1:], lane1_sort_index[:-1]))\n', (3244, 3291), True, 'import numpy as np\n'), ((3361, 3422), 'numpy.concatenate', 'np.concatenate', (['(lane2_sort_index[:-1], lane2_sort_index[1:])'], {}), '((lane2_sort_index[:-1], lane2_sort_index[1:]))\n', (3375, 3422), True, 'import numpy as np\n'), ((3478, 3539), 'numpy.concatenate', 'np.concatenate', (['(lane2_sort_index[1:], lane2_sort_index[:-1])'], {}), '((lane2_sort_index[1:], lane2_sort_index[:-1]))\n', (3492, 3539), True, 'import numpy as np\n'), ((3610, 3673), 'numpy.concatenate', 'np.concatenate', (['(lane12_sort_index[:-1], lane12_sort_index[1:])'], {}), '((lane12_sort_index[:-1], lane12_sort_index[1:]))\n', (3624, 3673), True, 'import numpy as np\n'), ((3729, 3792), 'numpy.concatenate', 'np.concatenate', (['(lane12_sort_index[1:], lane12_sort_index[:-1])'], {}), '((lane12_sort_index[1:], lane12_sort_index[:-1]))\n', (3743, 3792), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Module providing Multirate signal processing functionality.
Largely based on MATLAB's Multirate signal processing toolbox with consultation
of Octave m-file source code.
"""
import fractions
import math
import sys

import numpy
from scipy import signal
def downsample(s, n, phase=0):
    """Keep every n-th sample of ``s``, starting at index ``phase``.

    Decreases the sampling rate by the integer factor ``n``.
    """
    keep = slice(phase, None, n)
    return s[keep]
def upsample(s, n, phase=0):
    """Increase the sampling rate of ``s`` by the integer factor ``n``.

    n-1 zeros are inserted after each original sample; ``phase`` rotates
    the result so the original samples land at offsets phase, phase+n, ...
    """
    # Kronecker product with a unit pulse interleaves the zeros.
    pulse = numpy.zeros(n)
    pulse[0] = 1
    return numpy.roll(numpy.kron(s, pulse), phase)
def decimate(s, r, n=None, fir=False):
    """Decrease the sampling rate of ``s`` by the integer factor ``r``.

    The input is lowpass-filtered before being resampled at the lower rate.
    By default an order-``n`` (8 unless given) Chebyshev type I filter with
    cutoff 0.8/r is applied with ``filtfilt`` in both directions, removing
    phase distortion and effectively doubling the filter order.  With
    ``fir=True`` an order-``n`` (30 unless given) FIR filter is applied in a
    single forward pass instead, which conserves memory and is useful for
    long sequences.
    """
    if fir:
        order = 30 if n is None else n
        smoothed = signal.lfilter(signal.firwin(order, 1.0 / r), 1, s)
    else:  # IIR
        order = 8 if n is None else n
        b, a = signal.cheby1(order, 0.05, 0.8 / r)
        smoothed = signal.filtfilt(b, a, s)
    return downsample(smoothed, r)
def interp(s, r, l=4, alpha=0.5):
    """Increase the sampling rate of ``s`` by the integer factor ``r``.

    Lowpass interpolation: zeros are inserted between the original samples
    and the result is smoothed with a FIR filter of length 2*l*r + 1, so
    2*l original samples contribute to each interpolated value (ordinarily
    l should be <= 10).  ``alpha`` is the normalized cutoff frequency,
    0 <= alpha <= 1, where 1 is the Nyquist frequency of the original
    signal.  Defaults: l=4, alpha=0.5.
    """
    taps = signal.firwin(2 * l * r + 1, alpha / r)
    smoothed = signal.lfilter(taps, 1, upsample(s, r))
    # Drop the filter's startup transient and rescale by r to compensate
    # for the gain lost to zero insertion.
    return r * smoothed[r * l + 1:-1]
def resample(s, p, q, h=None):
    """Change sampling rate by the rational factor p/q. This implementation
    is based on the Octave implementation of the resample function. It
    designs the anti-aliasing filter using the window approach applying a
    Kaiser window with the beta term calculated as specified by [2].

    :param s: input signal.
    :param p: upsampling factor (positive integer).
    :param q: downsampling factor (positive integer).
    :param h: optional FIR anti-aliasing filter; designed automatically
        when omitted.
    :return: the resampled signal, of length ceil(len(s)*p/q).

    Ref [1] Proakis and Manolakis,
    Digital Signal Processing: Principles, Algorithms, and Applications,
    4th ed., Prentice Hall, 2007. Chap. 6

    Ref [2] Oppenheim, Schafer and Buck,
    Discrete-time signal processing, Signal processing series,
    Prentice-Hall, 1999
    """
    # fractions.gcd was removed in Python 3.9; math.gcd is the replacement.
    gcd = math.gcd(p, q)
    if gcd > 1:
        # Integer division keeps p and q exact ints under Python 3.
        p = p // gcd
        q = q // gcd

    if h is None:  # design filter
        # Properties of the antialiasing filter.
        log10_rejection = -3.0
        stopband_cutoff_f = 1.0 / (2.0 * max(p, q))
        roll_off_width = stopband_cutoff_f / 10.0

        # Determine filter length: empirical formula from [2] Chap 7,
        # Eq. (7.63) p 476.
        rejection_db = -20.0 * log10_rejection
        l = int(numpy.ceil((rejection_db - 8.0) / (28.714 * roll_off_width)))

        # Ideal sinc filter.
        t = numpy.arange(-l, l + 1)
        ideal_filter = 2 * p * stopband_cutoff_f * numpy.sinc(2 * stopband_cutoff_f * t)

        # Determine parameter of Kaiser window: empirical formula from [2]
        # Chap 7, Eq. (7.62) p 474.
        beta = signal.kaiser_beta(rejection_db)

        # Apodize the ideal filter response.
        h = numpy.kaiser(2 * l + 1, beta) * ideal_filter

    ls = len(s)
    lh = len(h)
    l = (lh - 1) / 2.0
    ly = int(numpy.ceil(ls * p / float(q)))

    # Pre- and post-pad the filter response.  Indices must be Python ints:
    # numpy.floor returns floats, which are not valid slice bounds.
    nz_pre = int(numpy.floor(q - numpy.mod(l, q)))
    hpad = h[-lh + nz_pre:]

    offset = int(numpy.floor((l + nz_pre) / q))
    nz_post = 0
    while numpy.ceil(((ls - 1) * p + nz_pre + lh + nz_post) / q) - offset < ly:
        nz_post += 1
    hpad = hpad[:lh + nz_pre + nz_post]

    # Filtering.
    xfilt = upfirdn(s, hpad, p, q)

    return xfilt[offset - 1:offset - 1 + ly]
def upfirdn(s, h, p, q):
    """Upsample ``s`` by ``p``, FIR-filter with ``h``, then downsample by ``q``.

    ``fftconvolve`` is used instead of ``lfilter`` since the latter does not
    seem to do a full convolution operation (and fftconvolve is much faster
    than convolve).
    """
    expanded = upsample(s, p)
    filtered = signal.fftconvolve(h, expanded)
    return downsample(filtered, q)
def main():
    """Show simple use cases for functionality provided by this module.

    Each example below attempts to mimic the examples provided by mathworks
    MATLAB documentation, http://www.mathworks.com/help/toolbox/signal/

    Returns 0 on success; exits with status 2 when given any arguments.
    """
    import pylab
    argv = sys.argv
    if len(argv) != 1:
        # Fixed: the original Python 2 ``print >>sys.stderr`` statement is a
        # SyntaxError under Python 3.
        print('usage: python -m pim.sp.multirate', file=sys.stderr)
        sys.exit(2)

    # Downsample
    x = numpy.arange(1, 11)
    print('Down Sampling %s by 3' % x)
    print(downsample(x, 3))
    print('Down Sampling %s by 3 with phase offset 2' % x)
    print(downsample(x, 3, phase=2))

    # Upsample
    x = numpy.arange(1, 5)
    print('Up Sampling %s by 3' % x)
    print(upsample(x, 3))
    print('Up Sampling %s by 3 with phase offset 2' % x)
    print(upsample(x, 3, 2))

    # Decimate
    t = numpy.arange(0, 1, 0.00025)
    x = numpy.sin(2*numpy.pi*30*t) + numpy.sin(2*numpy.pi*60*t)
    y = decimate(x, 4)
    pylab.figure()
    pylab.subplot(2, 1, 1)
    pylab.title('Original Signal')
    pylab.stem(numpy.arange(len(x[0:120])), x[0:120])
    pylab.subplot(2, 1, 2)
    pylab.title('Decimated Signal')
    pylab.stem(numpy.arange(len(y[0:30])), y[0:30])

    # Interp
    t = numpy.arange(0, 1, 0.001)
    x = numpy.sin(2*numpy.pi*30*t) + numpy.sin(2*numpy.pi*60*t)
    y = interp(x, 4)
    pylab.figure()
    pylab.subplot(2, 1, 1)
    pylab.title('Original Signal')
    pylab.stem(numpy.arange(len(x[0:30])), x[0:30])
    pylab.subplot(2, 1, 2)
    pylab.title('Interpolated Signal')
    pylab.stem(numpy.arange(len(y[0:120])), y[0:120])

    # upfirdn
    L = 147.0
    M = 160.0
    N = 24.0*L
    # firwin requires an integer tap count (N-1 is a float here).
    h = signal.firwin(int(N-1), 1/M, window=('kaiser', 7.8562))
    h = L*h
    Fs = 48000.0
    n = numpy.arange(0, 10239)
    x = numpy.sin(2*numpy.pi*1000/Fs*n)
    # Up/down factors must be ints: they are used for zero insertion and
    # slice strides inside upfirdn.
    y = upfirdn(x, h, int(L), int(M))
    pylab.figure()
    pylab.stem(n[1:49]/Fs, x[1:49])
    pylab.stem(n[1:45]/(Fs*L/M), y[13:57], 'r', markerfmt='ro',)
    pylab.xlabel('Time (sec)')
    pylab.ylabel('Signal value')

    # resample
    fs1 = 10.0
    t1 = numpy.arange(0, 1 + 1.0/fs1, 1.0/fs1)
    x = t1
    y = resample(x, 3, 2)
    t2 = numpy.arange(0, (len(y)))*2.0/(3.0*fs1)
    pylab.figure()
    pylab.plot(t1, x, '*')
    pylab.plot(t2, y, 'o')
    pylab.plot(numpy.arange(-0.5, 1.5, 0.01), numpy.arange(-0.5, 1.5, 0.01), ':')
    pylab.legend(('original', 'resampled'))
    pylab.xlabel('Time')

    x = numpy.hstack([numpy.arange(1, 11), numpy.arange(9, 0, -1)])
    y = resample(x, 3, 2)
    pylab.figure()
    pylab.subplot(2, 1, 1)
    pylab.title('Edge Effects Not Noticeable')
    pylab.plot(numpy.arange(19)+1, x, '*')
    pylab.plot(numpy.arange(29)*2/3.0 + 1, y, 'o')
    pylab.legend(('original', 'resampled'))
    x = numpy.hstack([numpy.arange(10, 0, -1), numpy.arange(2, 11)])
    y = resample(x, 3, 2)
    pylab.subplot(2, 1, 2)
    pylab.plot(numpy.arange(19)+1, x, '*')
    pylab.plot(numpy.arange(29)*2/3.0 + 1, y, 'o')
    pylab.title('Edge Effects Very Noticeable')
    pylab.legend(('original', 'resampled'))
    pylab.show()
    return 0
if __name__ == '__main__':
    # Run the demos only when executed as a script; propagate main()'s
    # return value as the process exit code.
    sys.exit(main())
| [
"pylab.title",
"scipy.signal.filtfilt",
"scipy.signal.kaiser_beta",
"pylab.xlabel",
"scipy.signal.cheby1",
"sys.exit",
"numpy.sin",
"numpy.mod",
"numpy.arange",
"pylab.stem",
"pylab.plot",
"numpy.ceil",
"fractions.gcd",
"pylab.subplot",
"scipy.signal.firwin",
"numpy.floor",
"pylab.fi... | [((2574, 2613), 'scipy.signal.firwin', 'signal.firwin', (['(2 * l * r + 1)', '(alpha / r)'], {}), '(2 * l * r + 1, alpha / r)\n', (2587, 2613), False, 'from scipy import signal\n'), ((3290, 3309), 'fractions.gcd', 'fractions.gcd', (['p', 'q'], {}), '(p, q)\n', (3303, 3309), False, 'import fractions\n'), ((4411, 4440), 'numpy.floor', 'numpy.floor', (['((l + nz_pre) / q)'], {}), '((l + nz_pre) / q)\n', (4422, 4440), False, 'import numpy\n'), ((5421, 5440), 'numpy.arange', 'numpy.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (5433, 5440), False, 'import numpy\n'), ((5627, 5645), 'numpy.arange', 'numpy.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (5639, 5645), False, 'import numpy\n'), ((5818, 5845), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(0.00025)'], {}), '(0, 1, 0.00025)\n', (5830, 5845), False, 'import numpy\n'), ((5936, 5950), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (5948, 5950), False, 'import pylab\n'), ((5955, 5977), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5968, 5977), False, 'import pylab\n'), ((5982, 6012), 'pylab.title', 'pylab.title', (['"""Original Signal"""'], {}), "('Original Signal')\n", (5993, 6012), False, 'import pylab\n'), ((6071, 6093), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6084, 6093), False, 'import pylab\n'), ((6098, 6129), 'pylab.title', 'pylab.title', (['"""Decimated Signal"""'], {}), "('Decimated Signal')\n", (6109, 6129), False, 'import pylab\n'), ((6203, 6228), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(0.001)'], {}), '(0, 1, 0.001)\n', (6215, 6228), False, 'import numpy\n'), ((6317, 6331), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (6329, 6331), False, 'import pylab\n'), ((6336, 6358), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6349, 6358), False, 'import pylab\n'), ((6363, 6393), 'pylab.title', 'pylab.title', (['"""Original Signal"""'], {}), "('Original Signal')\n", (6374, 6393), 
False, 'import pylab\n'), ((6450, 6472), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6463, 6472), False, 'import pylab\n'), ((6477, 6511), 'pylab.title', 'pylab.title', (['"""Interpolated Signal"""'], {}), "('Interpolated Signal')\n", (6488, 6511), False, 'import pylab\n'), ((6632, 6686), 'scipy.signal.firwin', 'signal.firwin', (['(N - 1)', '(1 / M)'], {'window': "('kaiser', 7.8562)"}), "(N - 1, 1 / M, window=('kaiser', 7.8562))\n", (6645, 6686), False, 'from scipy import signal\n'), ((6720, 6742), 'numpy.arange', 'numpy.arange', (['(0)', '(10239)'], {}), '(0, 10239)\n', (6732, 6742), False, 'import numpy\n'), ((6752, 6791), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 1000 / Fs * n)'], {}), '(2 * numpy.pi * 1000 / Fs * n)\n', (6761, 6791), False, 'import numpy\n'), ((6816, 6830), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (6828, 6830), False, 'import pylab\n'), ((6835, 6868), 'pylab.stem', 'pylab.stem', (['(n[1:49] / Fs)', 'x[1:49]'], {}), '(n[1:49] / Fs, x[1:49])\n', (6845, 6868), False, 'import pylab\n'), ((6871, 6936), 'pylab.stem', 'pylab.stem', (['(n[1:45] / (Fs * L / M))', 'y[13:57]', '"""r"""'], {'markerfmt': '"""ro"""'}), "(n[1:45] / (Fs * L / M), y[13:57], 'r', markerfmt='ro')\n", (6881, 6936), False, 'import pylab\n'), ((6936, 6962), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (sec)"""'], {}), "('Time (sec)')\n", (6948, 6962), False, 'import pylab\n'), ((6967, 6995), 'pylab.ylabel', 'pylab.ylabel', (['"""Signal value"""'], {}), "('Signal value')\n", (6979, 6995), False, 'import pylab\n'), ((7035, 7076), 'numpy.arange', 'numpy.arange', (['(0)', '(1 + 1.0 / fs1)', '(1.0 / fs1)'], {}), '(0, 1 + 1.0 / fs1, 1.0 / fs1)\n', (7047, 7076), False, 'import numpy\n'), ((7162, 7176), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (7174, 7176), False, 'import pylab\n'), ((7181, 7203), 'pylab.plot', 'pylab.plot', (['t1', 'x', '"""*"""'], {}), "(t1, x, '*')\n", (7191, 7203), False, 'import pylab\n'), ((7208, 7230), 
'pylab.plot', 'pylab.plot', (['t2', 'y', '"""o"""'], {}), "(t2, y, 'o')\n", (7218, 7230), False, 'import pylab\n'), ((7315, 7354), 'pylab.legend', 'pylab.legend', (["('original', 'resampled')"], {}), "(('original', 'resampled'))\n", (7327, 7354), False, 'import pylab\n'), ((7358, 7378), 'pylab.xlabel', 'pylab.xlabel', (['"""Time"""'], {}), "('Time')\n", (7370, 7378), False, 'import pylab\n'), ((7477, 7491), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (7489, 7491), False, 'import pylab\n'), ((7496, 7518), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (7509, 7518), False, 'import pylab\n'), ((7523, 7565), 'pylab.title', 'pylab.title', (['"""Edge Effects Not Noticeable"""'], {}), "('Edge Effects Not Noticeable')\n", (7534, 7565), False, 'import pylab\n'), ((7664, 7703), 'pylab.legend', 'pylab.legend', (["('original', 'resampled')"], {}), "(('original', 'resampled'))\n", (7676, 7703), False, 'import pylab\n'), ((7800, 7822), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (7813, 7822), False, 'import pylab\n'), ((7921, 7964), 'pylab.title', 'pylab.title', (['"""Edge Effects Very Noticeable"""'], {}), "('Edge Effects Very Noticeable')\n", (7932, 7964), False, 'import pylab\n'), ((7969, 8008), 'pylab.legend', 'pylab.legend', (["('original', 'resampled')"], {}), "(('original', 'resampled'))\n", (7981, 8008), False, 'import pylab\n'), ((8014, 8026), 'pylab.show', 'pylab.show', ([], {}), '()\n', (8024, 8026), False, 'import pylab\n'), ((1477, 1502), 'scipy.signal.firwin', 'signal.firwin', (['n', '(1.0 / r)'], {}), '(n, 1.0 / r)\n', (1490, 1502), False, 'from scipy import signal\n'), ((1527, 1550), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 's'], {}), '(b, a, s)\n', (1541, 1550), False, 'from scipy import signal\n'), ((1621, 1652), 'scipy.signal.cheby1', 'signal.cheby1', (['n', '(0.05)', '(0.8 / r)'], {}), '(n, 0.05, 0.8 / r)\n', (1634, 1652), False, 'from scipy import signal\n'), ((1663, 1687), 
'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 's'], {}), '(b, a, s)\n', (1678, 1687), False, 'from scipy import signal\n'), ((3731, 3791), 'numpy.ceil', 'numpy.ceil', (['((rejection_db - 8.0) / (28.714 * roll_off_width))'], {}), '((rejection_db - 8.0) / (28.714 * roll_off_width))\n', (3741, 3791), False, 'import numpy\n'), ((3832, 3855), 'numpy.arange', 'numpy.arange', (['(-l)', '(l + 1)'], {}), '(-l, l + 1)\n', (3844, 3855), False, 'import numpy\n'), ((4064, 4096), 'scipy.signal.kaiser_beta', 'signal.kaiser_beta', (['rejection_db'], {}), '(rejection_db)\n', (4082, 4096), False, 'from scipy import signal\n'), ((5384, 5395), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (5392, 5395), False, 'import sys\n'), ((5854, 5886), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 30 * t)'], {}), '(2 * numpy.pi * 30 * t)\n', (5863, 5886), False, 'import numpy\n'), ((5883, 5915), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 60 * t)'], {}), '(2 * numpy.pi * 60 * t)\n', (5892, 5915), False, 'import numpy\n'), ((6237, 6269), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 30 * t)'], {}), '(2 * numpy.pi * 30 * t)\n', (6246, 6269), False, 'import numpy\n'), ((6266, 6298), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 60 * t)'], {}), '(2 * numpy.pi * 60 * t)\n', (6275, 6298), False, 'import numpy\n'), ((7246, 7275), 'numpy.arange', 'numpy.arange', (['(-0.5)', '(1.5)', '(0.01)'], {}), '(-0.5, 1.5, 0.01)\n', (7258, 7275), False, 'import numpy\n'), ((7276, 7305), 'numpy.arange', 'numpy.arange', (['(-0.5)', '(1.5)', '(0.01)'], {}), '(-0.5, 1.5, 0.01)\n', (7288, 7305), False, 'import numpy\n'), ((3899, 3936), 'numpy.sinc', 'numpy.sinc', (['(2 * stopband_cutoff_f * t)'], {}), '(2 * stopband_cutoff_f * t)\n', (3909, 3936), False, 'import numpy\n'), ((4159, 4188), 'numpy.kaiser', 'numpy.kaiser', (['(2 * l + 1)', 'beta'], {}), '(2 * l + 1, beta)\n', (4171, 4188), False, 'import numpy\n'), ((4355, 4370), 'numpy.mod', 'numpy.mod', (['l', 'q'], {}), '(l, q)\n', (4364, 4370), False, 'import 
numpy\n'), ((4464, 4518), 'numpy.ceil', 'numpy.ceil', (['(((ls - 1) * p + nz_pre + lh + nz_post) / q)'], {}), '(((ls - 1) * p + nz_pre + lh + nz_post) / q)\n', (4474, 4518), False, 'import numpy\n'), ((7406, 7425), 'numpy.arange', 'numpy.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (7418, 7425), False, 'import numpy\n'), ((7426, 7448), 'numpy.arange', 'numpy.arange', (['(9)', '(0)', '(-1)'], {}), '(9, 0, -1)\n', (7438, 7448), False, 'import numpy\n'), ((7581, 7597), 'numpy.arange', 'numpy.arange', (['(19)'], {}), '(19)\n', (7593, 7597), False, 'import numpy\n'), ((7726, 7749), 'numpy.arange', 'numpy.arange', (['(10)', '(0)', '(-1)'], {}), '(10, 0, -1)\n', (7738, 7749), False, 'import numpy\n'), ((7751, 7770), 'numpy.arange', 'numpy.arange', (['(2)', '(11)'], {}), '(2, 11)\n', (7763, 7770), False, 'import numpy\n'), ((7838, 7854), 'numpy.arange', 'numpy.arange', (['(19)'], {}), '(19)\n', (7850, 7854), False, 'import numpy\n'), ((575, 593), 'numpy.zeros', 'numpy.zeros', (['(n - 1)'], {}), '(n - 1)\n', (586, 593), False, 'import numpy\n'), ((7624, 7640), 'numpy.arange', 'numpy.arange', (['(29)'], {}), '(29)\n', (7636, 7640), False, 'import numpy\n'), ((7881, 7897), 'numpy.arange', 'numpy.arange', (['(29)'], {}), '(29)\n', (7893, 7897), False, 'import numpy\n')] |
import numpy as np
# NumPy dtype used for event samples: unsigned 64-bit integers.
event_t = np.dtype(np.uint64)
def multiplyNEventArray(data, multiplier):
    """Return ``data`` repeated ``multiplier`` times, end to end.

    Thin wrapper around :func:`numpy.tile`.
    """
    repeated = np.tile(data, multiplier)
    return repeated
| [
"numpy.tile",
"numpy.dtype"
] | [((30, 49), 'numpy.dtype', 'np.dtype', (['np.uint64'], {}), '(np.uint64)\n', (38, 49), True, 'import numpy as np\n'), ((105, 130), 'numpy.tile', 'np.tile', (['data', 'multiplier'], {}), '(data, multiplier)\n', (112, 130), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""error list
* status code and error list
"""
import numpy as np
# Status Code
# 2xx Success
DONE = 200
DONE_IN_SD = 201
# 3xx Temporary Unable
TMP_DONE_IN_SD = 301
TMP_INCOMPLETE_IN_SD = 311
TMP_ANALYZE_TIMEOUT = 313
TMP_QUEUE_TIMEOUT = 314
TMP_CANT_GET_HD = 322
TMP_UNEXPECTED = 399
# 4xx Confirmed Failure
ERR_INCOMPLETE_IN_SD = 411
ERR_ANALYZE_TIMEOUT = 413
ERR_QUEUE_TIMEOUT = 414
ERR_INCOMPLETE_IN_HD = 420
ERR_CANT_GET_HD = 422
ERR_BAD_URL = 423
ERR_BAD_LENGTH = 424
ERR_BAD_RESOLUTION = 425
ERR_COPYRIGHTED_CONTENT = 426
ERR_PRIVATE_DELETED_CONTENT = 427
ERR_UNAVAILABLE_CONTENT = 428
ERR_PERM_UNEXPECTED = 499
# 5xx API Request Failure
ERR_APP_SERVER_URL = 520
ERR_APP_SERVER_HTTP = 521
ERR_BAD_REQ = 532
ERR_BAD_TOKEN = 533
ERR_SERVICE_UNAVAILABLE = 544
ERR_REQ_UNEXPECTED = 599
# Each row: [status_code, Japanese message, English message].
# The last row doubles as the fallback for unknown status codes.
error_list = [
    [DONE, "OK", "(cfm) OK"],
    [DONE_IN_SD, "SD画質での解析です。", "(cfm) OK, but analyze in SD(360p)"],
    [TMP_DONE_IN_SD, "SD画質での解析です。5分以上経過後に再度解析を試みられます。", "(tmp) OK, but analyze in SD(360p)"],
    [TMP_INCOMPLETE_IN_SD, "SD画質での解析に失敗しました。5分以上経過後に再度解析を試みられます。",
     "(tmp) Failed to analyze in SD(360p)"],
    [TMP_CANT_GET_HD, "動画の取得に失敗しました。5分以上経過後に再度解析を試みられます。", "(tmp) Failed to get Movie"],
    [TMP_ANALYZE_TIMEOUT, "解析がタイムアウトしました。5分以上経過後に再度解析を試みられます。", "(tmp) Analyze timeout"],
    [TMP_QUEUE_TIMEOUT, "解析待機中にタイムアウトしました。5分以上経過後に再度解析を試みられます。", "(tmp) Queue timeout"],
    [TMP_UNEXPECTED, "一時的に解析出来ません。5分以上経過後に再度解析を試みられます。", "(tmp) Unexpected error"],
    [ERR_INCOMPLETE_IN_SD, "解析出来ない動画です。", "(cfm) Failed to analyze in SD(360p)"],
    [ERR_ANALYZE_TIMEOUT, "動画の解析中にタイムアウトしました。", "(cfm) Analyze timeout"],
    [ERR_QUEUE_TIMEOUT, "動画の解析待ち中にタイムアウトしました。", "(cfm) Queue timeout"],
    [ERR_INCOMPLETE_IN_HD, "TLが存在しない動画です。", "(cfm) No Timeline movie in HD(720p)"],
    [ERR_CANT_GET_HD, "解析出来ない動画です。", "(cfm) Failed to get Movie"],
    [ERR_BAD_URL, "URLはhttps://www.youtube.com/watch?v=...の形式でお願いします。", "Bad movie url"],
    [ERR_BAD_LENGTH, "動画時間が長すぎるため、解析に対応しておりません。", "(cfm) Too long movie length"],
    [ERR_BAD_RESOLUTION, "非対応の解像度です。720pの一部の動画に対応しております。", "(cfm) Bad movie resolution"],
    [ERR_COPYRIGHTED_CONTENT, "著作権で保護されているため、動画の取得ができません。", "(cfm) Can not download movie"],
    [ERR_PRIVATE_DELETED_CONTENT, "非公開または削除されたため、動画の取得ができません。", "(cfm) Can not download movie"],
    [ERR_UNAVAILABLE_CONTENT, "ライブ配信または現在公開されていないため、動画の取得ができません。",
     "(cfm) Can not download movie"],
    [ERR_PERM_UNEXPECTED, "解析出来ない動画です。", "(cfm) Unexpected error"],
    [ERR_APP_SERVER_URL, "解析サーバーへのエラーが発生しました。", "Server error"],
    [ERR_APP_SERVER_HTTP, "解析サーバーへのエラーが発生しました。", "Server error"],
    [ERR_BAD_REQ, "必須パラメータがありません。", "No url on rest"],
    [ERR_BAD_TOKEN, "不正なトークンです。 Twitter @PriLog_R までご連絡下さい。",
     "Invalid token, please contact me on Twitter @PriLog_R"],
    [ERR_SERVICE_UNAVAILABLE, "申し訳ありません。現在サーバー側の問題により解析ができません。", "Server error"],
    [ERR_REQ_UNEXPECTED, "API処理中に予期しない問題が起きました。 Twitter @PriLog_R までご連絡下さい。",
     "(cfm) Unexpected error"]
]


def get_error_message(error_type, language=1):
    """Return the human readable message for a status code.

    Args:
        error_type (int or str): status code to look up
        language (int): message language 1: Japanese, 2: English

    Returns:
        error (str): message taken from ``error_list``; unknown codes
        (e.g. stale cached statuses from an older code revision) fall
        back to the last row ("Unexpected error").
    """
    # Plain linear scan over the status-code column.  The previous
    # implementation converted the whole table into a numpy string matrix on
    # every call and matched the code against every cell (message columns
    # included), which was both wasteful and fragile.  Comparing on the
    # string form keeps codes cached as strings working.
    wanted = str(error_type)
    for row in error_list:
        if str(row[0]) == wanted:
            return row[language]
    return error_list[-1][language]
| [
"numpy.array"
] | [((3295, 3315), 'numpy.array', 'np.array', (['error_list'], {}), '(error_list)\n', (3303, 3315), True, 'import numpy as np\n')] |
"""This module provides the analytical solution for computing the hessian matrix of our
loglikelihood function
"""
import numpy as np
from scipy.stats import norm
def compute_hessian(x0, X1, X0, Z1, Z0, Y1, Y0):
    """Compute the analytical Hessian of the log-likelihood function.

    Wraps the ``calc_hess_*`` subroutines, one per parameter group, and
    assembles their rows into a symmetric matrix.

    Parameters
    ----------
    x0 : array
        Stacked parameter vector ``[beta1, beta0, gamma, sd1, rho1, sd0, rho0]``
        (the last four entries are the distributional parameters).
    X1, X0 : array
        Outcome-equation covariates of the treated / untreated observations.
    Z1, Z0 : array
        Choice-equation covariates of the treated / untreated observations.
    Y1, Y0 : array
        Observed outcomes of the treated / untreated observations.

    Returns
    -------
    tuple
        ``(hessian_upper, aux)`` where ``hessian_upper`` has been mirrored
        into a full symmetric Hessian and ``aux`` is a copy of its
        upper-triangular part before mirroring.
    """
    # sample size and column counts of the design matrices
    num_obs = X1.shape[0] + X0.shape[0]
    n_col_X1 = X1.shape[1]
    n_col_X0 = X0.shape[1]
    n_col_Z = Z1.shape[1]
    # offsets that split x0 into beta1 | beta0 | gamma | (sd1, rho1, sd0, rho0)
    num_col_X1X0 = n_col_X1 + n_col_X0
    num_col_X1X0Z1 = num_col_X1X0 + n_col_Z
    beta1, beta0, gamma = (
        x0[:n_col_X1],
        x0[n_col_X1:num_col_X1X0],
        x0[num_col_X1X0:-4],
    )
    # note the storage order of the tail: sd1, rho1, sd0, rho0
    sd1, sd0, rho1v, rho0v = x0[-4], x0[-2], x0[-3], x0[-1]
    # standardized residuals (nu) and the normal cdf/pdf arguments (lambda)
    nu1 = (Y1 - np.dot(beta1, X1.T)) / sd1
    lambda1 = (np.dot(gamma, Z1.T) - rho1v * nu1) / (np.sqrt(1 - rho1v ** 2))
    nu0 = (Y0 - np.dot(beta0, X0.T)) / sd0
    lambda0 = (np.dot(gamma, Z0.T) - rho0v * nu0) / (np.sqrt(1 - rho0v ** 2))
    # eta: curvature weights built from the (inverse) Mills ratio terms
    eta1 = (
        -lambda1 * norm.pdf(lambda1) * norm.cdf(lambda1) - norm.pdf(lambda1) ** 2
    ) / (norm.cdf(lambda1) ** 2)
    eta0 = (
        lambda0 * norm.pdf(lambda0) * (1 - norm.cdf(lambda0)) - norm.pdf(lambda0) ** 2
    ) / (1 - norm.cdf(lambda0)) ** 2
    # eta-weighted cross products of the design matrices
    X1X1 = np.einsum("ij, i ->ij", X1, eta1).T @ X1
    X1Z1 = np.einsum("ij, i ->ij", X1, eta1).T @ Z1
    X0X0 = np.einsum("ij, i ->ij", X0, eta0).T @ X0
    X0Z0 = np.einsum("ij, i ->ij", X0, eta0).T @ Z0
    Z1Z1 = np.einsum("ij, i ->ij", Z1, eta1).T @ Z1
    Z0Z0 = np.einsum("ij, i ->ij", Z0, eta0).T @ Z0
    # rows of the Hessian: derivatives of the FOCs of each parameter group
    derv_beta1 = calc_hess_beta1(
        X1X1, X1Z1, X1, sd1, rho1v, nu1, lambda1, eta1, n_col_X1, n_col_X0, num_obs
    )
    derv_beta0 = calc_hess_beta0(
        X0X0, X0Z0, X0, sd0, rho0v, nu0, lambda0, eta0, n_col_X1, n_col_X0, num_obs
    )
    derv_gamma = calc_hess_gamma(
        Z1Z1,
        Z0Z0,
        Z1,
        X1,
        Z0,
        X0,
        sd0,
        sd1,
        rho0v,
        rho1v,
        eta1,
        eta0,
        nu0,
        nu1,
        lambda0,
        lambda1,
        num_col_X1X0,
        num_obs,
    )
    derv_dist = calc_hess_dist(
        Z1,
        Z0,
        gamma,
        sd1,
        sd0,
        rho1v,
        rho0v,
        lambda1,
        lambda0,
        nu1,
        nu0,
        eta1,
        eta0,
        num_col_X1X0Z1,
        num_obs,
    )
    # keep the upper triangle, then mirror it to obtain a symmetric matrix
    hessian_upper = np.triu(
        np.concatenate((derv_beta1, derv_beta0, derv_gamma, derv_dist), axis=0)
    )
    aux = hessian_upper.copy()
    for i in range(hessian_upper.shape[0]):
        # copy row i of the upper triangle into column i below the diagonal
        hessian_upper[:, i][i + 1 :] = hessian_upper[i][i + 1 :]
    return hessian_upper, aux
def calc_hess_beta1(
    X1X1, X1Z1, X1, sd1, rho1v, nu1, lambda1, eta1, n_col_X1, n_col_X0, num_obs
):
    """Derivatives of the first order conditions of beta1.

    Builds one Hessian row block (n_col_X1 rows) by concatenating, column
    group by column group, the derivatives wrt beta1, beta0 (zero), gamma,
    sd1, rho1, and sd0/rho0 (zero), then divides by the sample size.
    """
    # recurring scalar/vector terms of the rho1 and sd1 derivatives
    rho_aux1 = lambda1 * rho1v / (1 - rho1v ** 2) - nu1 / (1 - rho1v ** 2) ** 0.5
    rho_aux2 = rho1v ** 2 / ((1 - rho1v ** 2) ** (3 / 2)) + 1 / (1 - rho1v ** 2) ** 0.5
    sd_aux1 = rho1v ** 2 / (1 - rho1v ** 2)
    sd_aux2 = rho1v / np.sqrt(1 - rho1v ** 2)
    # derivative wrt beta1
    der_b1_beta1 = -(
        X1X1 * (rho1v ** 2 / (1 - rho1v ** 2)) * 1 / sd1 ** 2 - X1.T @ X1 / sd1 ** 2
    )
    # cross derivatives wrt beta0 are zero
    der_b1_beta1 = np.concatenate(
        (der_b1_beta1, np.zeros((n_col_X1, n_col_X0))), axis=1
    )
    # derivative wrt gamma
    der_b1_gamma = -(X1Z1 * rho1v / (sd1 * (1 - rho1v ** 2)))
    der_b1_gamma = np.concatenate((der_b1_beta1, der_b1_gamma), axis=1)
    # derivative wrt sigma1
    der_b1_sd = (
        -1
        / sd1
        * (
            (
                (eta1 * sd_aux1 * nu1 - norm.pdf(lambda1) / norm.cdf(lambda1) * sd_aux2)
                - 2 * nu1
            )
            * 1
            / sd1
        )
    )
    # collapse over observations and append as a single column
    der_b1_sd = np.expand_dims((der_b1_sd.T @ X1), 1)
    der_b1_sd = np.concatenate((der_b1_gamma, der_b1_sd), axis=1)
    # derivative wrt rho1
    der_b1_rho = (
        -(
            eta1 * rho_aux1 * rho1v / ((1 - rho1v ** 2) ** 0.5)
            + norm.pdf(lambda1) / norm.cdf(lambda1) * rho_aux2
        )
        * 1
        / sd1
    )
    # collapse over observations and append as a single column
    der_b1_rho = np.expand_dims((der_b1_rho.T @ X1), 1)
    der_b1_rho = np.concatenate((der_b1_sd, der_b1_rho), axis=1)
    # cross derivatives wrt sigma0 and rho0 are zero
    der_b1 = np.concatenate((der_b1_rho, np.zeros((n_col_X1, 2))), axis=1)
    der_beta1 = der_b1 / num_obs
    return der_beta1
def calc_hess_beta0(
    X0X0, X0Z0, X0, sd0, rho0v, nu0, lambda0, eta0, n_col_X1, n_col_X0, num_obs
):
    """Derivatives of the first order conditions of beta0.

    Mirror image of :func:`calc_hess_beta1` for the untreated regime:
    concatenates the derivatives wrt beta1 (zero), beta0, gamma, sd1/rho1
    (zero), sd0 and rho0, then divides by the sample size.
    """
    # recurring scalar/vector terms of the rho0 and sd0 derivatives
    rho_aux1 = lambda0 * rho0v / (1 - rho0v ** 2) - nu0 / (1 - rho0v ** 2) ** 0.5
    rho_aux2 = rho0v ** 2 / ((1 - rho0v ** 2) ** (3 / 2)) + 1 / (1 - rho0v ** 2) ** 0.5
    sd_aux1 = rho0v ** 2 / (1 - rho0v ** 2)
    sd_aux2 = rho0v / (np.sqrt(1 - rho0v ** 2))
    # cross derivatives wrt beta1 are zero
    # NOTE(review): this zero block is shaped (n_col_X1, n_col_X0); its row
    # count only matches the beta0 block below when n_col_X1 == n_col_X0 --
    # confirm whether (n_col_X0, n_col_X1) was intended.
    der_b0_beta1 = np.zeros((n_col_X1, n_col_X0))
    # derivative wrt beta0
    der_b0_beta0 = (
        -(X0X0 * (rho0v ** 2 / (1 - rho0v ** 2)) * 1 / sd0 ** 2) + X0.T @ X0 / sd0 ** 2
    )
    der_b0_beta0 = np.concatenate((der_b0_beta1, der_b0_beta0), axis=1)
    # derivative wrt gamma
    der_b0_gamma = -X0Z0 * rho0v / (1 - rho0v ** 2) * 1 / sd0
    der_b0_gamma = np.concatenate((der_b0_beta0, der_b0_gamma), axis=1)
    # cross derivatives wrt sigma1 and rho1 are zero
    der_b0_gamma = np.concatenate((der_b0_gamma, np.zeros((n_col_X0, 2))), axis=1)
    # derivative wrt sigma0
    der_b0_sd = (
        -(
            eta0 * nu0 * sd_aux1
            + norm.pdf(lambda0) / (1 - norm.cdf(lambda0)) * sd_aux2
            - 2 * nu0
        )
        * 1
        / sd0 ** 2
    )
    # collapse over observations and append as a single column
    der_b0_sd = np.expand_dims((der_b0_sd.T @ X0), 1)
    der_b0_sd = np.concatenate((der_b0_gamma, der_b0_sd), axis=1)
    # derivative wrt rho0
    der_b0_rho = (
        (
            eta0 * -rho_aux1 * (rho0v / ((1 - rho0v ** 2) ** 0.5))
            + norm.pdf(lambda0) / (1 - norm.cdf(lambda0)) * rho_aux2
        )
        * 1
        / sd0
    )
    # collapse over observations and append as a single column
    der_b0_rho = np.expand_dims((der_b0_rho.T @ X0), 1)
    der_b0_rho = np.concatenate((der_b0_sd, der_b0_rho), axis=1)
    der_beta0 = der_b0_rho / num_obs
    return der_beta0
def calc_hess_gamma(
    Z1Z1,
    Z0Z0,
    Z1,
    X1,
    Z0,
    X0,
    sd0,
    sd1,
    rho0v,
    rho1v,
    eta1,
    eta0,
    nu0,
    nu1,
    lambda0,
    lambda1,
    num_col_X1X0,
    num_obs,
):
    """Derivatives of the first order conditions of gamma.

    Both regimes contribute, so every term combines a treated (``*1``) and
    an untreated (``*0``) part.  Column groups are appended in the order
    beta1/beta0 (zero), gamma, sd1, rho1, sd0, rho0, then divided by the
    sample size.
    """
    # cross derivatives wrt beta1 and beta0 are zero
    der_gamma_beta = np.zeros((Z1.shape[1], num_col_X1X0))
    # derivative wrt gamma (both regimes contribute)
    der_g_gamma = -(1 / (1 - rho1v ** 2) * Z1Z1 + 1 / (1 - rho0v ** 2) * Z0Z0)
    der_g_gamma = np.concatenate((der_gamma_beta, der_g_gamma), axis=1)
    # derivative wrt sigma1
    der_g_sd1 = -(
        np.einsum("ij, i ->ij", Z1, eta1).T
        @ np.einsum("ij, i ->ij", X1, nu1)
        / sd1
        * rho1v
        / (1 - rho1v ** 2)
    )[:, 0]
    der_g_sd1 = np.expand_dims(der_g_sd1, 0).T
    der_g_sd1 = np.concatenate((der_g_gamma, der_g_sd1), axis=1)
    # derivative wrt rho1
    aux_rho11 = np.einsum("ij, i ->ij", Z1, eta1).T @ (
        lambda1 * rho1v / (1 - rho1v ** 2) - nu1 / np.sqrt(1 - rho1v ** 2)
    )
    aux_rho21 = Z1.T @ (norm.pdf(lambda1) / norm.cdf(lambda1))
    der_g_rho1 = -aux_rho11 * 1 / (np.sqrt(1 - rho1v ** 2)) - aux_rho21 * rho1v / (
        (1 - rho1v ** 2) ** (3 / 2)
    )
    der_g_rho1 = np.expand_dims(der_g_rho1, 0).T
    der_g_rho1 = np.concatenate((der_g_sd1, der_g_rho1), axis=1)
    # derivative wrt sigma0
    der_g_sd0 = (
        np.einsum("ij, i ->ij", Z0, eta0).T
        @ np.einsum("ij, i ->ij", X0, nu0)
        / sd0
        * rho0v
        / (1 - rho0v ** 2)
    )[:, 0]
    der_g_sd0 = np.expand_dims(der_g_sd0, 0).T
    der_g_sd0 = np.concatenate((der_g_rho1, -der_g_sd0), axis=1)
    # derivative wrt rho0
    aux_rho10 = np.einsum("ij, i ->ij", Z0, eta0).T @ (
        lambda0 * rho0v / (1 - rho0v ** 2) - nu0 / np.sqrt(1 - rho0v ** 2)
    )
    aux_rho20 = -Z0.T @ (norm.pdf(lambda0) / (1 - norm.cdf(lambda0)))
    der_g_rho0 = aux_rho10 * 1 / (np.sqrt(1 - rho0v ** 2)) + aux_rho20 * rho0v / (
        (1 - rho0v ** 2) ** (3 / 2)
    )
    der_g_rho0 = np.expand_dims(-der_g_rho0, 0).T
    der_g_rho0 = np.concatenate((der_g_sd0, der_g_rho0), axis=1)
    return der_g_rho0 / num_obs
def calc_hess_dist(
    Z1,
    Z0,
    gamma,
    sd1,
    sd0,
    rho1v,
    rho0v,
    lambda1,
    lambda0,
    nu1,
    nu0,
    eta1,
    eta0,
    num_col_X1X0Z1,
    num_obs,
):
    """Derivatives of the first order conditions of the distribution parameters.

    Returns four stacked Hessian rows, one each for sd1, rho1, sd0 and rho0.
    Because the Hessian is later symmetrized from its upper triangle
    (see :func:`compute_hessian`), each row only carries the entries on and
    to the right of its own diagonal position; everything before is padded
    with ``np.zeros``.
    """
    # auxiliary scalar/vector terms shared by several derivatives below
    Delta_sd1 = (
        +1 / sd1
        - (norm.pdf(lambda1) / norm.cdf(lambda1))
        * (rho1v * nu1 / (np.sqrt(1 - rho1v ** 2) * sd1))
        - nu1 ** 2 / sd1
    )
    Delta_sd1_der = (
        nu1
        / sd1
        * (
            -eta1 * (rho1v ** 2 * nu1) / (1 - rho1v ** 2)
            + (norm.pdf(lambda1) / norm.cdf(lambda1)) * rho1v / np.sqrt(1 - rho1v ** 2)
            + 2 * nu1
        )
    )
    Delta_sd0 = (
        +1 / sd0
        + (norm.pdf(lambda0) / (1 - norm.cdf(lambda0)))
        * (rho0v * nu0 / (np.sqrt(1 - rho0v ** 2) * sd0))
        - nu0 ** 2 / sd0
    )
    Delta_sd0_der = (
        nu0
        / sd0
        * (
            -eta0 * (rho0v ** 2 * nu0) / (1 - rho0v ** 2)
            - (norm.pdf(lambda0) / (1 - norm.cdf(lambda0)))
            * rho0v
            / np.sqrt(1 - rho0v ** 2)
            + 2 * nu0
        )
    )
    aux_rho11 = lambda1 * rho1v / (1 - rho1v ** 2) - nu1 / np.sqrt(1 - rho1v ** 2)
    aux_rho12 = 1 / (1 - rho1v ** 2) ** (3 / 2)
    aux_rho_rho11 = (np.dot(gamma, Z1.T) * rho1v - nu1) / (1 - rho1v ** 2) ** (3 / 2)
    aux_rho_rho12 = (
        2 * np.dot(gamma, Z1.T) * rho1v ** 2 + np.dot(gamma, Z1.T) - 3 * nu1 * rho1v
    ) / (1 - rho1v ** 2) ** (5 / 2)
    aux_rho01 = lambda0 * rho0v / (1 - rho0v ** 2) - nu0 / np.sqrt(1 - rho0v ** 2)
    aux_rho02 = 1 / (1 - rho0v ** 2) ** (3 / 2)
    aux_rho_rho01 = (np.dot(gamma, Z0.T) * rho0v - nu0) / (1 - rho0v ** 2) ** (3 / 2)
    aux_rho_rho02 = (
        2 * np.dot(gamma, Z0.T) * rho0v ** 2 + np.dot(gamma, Z0.T) - 3 * nu0 * rho0v
    ) / (1 - rho0v ** 2) ** (5 / 2)
    # row for sigma1
    # derivative wrt sd1
    derv_sd1_sd1 = 1 / sd1 * (-Delta_sd1 + Delta_sd1_der)
    # derivative wrt rho1
    derv_sd1_rho1 = (
        1
        / sd1
        * (
            -eta1 * aux_rho11 * (rho1v * nu1) / (np.sqrt(1 - rho1v ** 2))
            - (norm.pdf(lambda1) / norm.cdf(lambda1)) * aux_rho12 * nu1
        )
    )
    # sum over observations; pad the leading beta1/beta0/gamma columns
    derv_sd1 = np.append(
        np.zeros(num_col_X1X0Z1), [sum(derv_sd1_sd1), sum(derv_sd1_rho1), 0, 0]
    )
    # row for rho1
    # derivative wrt rho1
    derv_rho1v_rho1 = (
        -eta1 * aux_rho11 * aux_rho_rho11
        - (norm.pdf(lambda1) / norm.cdf(lambda1)) * aux_rho_rho12
    )
    derv_rho1 = np.append(np.zeros(num_col_X1X0Z1 + 1), [sum(derv_rho1v_rho1), 0, 0])
    # row for sigma0
    # derivative wrt sd0
    derv_sd0_sd0 = 1 / sd0 * (-Delta_sd0 + Delta_sd0_der)
    # derivative wrt rho0
    derv_sd0_rho0 = (
        1
        / sd0
        * (
            -eta0 * aux_rho01 * (rho0v * nu0) / (np.sqrt(1 - rho0v ** 2))
            + (norm.pdf(lambda0) / (1 - norm.cdf(lambda0))) * aux_rho02 * nu0
        )
    )
    derv_sd0 = np.append(
        np.zeros(num_col_X1X0Z1 + 2), [sum(derv_sd0_sd0), sum(derv_sd0_rho0)]
    )
    # row for rho0
    derv_rho0v_rho0 = -(
        eta0 * aux_rho01 * aux_rho_rho01
        - (norm.pdf(lambda0) / (1 - norm.cdf(lambda0))) * aux_rho_rho02
    )
    derv_rho0 = np.append(np.zeros(num_col_X1X0Z1 + 3), [sum(derv_rho0v_rho0)])
    derv_dist = np.stack([derv_sd1, derv_rho1, derv_sd0, derv_rho0]) / num_obs
    return derv_dist
| [
"numpy.sqrt",
"numpy.stack",
"numpy.zeros",
"numpy.dot",
"numpy.einsum",
"scipy.stats.norm.pdf",
"numpy.expand_dims",
"numpy.concatenate",
"scipy.stats.norm.cdf"
] | [((3736, 3788), 'numpy.concatenate', 'np.concatenate', (['(der_b1_beta1, der_b1_gamma)'], {'axis': '(1)'}), '((der_b1_beta1, der_b1_gamma), axis=1)\n', (3750, 3788), True, 'import numpy as np\n'), ((4109, 4144), 'numpy.expand_dims', 'np.expand_dims', (['(der_b1_sd.T @ X1)', '(1)'], {}), '(der_b1_sd.T @ X1, 1)\n', (4123, 4144), True, 'import numpy as np\n'), ((4163, 4212), 'numpy.concatenate', 'np.concatenate', (['(der_b1_gamma, der_b1_sd)'], {'axis': '(1)'}), '((der_b1_gamma, der_b1_sd), axis=1)\n', (4177, 4212), True, 'import numpy as np\n'), ((4483, 4519), 'numpy.expand_dims', 'np.expand_dims', (['(der_b1_rho.T @ X1)', '(1)'], {}), '(der_b1_rho.T @ X1, 1)\n', (4497, 4519), True, 'import numpy as np\n'), ((4539, 4586), 'numpy.concatenate', 'np.concatenate', (['(der_b1_sd, der_b1_rho)'], {'axis': '(1)'}), '((der_b1_sd, der_b1_rho), axis=1)\n', (4553, 4586), True, 'import numpy as np\n'), ((5319, 5349), 'numpy.zeros', 'np.zeros', (['(n_col_X1, n_col_X0)'], {}), '((n_col_X1, n_col_X0))\n', (5327, 5349), True, 'import numpy as np\n'), ((5497, 5549), 'numpy.concatenate', 'np.concatenate', (['(der_b0_beta1, der_b0_beta0)'], {'axis': '(1)'}), '((der_b0_beta1, der_b0_beta0), axis=1)\n', (5511, 5549), True, 'import numpy as np\n'), ((5643, 5695), 'numpy.concatenate', 'np.concatenate', (['(der_b0_beta0, der_b0_gamma)'], {'axis': '(1)'}), '((der_b0_beta0, der_b0_gamma), axis=1)\n', (5657, 5695), True, 'import numpy as np\n'), ((6044, 6079), 'numpy.expand_dims', 'np.expand_dims', (['(der_b0_sd.T @ X0)', '(1)'], {}), '(der_b0_sd.T @ X0, 1)\n', (6058, 6079), True, 'import numpy as np\n'), ((6098, 6147), 'numpy.concatenate', 'np.concatenate', (['(der_b0_gamma, der_b0_sd)'], {'axis': '(1)'}), '((der_b0_gamma, der_b0_sd), axis=1)\n', (6112, 6147), True, 'import numpy as np\n'), ((6383, 6419), 'numpy.expand_dims', 'np.expand_dims', (['(der_b0_rho.T @ X0)', '(1)'], {}), '(der_b0_rho.T @ X0, 1)\n', (6397, 6419), True, 'import numpy as np\n'), ((6439, 6486), 'numpy.concatenate', 
'np.concatenate', (['(der_b0_sd, der_b0_rho)'], {'axis': '(1)'}), '((der_b0_sd, der_b0_rho), axis=1)\n', (6453, 6486), True, 'import numpy as np\n'), ((6905, 6942), 'numpy.zeros', 'np.zeros', (['(Z1.shape[1], num_col_X1X0)'], {}), '((Z1.shape[1], num_col_X1X0))\n', (6913, 6942), True, 'import numpy as np\n'), ((7041, 7094), 'numpy.concatenate', 'np.concatenate', (['(der_gamma_beta, der_g_gamma)'], {'axis': '(1)'}), '((der_gamma_beta, der_g_gamma), axis=1)\n', (7055, 7094), True, 'import numpy as np\n'), ((7347, 7395), 'numpy.concatenate', 'np.concatenate', (['(der_g_gamma, der_g_sd1)'], {'axis': '(1)'}), '((der_g_gamma, der_g_sd1), axis=1)\n', (7361, 7395), True, 'import numpy as np\n'), ((7801, 7848), 'numpy.concatenate', 'np.concatenate', (['(der_g_sd1, der_g_rho1)'], {'axis': '(1)'}), '((der_g_sd1, der_g_rho1), axis=1)\n', (7815, 7848), True, 'import numpy as np\n'), ((8100, 8148), 'numpy.concatenate', 'np.concatenate', (['(der_g_rho1, -der_g_sd0)'], {'axis': '(1)'}), '((der_g_rho1, -der_g_sd0), axis=1)\n', (8114, 8148), True, 'import numpy as np\n'), ((8561, 8608), 'numpy.concatenate', 'np.concatenate', (['(der_g_sd0, der_g_rho0)'], {'axis': '(1)'}), '((der_g_sd0, der_g_rho0), axis=1)\n', (8575, 8608), True, 'import numpy as np\n'), ((880, 903), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (887, 903), True, 'import numpy as np\n'), ((1001, 1024), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (1008, 1024), True, 'import numpy as np\n'), ((2574, 2645), 'numpy.concatenate', 'np.concatenate', (['(derv_beta1, derv_beta0, derv_gamma, derv_dist)'], {'axis': '(0)'}), '((derv_beta1, derv_beta0, derv_gamma, derv_dist), axis=0)\n', (2588, 2645), True, 'import numpy as np\n'), ((3327, 3350), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (3334, 3350), True, 'import numpy as np\n'), ((5248, 5271), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (5255, 5271), 
True, 'import numpy as np\n'), ((7300, 7328), 'numpy.expand_dims', 'np.expand_dims', (['der_g_sd1', '(0)'], {}), '(der_g_sd1, 0)\n', (7314, 7328), True, 'import numpy as np\n'), ((7752, 7781), 'numpy.expand_dims', 'np.expand_dims', (['der_g_rho1', '(0)'], {}), '(der_g_rho1, 0)\n', (7766, 7781), True, 'import numpy as np\n'), ((8053, 8081), 'numpy.expand_dims', 'np.expand_dims', (['der_g_sd0', '(0)'], {}), '(der_g_sd0, 0)\n', (8067, 8081), True, 'import numpy as np\n'), ((8511, 8541), 'numpy.expand_dims', 'np.expand_dims', (['(-der_g_rho0)', '(0)'], {}), '(-der_g_rho0, 0)\n', (8525, 8541), True, 'import numpy as np\n'), ((10970, 10994), 'numpy.zeros', 'np.zeros', (['num_col_X1X0Z1'], {}), '(num_col_X1X0Z1)\n', (10978, 10994), True, 'import numpy as np\n'), ((11246, 11274), 'numpy.zeros', 'np.zeros', (['(num_col_X1X0Z1 + 1)'], {}), '(num_col_X1X0Z1 + 1)\n', (11254, 11274), True, 'import numpy as np\n'), ((11671, 11699), 'numpy.zeros', 'np.zeros', (['(num_col_X1X0Z1 + 2)'], {}), '(num_col_X1X0Z1 + 2)\n', (11679, 11699), True, 'import numpy as np\n'), ((11933, 11961), 'numpy.zeros', 'np.zeros', (['(num_col_X1X0Z1 + 3)'], {}), '(num_col_X1X0Z1 + 3)\n', (11941, 11961), True, 'import numpy as np\n'), ((12004, 12056), 'numpy.stack', 'np.stack', (['[derv_sd1, derv_rho1, derv_sd0, derv_rho0]'], {}), '([derv_sd1, derv_rho1, derv_sd0, derv_rho0])\n', (12012, 12056), True, 'import numpy as np\n'), ((800, 819), 'numpy.dot', 'np.dot', (['beta1', 'X1.T'], {}), '(beta1, X1.T)\n', (806, 819), True, 'import numpy as np\n'), ((842, 861), 'numpy.dot', 'np.dot', (['gamma', 'Z1.T'], {}), '(gamma, Z1.T)\n', (848, 861), True, 'import numpy as np\n'), ((921, 940), 'numpy.dot', 'np.dot', (['beta0', 'X0.T'], {}), '(beta0, X0.T)\n', (927, 940), True, 'import numpy as np\n'), ((963, 982), 'numpy.dot', 'np.dot', (['gamma', 'Z0.T'], {}), '(gamma, Z0.T)\n', (969, 982), True, 'import numpy as np\n'), ((1131, 1148), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (1139, 1148), 
False, 'from scipy.stats import norm\n'), ((1329, 1362), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'X1', 'eta1'], {}), "('ij, i ->ij', X1, eta1)\n", (1338, 1362), True, 'import numpy as np\n'), ((1381, 1414), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'X1', 'eta1'], {}), "('ij, i ->ij', X1, eta1)\n", (1390, 1414), True, 'import numpy as np\n'), ((1434, 1467), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'X0', 'eta0'], {}), "('ij, i ->ij', X0, eta0)\n", (1443, 1467), True, 'import numpy as np\n'), ((1486, 1519), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'X0', 'eta0'], {}), "('ij, i ->ij', X0, eta0)\n", (1495, 1519), True, 'import numpy as np\n'), ((1539, 1572), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'Z1', 'eta1'], {}), "('ij, i ->ij', Z1, eta1)\n", (1548, 1572), True, 'import numpy as np\n'), ((1591, 1624), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'Z0', 'eta0'], {}), "('ij, i ->ij', Z0, eta0)\n", (1600, 1624), True, 'import numpy as np\n'), ((3581, 3611), 'numpy.zeros', 'np.zeros', (['(n_col_X1, n_col_X0)'], {}), '((n_col_X1, n_col_X0))\n', (3589, 3611), True, 'import numpy as np\n'), ((4665, 4688), 'numpy.zeros', 'np.zeros', (['(n_col_X1, 2)'], {}), '((n_col_X1, 2))\n', (4673, 4688), True, 'import numpy as np\n'), ((5782, 5805), 'numpy.zeros', 'np.zeros', (['(n_col_X0, 2)'], {}), '((n_col_X0, 2))\n', (5790, 5805), True, 'import numpy as np\n'), ((7424, 7457), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'Z1', 'eta1'], {}), "('ij, i ->ij', Z1, eta1)\n", (7433, 7457), True, 'import numpy as np\n'), ((7569, 7586), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (7577, 7586), False, 'from scipy.stats import norm\n'), ((7589, 7606), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (7597, 7606), False, 'from scipy.stats import norm\n'), ((7644, 7667), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (7651, 7667), True, 'import numpy as np\n'), 
((8177, 8210), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'Z0', 'eta0'], {}), "('ij, i ->ij', Z0, eta0)\n", (8186, 8210), True, 'import numpy as np\n'), ((8323, 8340), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (8331, 8340), False, 'from scipy.stats import norm\n'), ((8403, 8426), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (8410, 8426), True, 'import numpy as np\n'), ((9924, 9947), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (9931, 9947), True, 'import numpy as np\n'), ((10286, 10309), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (10293, 10309), True, 'import numpy as np\n'), ((1079, 1096), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (1087, 1096), False, 'from scipy.stats import norm\n'), ((1099, 1116), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (1107, 1116), False, 'from scipy.stats import norm\n'), ((1232, 1249), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (1240, 1249), False, 'from scipy.stats import norm\n'), ((1268, 1285), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (1276, 1285), False, 'from scipy.stats import norm\n'), ((7515, 7538), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (7522, 7538), True, 'import numpy as np\n'), ((8268, 8291), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (8275, 8291), True, 'import numpy as np\n'), ((8348, 8365), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (8356, 8365), False, 'from scipy.stats import norm\n'), ((10018, 10037), 'numpy.dot', 'np.dot', (['gamma', 'Z1.T'], {}), '(gamma, Z1.T)\n', (10024, 10037), True, 'import numpy as np\n'), ((10152, 10171), 'numpy.dot', 'np.dot', (['gamma', 'Z1.T'], {}), '(gamma, Z1.T)\n', (10158, 10171), True, 'import numpy as np\n'), ((10380, 10399), 'numpy.dot', 'np.dot', (['gamma', 
'Z0.T'], {}), '(gamma, Z0.T)\n', (10386, 10399), True, 'import numpy as np\n'), ((10514, 10533), 'numpy.dot', 'np.dot', (['gamma', 'Z0.T'], {}), '(gamma, Z0.T)\n', (10520, 10533), True, 'import numpy as np\n'), ((10802, 10825), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (10809, 10825), True, 'import numpy as np\n'), ((11159, 11176), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (11167, 11176), False, 'from scipy.stats import norm\n'), ((11179, 11196), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (11187, 11196), False, 'from scipy.stats import norm\n'), ((11518, 11541), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (11525, 11541), True, 'import numpy as np\n'), ((1059, 1076), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (1067, 1076), False, 'from scipy.stats import norm\n'), ((1186, 1203), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (1194, 1203), False, 'from scipy.stats import norm\n'), ((1211, 1228), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (1219, 1228), False, 'from scipy.stats import norm\n'), ((9037, 9054), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (9045, 9054), False, 'from scipy.stats import norm\n'), ((9057, 9074), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (9065, 9074), False, 'from scipy.stats import norm\n'), ((9347, 9370), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (9354, 9370), True, 'import numpy as np\n'), ((9456, 9473), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (9464, 9473), False, 'from scipy.stats import norm\n'), ((9802, 9825), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (9809, 9825), True, 'import numpy as np\n'), ((11840, 11857), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (11848, 11857), 
False, 'from scipy.stats import norm\n'), ((6269, 6286), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (6277, 6286), False, 'from scipy.stats import norm\n'), ((7935, 7967), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'X0', 'nu0'], {}), "('ij, i ->ij', X0, nu0)\n", (7944, 7967), True, 'import numpy as np\n'), ((9102, 9125), 'numpy.sqrt', 'np.sqrt', (['(1 - rho1v ** 2)'], {}), '(1 - rho1v ** 2)\n', (9109, 9125), True, 'import numpy as np\n'), ((9481, 9498), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (9489, 9498), False, 'from scipy.stats import norm\n'), ((9527, 9550), 'numpy.sqrt', 'np.sqrt', (['(1 - rho0v ** 2)'], {}), '(1 - rho0v ** 2)\n', (9534, 9550), True, 'import numpy as np\n'), ((10117, 10136), 'numpy.dot', 'np.dot', (['gamma', 'Z1.T'], {}), '(gamma, Z1.T)\n', (10123, 10136), True, 'import numpy as np\n'), ((10479, 10498), 'numpy.dot', 'np.dot', (['gamma', 'Z0.T'], {}), '(gamma, Z0.T)\n', (10485, 10498), True, 'import numpy as np\n'), ((10842, 10859), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (10850, 10859), False, 'from scipy.stats import norm\n'), ((10862, 10879), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (10870, 10879), False, 'from scipy.stats import norm\n'), ((11558, 11575), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (11566, 11575), False, 'from scipy.stats import norm\n'), ((11865, 11882), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (11873, 11882), False, 'from scipy.stats import norm\n'), ((4342, 4359), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (4350, 4359), False, 'from scipy.stats import norm\n'), ((4362, 4379), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (4370, 4379), False, 'from scipy.stats import norm\n'), ((6294, 6311), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (6302, 6311), False, 'from 
scipy.stats import norm\n'), ((7182, 7214), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'X1', 'nu1'], {}), "('ij, i ->ij', X1, nu1)\n", (7191, 7214), True, 'import numpy as np\n'), ((7889, 7922), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'Z0', 'eta0'], {}), "('ij, i ->ij', Z0, eta0)\n", (7898, 7922), True, 'import numpy as np\n'), ((9298, 9315), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (9306, 9315), False, 'from scipy.stats import norm\n'), ((9318, 9335), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (9326, 9335), False, 'from scipy.stats import norm\n'), ((9723, 9740), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (9731, 9740), False, 'from scipy.stats import norm\n'), ((11583, 11600), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (11591, 11600), False, 'from scipy.stats import norm\n'), ((3921, 3938), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda1'], {}), '(lambda1)\n', (3929, 3938), False, 'from scipy.stats import norm\n'), ((3941, 3958), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda1'], {}), '(lambda1)\n', (3949, 3958), False, 'from scipy.stats import norm\n'), ((5905, 5922), 'scipy.stats.norm.pdf', 'norm.pdf', (['lambda0'], {}), '(lambda0)\n', (5913, 5922), False, 'from scipy.stats import norm\n'), ((7136, 7169), 'numpy.einsum', 'np.einsum', (['"""ij, i ->ij"""', 'Z1', 'eta1'], {}), "('ij, i ->ij', Z1, eta1)\n", (7145, 7169), True, 'import numpy as np\n'), ((9748, 9765), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (9756, 9765), False, 'from scipy.stats import norm\n'), ((5930, 5947), 'scipy.stats.norm.cdf', 'norm.cdf', (['lambda0'], {}), '(lambda0)\n', (5938, 5947), False, 'from scipy.stats import norm\n')] |
import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
from .config import cfg, MEANS, STD
from pycocotools import mask as maskUtils
import contextlib
import io
import logging
import time
def collate_fn_flying_chairs(batch):
    """Collate (img1, img2, flow) sample triples into three batched tensors.

    Args:
        batch: sequence of 3-tuples of equally-shaped tensors.

    Returns:
        Tuple of three tensors, each with a new leading batch dimension.
    """
    first_frames = [sample[0] for sample in batch]
    second_frames = [sample[1] for sample in batch]
    flow_fields = [sample[2] for sample in batch]
    return (
        torch.stack(first_frames, 0),
        torch.stack(second_frames, 0),
        torch.stack(flow_fields, 0),
    )
class FlyingChairs(data.Dataset):
    """FlyingChairs optical flow dataset.

    Every sample consists of two RGB frames and the dense optical flow
    between them, stored on disk as ``{:05d}_img1.ppm``, ``{:05d}_img2.ppm``
    and ``{:05d}_flow.flo`` (1-based file numbering).

    Args:
        image_path (string): Directory holding the ``.ppm``/``.flo`` files.
        info_file (string): Split file with one integer label per sample:
            ``1`` marks a training sample, ``2`` a validation sample.
        is_train (bool): Select the training split when True, otherwise the
            validation split.
    """

    def __init__(self, image_path, info_file, is_train=True):
        self.root = image_path
        with open(info_file, "r") as file:
            res = file.read()
        ids = res.split('\n')
        ids = [int(x) for x in ids if x]
        # Keep the zero-based sample indices whose split label matches.
        keep_label = 1 if is_train else 2
        ids = {idx: x for idx, x in enumerate(ids) if x == keep_label}
        self.ids = list(ids.keys())

    def __getitem__(self, index):
        """Return ``(img1, img2, flow)`` tensors for the sample at *index*."""
        # Files on disk are numbered starting at 1; self.ids is 0-based.
        flow_id = self.ids[index] + 1
        img1_path = os.path.join(self.root, "{:05d}_img1.ppm".format(flow_id))
        img2_path = os.path.join(self.root, "{:05d}_img2.ppm".format(flow_id))
        flow_path = os.path.join(self.root, "{:05d}_flow.flo".format(flow_id))
        img1 = self.readImage(img1_path)
        img2 = self.readImage(img2_path)
        flow = self.readFlow(flow_path)
        h, w, _ = img1.shape
        # Rescale flow relative to the original frame extent; the extra
        # factor 8 presumably matches the downstream model's flow units --
        # TODO confirm against the consumer of this dataset.
        flow = flow * 2 / np.array([w, h]) * 8
        target_size = (550, 550)  # FIXME: hard coded image size
        img1 = cv2.resize(img1, target_size)
        img2 = cv2.resize(img2, target_size)
        flow = cv2.resize(flow, target_size)
        # Standardize with dataset mean/std, then flip BGR (OpenCV) -> RGB.
        img1 = (img1 - MEANS) / STD
        img2 = (img2 - MEANS) / STD
        img1 = img1[:, :, ::-1]
        img2 = img2[:, :, ::-1]
        img1 = img1.astype(np.float32)
        img2 = img2.astype(np.float32)
        flow = flow.astype(np.float32)
        t = transforms.ToTensor()
        return t(img1), t(img2), t(flow)

    def __len__(self):
        return len(self.ids)

    @staticmethod
    def readFlow(name):
        """Read a Middlebury ``.flo`` file and return an (H, W, 2) float32 array.

        Layout: 4-byte magic ``PIEH``, int32 width, int32 height, then
        width * height * 2 interleaved float32 (u, v) values.
        """
        # Use a context manager so the handle is closed even on parse errors
        # (the previous implementation leaked the open file object).
        with open(name, 'rb') as f:
            header = f.read(4)
            if header.decode("utf-8") != 'PIEH':
                raise Exception('Flow file header does not contain PIEH')
            width = np.fromfile(f, np.int32, 1).squeeze()
            height = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
        return flow.astype(np.float32)

    @staticmethod
    def readImage(name):
        """Load an image from disk (BGR channel order, as returned by OpenCV)."""
        return cv2.imread(name)
| [
"numpy.fromfile",
"torch.stack",
"numpy.array",
"cv2.resize",
"torchvision.transforms.ToTensor",
"cv2.imread"
] | [((517, 539), 'torch.stack', 'torch.stack', (['imgs_1', '(0)'], {}), '(imgs_1, 0)\n', (528, 539), False, 'import torch\n'), ((541, 563), 'torch.stack', 'torch.stack', (['imgs_2', '(0)'], {}), '(imgs_2, 0)\n', (552, 563), False, 'import torch\n'), ((565, 586), 'torch.stack', 'torch.stack', (['flows', '(0)'], {}), '(flows, 0)\n', (576, 586), False, 'import torch\n'), ((2238, 2267), 'cv2.resize', 'cv2.resize', (['img1', 'target_size'], {}), '(img1, target_size)\n', (2248, 2267), False, 'import cv2\n'), ((2283, 2312), 'cv2.resize', 'cv2.resize', (['img2', 'target_size'], {}), '(img2, target_size)\n', (2293, 2312), False, 'import cv2\n'), ((2328, 2357), 'cv2.resize', 'cv2.resize', (['flow', 'target_size'], {}), '(flow, target_size)\n', (2338, 2357), False, 'import cv2\n'), ((2627, 2648), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2646, 2648), True, 'import torchvision.transforms as transforms\n'), ((3258, 3274), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (3268, 3274), False, 'import cv2\n'), ((2137, 2153), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (2145, 2153), True, 'import numpy as np\n'), ((2975, 3002), 'numpy.fromfile', 'np.fromfile', (['f', 'np.int32', '(1)'], {}), '(f, np.int32, 1)\n', (2986, 3002), True, 'import numpy as np\n'), ((3030, 3057), 'numpy.fromfile', 'np.fromfile', (['f', 'np.int32', '(1)'], {}), '(f, np.int32, 1)\n', (3041, 3057), True, 'import numpy as np\n'), ((3084, 3130), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32', '(width * height * 2)'], {}), '(f, np.float32, width * height * 2)\n', (3095, 3130), True, 'import numpy as np\n')] |
import numpy as np
from discretize.utils.matrix_utils import mkvc
from discretize.utils.code_utils import deprecate_function
def cylindrical_to_cartesian(grid, vec=None):
    r"""Transform from cylindrical to cartesian coordinates.

    Take a grid defined in cylindrical coordinates :math:`(r, \theta, z)` and
    transform it to cartesian coordinates.  When *vec* is given, the vector
    components defined at the grid locations are rotated into the cartesian
    frame instead of transforming the locations themselves.

    Note: the docstring is now a raw string -- previously ``\theta`` embedded
    a literal tab character via the ``\t`` escape.

    Parameters
    ----------
    grid : numpy.ndarray
        (n, 3) array of cylindrical locations ``(r, theta, z)``.
    vec : numpy.ndarray, optional
        Vector components at the grid locations; a flattened input is
        reshaped (Fortran order) to match ``grid``.

    Returns
    -------
    numpy.ndarray
        Cartesian locations, or cartesian vector components when *vec* is
        given.
    """
    grid = np.atleast_2d(grid)
    if vec is None:
        # Point transform: x = r cos(theta), y = r sin(theta), z unchanged.
        return np.hstack(
            [
                mkvc(grid[:, 0] * np.cos(grid[:, 1]), 2),
                mkvc(grid[:, 0] * np.sin(grid[:, 1]), 2),
                mkvc(grid[:, 2], 2),
            ]
        )
    if len(vec.shape) == 1 or vec.shape[1] == 1:
        vec = vec.reshape(grid.shape, order="F")
    # Vector transform: rotate the (radial, azimuthal) components by theta.
    x = vec[:, 0] * np.cos(grid[:, 1]) - vec[:, 1] * np.sin(grid[:, 1])
    y = vec[:, 0] * np.sin(grid[:, 1]) + vec[:, 1] * np.cos(grid[:, 1])
    newvec = [x, y]
    if grid.shape[1] == 3:
        z = vec[:, 2]
        newvec += [z]
    return np.vstack(newvec).T
def cyl2cart(grid, vec=None):
    """An alias for cylindrical_to_cartesian"""
    # Thin shorthand wrapper; see cylindrical_to_cartesian for details.
    return cylindrical_to_cartesian(grid, vec)
def cartesian_to_cylindrical(grid, vec=None):
    """
    Take a grid defined in cartesian coordinates and transform it to cyl
    coordinates
    """
    # When no vector is supplied, the grid locations themselves are used as
    # the vectors to rotate.  NOTE(review): in that case the second output
    # column is the azimuthal vector *component* (which is zero for a pure
    # position vector), not the cylindrical angle theta -- confirm this is
    # the intended contract before relying on it as a coordinate transform.
    if vec is None:
        vec = grid
    vec = np.atleast_2d(vec)
    grid = np.atleast_2d(grid)
    # Angle of each grid point about the z-axis.
    theta = np.arctan2(grid[:, 1], grid[:, 0])
    # Rotate (x, y) components into (radial, azimuthal); z passes through.
    return np.hstack(
        [
            mkvc(np.cos(theta) * vec[:, 0] + np.sin(theta) * vec[:, 1], 2),
            mkvc(-np.sin(theta) * vec[:, 0] + np.cos(theta) * vec[:, 1], 2),
            mkvc(vec[:, 2], 2),
        ]
    )
def cart2cyl(grid, vec=None):
    """An alias for cartesian_to_cylindrical"""
    # BUG FIX: this alias previously forwarded to cylindrical_to_cartesian,
    # i.e. it applied the *inverse* of the transform its name and docstring
    # promise.  Forward to cartesian_to_cylindrical instead.
    return cartesian_to_cylindrical(grid, vec)
def rotation_matrix_from_normals(v0, v1, tol=1e-20):
    """
    Build the rotation matrix mapping the direction of ``v0`` onto the
    direction of ``v1`` with Rodrigues' rotation formula, rotating about the
    axis ``v0 x v1``.
    https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula

    :param numpy.array v0: vector of length 3
    :param numpy.array v1: vector of length 3
    :param tol = 1e-20: tolerance. If the norm of the cross product between the two vectors is below this, no rotation is performed
    :rtype: numpy.array, 3x3
    :return: rotation matrix which rotates the frame so that n0 is aligned with n1
    """
    if len(v0) != 3:
        raise ValueError("Length of n0 should be 3")
    if len(v1) != 3:
        raise ValueError("Length of n1 should be 3")
    # Work with unit vectors so their dot product is the cosine of the angle.
    n0 = v0 * 1.0 / np.linalg.norm(v0)
    n1 = v1 * 1.0 / np.linalg.norm(v1)
    cos_angle = n0.dot(n1)
    # The rotation axis is perpendicular to both directions.
    axis = np.cross(n0, n1)
    axis_norm = np.linalg.norm(axis)
    if axis_norm < tol:
        # (Anti-)parallel inputs: no well-defined axis, return the identity.
        return np.eye(3, dtype=float)
    axis *= 1.0 / axis_norm
    cosT = cos_angle / (np.linalg.norm(n0) * np.linalg.norm(n1))
    sinT = np.sqrt(1.0 - cos_angle ** 2)
    # Skew-symmetric cross-product matrix of the rotation axis.
    K = np.array(
        [
            [0.0, -axis[2], axis[1]],
            [axis[2], 0.0, -axis[0]],
            [-axis[1], axis[0], 0.0],
        ],
        dtype=float,
    )
    # Rodrigues: R = I + sin(T) K + (1 - cos(T)) K^2
    return np.eye(3, dtype=float) + sinT * K + (1.0 - cosT) * K.dot(K)
def rotate_points_from_normals(XYZ, n0, n1, x0=np.r_[0.0, 0.0, 0.0]):
    """
    rotates a grid so that the vector n0 is aligned with the vector n1

    :param numpy.array n0: vector of length 3, should have norm 1
    :param numpy.array n1: vector of length 3, should have norm 1
    :param numpy.array x0: vector of length 3, point about which we perform the rotation
    :rtype: numpy.array, 3x3
    :return: rotation matrix which rotates the frame so that n0 is aligned with n1
    """
    # NOTE(review): the default x0 is a shared mutable numpy array; it is
    # only read here, but mutating it at a call site would change the default
    # for all subsequent calls.
    R = rotation_matrix_from_normals(n0, n1)
    if XYZ.shape[1] != 3:
        raise ValueError("Grid XYZ should be 3 wide")
    if len(x0) != 3:
        raise ValueError("x0 should have length 3")
    # Translate so the pivot x0 sits at the origin, rotate, translate back.
    X0 = np.ones([XYZ.shape[0], 1]) * mkvc(x0)
    return (XYZ - X0).dot(R.T) + X0  # equivalent to (R*(XYZ - X0)).T + X0
# Deprecated camelCase aliases kept for backwards compatibility; calling them
# emits a deprecation warning and they are scheduled for removal in 1.0.0.
rotationMatrixFromNormals = deprecate_function(
    rotation_matrix_from_normals, "rotationMatrixFromNormals", removal_version="1.0.0"
)
rotatePointsFromNormals = deprecate_function(
    rotate_points_from_normals, "rotatePointsFromNormals", removal_version="1.0.0"
)
| [
"numpy.atleast_2d",
"numpy.eye",
"numpy.sqrt",
"numpy.cross",
"numpy.ones",
"discretize.utils.matrix_utils.mkvc",
"numpy.array",
"numpy.arctan2",
"numpy.vstack",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"discretize.utils.code_utils.deprecate_function"
] | [((4172, 4278), 'discretize.utils.code_utils.deprecate_function', 'deprecate_function', (['rotation_matrix_from_normals', '"""rotationMatrixFromNormals"""'], {'removal_version': '"""1.0.0"""'}), "(rotation_matrix_from_normals,\n 'rotationMatrixFromNormals', removal_version='1.0.0')\n", (4190, 4278), False, 'from discretize.utils.code_utils import deprecate_function\n'), ((4307, 4409), 'discretize.utils.code_utils.deprecate_function', 'deprecate_function', (['rotate_points_from_normals', '"""rotatePointsFromNormals"""'], {'removal_version': '"""1.0.0"""'}), "(rotate_points_from_normals, 'rotatePointsFromNormals',\n removal_version='1.0.0')\n", (4325, 4409), False, 'from discretize.utils.code_utils import deprecate_function\n'), ((321, 340), 'numpy.atleast_2d', 'np.atleast_2d', (['grid'], {}), '(grid)\n', (334, 340), True, 'import numpy as np\n'), ((1277, 1295), 'numpy.atleast_2d', 'np.atleast_2d', (['vec'], {}), '(vec)\n', (1290, 1295), True, 'import numpy as np\n'), ((1307, 1326), 'numpy.atleast_2d', 'np.atleast_2d', (['grid'], {}), '(grid)\n', (1320, 1326), True, 'import numpy as np\n'), ((1340, 1374), 'numpy.arctan2', 'np.arctan2', (['grid[:, 1]', 'grid[:, 0]'], {}), '(grid[:, 1], grid[:, 0])\n', (1350, 1374), True, 'import numpy as np\n'), ((2823, 2839), 'numpy.cross', 'np.cross', (['n0', 'n1'], {}), '(n0, n1)\n', (2831, 2839), True, 'import numpy as np\n'), ((3032, 3059), 'numpy.sqrt', 'np.sqrt', (['(1.0 - n0dotn1 ** 2)'], {}), '(1.0 - n0dotn1 ** 2)\n', (3039, 3059), True, 'import numpy as np\n'), ((3070, 3182), 'numpy.array', 'np.array', (['[[0.0, -rotAx[2], rotAx[1]], [rotAx[2], 0.0, -rotAx[0]], [-rotAx[1], rotAx[\n 0], 0.0]]'], {'dtype': 'float'}), '([[0.0, -rotAx[2], rotAx[1]], [rotAx[2], 0.0, -rotAx[0]], [-rotAx[1\n ], rotAx[0], 0.0]], dtype=float)\n', (3078, 3182), True, 'import numpy as np\n'), ((927, 944), 'numpy.vstack', 'np.vstack', (['newvec'], {}), '(newvec)\n', (936, 944), True, 'import numpy as np\n'), ((2648, 2666), 'numpy.linalg.norm', 
'np.linalg.norm', (['v0'], {}), '(v0)\n', (2662, 2666), True, 'import numpy as np\n'), ((2687, 2705), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2701, 2705), True, 'import numpy as np\n'), ((2848, 2869), 'numpy.linalg.norm', 'np.linalg.norm', (['rotAx'], {}), '(rotAx)\n', (2862, 2869), True, 'import numpy as np\n'), ((2892, 2914), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (2898, 2914), True, 'import numpy as np\n'), ((2935, 2956), 'numpy.linalg.norm', 'np.linalg.norm', (['rotAx'], {}), '(rotAx)\n', (2949, 2956), True, 'import numpy as np\n'), ((4028, 4054), 'numpy.ones', 'np.ones', (['[XYZ.shape[0], 1]'], {}), '([XYZ.shape[0], 1])\n', (4035, 4054), True, 'import numpy as np\n'), ((4057, 4065), 'discretize.utils.matrix_utils.mkvc', 'mkvc', (['x0'], {}), '(x0)\n', (4061, 4065), False, 'from discretize.utils.matrix_utils import mkvc\n'), ((699, 717), 'numpy.cos', 'np.cos', (['grid[:, 1]'], {}), '(grid[:, 1])\n', (705, 717), True, 'import numpy as np\n'), ((732, 750), 'numpy.sin', 'np.sin', (['grid[:, 1]'], {}), '(grid[:, 1])\n', (738, 750), True, 'import numpy as np\n'), ((771, 789), 'numpy.sin', 'np.sin', (['grid[:, 1]'], {}), '(grid[:, 1])\n', (777, 789), True, 'import numpy as np\n'), ((804, 822), 'numpy.cos', 'np.cos', (['grid[:, 1]'], {}), '(grid[:, 1])\n', (810, 822), True, 'import numpy as np\n'), ((1573, 1591), 'discretize.utils.matrix_utils.mkvc', 'mkvc', (['vec[:, 2]', '(2)'], {}), '(vec[:, 2], 2)\n', (1577, 1591), False, 'from discretize.utils.matrix_utils import mkvc\n'), ((2980, 2998), 'numpy.linalg.norm', 'np.linalg.norm', (['n0'], {}), '(n0)\n', (2994, 2998), True, 'import numpy as np\n'), ((3001, 3019), 'numpy.linalg.norm', 'np.linalg.norm', (['n1'], {}), '(n1)\n', (3015, 3019), True, 'import numpy as np\n'), ((3260, 3282), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (3266, 3282), True, 'import numpy as np\n'), ((534, 553), 'discretize.utils.matrix_utils.mkvc', 
'mkvc', (['grid[:, 2]', '(2)'], {}), '(grid[:, 2], 2)\n', (538, 553), False, 'from discretize.utils.matrix_utils import mkvc\n'), ((436, 454), 'numpy.cos', 'np.cos', (['grid[:, 1]'], {}), '(grid[:, 1])\n', (442, 454), True, 'import numpy as np\n'), ((494, 512), 'numpy.sin', 'np.sin', (['grid[:, 1]'], {}), '(grid[:, 1])\n', (500, 512), True, 'import numpy as np\n'), ((1425, 1438), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1431, 1438), True, 'import numpy as np\n'), ((1453, 1466), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1459, 1466), True, 'import numpy as np\n'), ((1530, 1543), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1536, 1543), True, 'import numpy as np\n'), ((1502, 1515), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1508, 1515), True, 'import numpy as np\n')] |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Union, List
from hadar.optimizer.utils import JSON
T = TypeVar("T")
class NumericalValue(JSON, ABC, Generic[T]):
    """
    Interface to handle numerical value in study

    Subclasses store a value of type ``T`` together with the study horizon
    (number of time steps) and the number of scenarios, and expose it through
    ``obj[scenario, time_step]`` indexing plus rich comparisons.
    """
    def __init__(self, value: T, horizon: int, nb_scn: int):
        # value: underlying numerical data (scalar or numpy array)
        # horizon: number of time steps per scenario
        # nb_scn: number of scenarios
        self.value = value
        self.horizon = horizon
        self.nb_scn = nb_scn
    @abstractmethod
    def __getitem__(self, item) -> float:
        """Return the value at ``item == (scenario, time_step)``."""
        pass
    @abstractmethod
    def __lt__(self, other) -> bool:
        pass
    def __le__(self, other) -> bool:
        # Derived from __gt__ so subclasses only implement strict comparisons.
        return not self.__gt__(other)
    @abstractmethod
    def __gt__(self, other) -> bool:
        pass
    def __ge__(self, other) -> bool:
        # Derived from __lt__ so subclasses only implement strict comparisons.
        return not self.__lt__(other)
    @abstractmethod
    def flatten(self) -> np.ndarray:
        """
        flat data into 1D matrix.
        :return: [v[0, 0], v[0, 1], v[0, 2], ..., v[1, i], v[2, i], ..., v[j, i])
        """
        pass
class ScalarNumericalValue(NumericalValue[float]):
    """
    Scalar implementation (float or int): the same value is returned for
    every valid (scenario, time step) pair.
    """
    def __getitem__(self, item) -> float:
        scn, step = item
        if scn >= self.nb_scn:
            message = "There are %d scenario you ask the %dth" % (self.nb_scn, scn)
            raise IndexError(message)
        if step >= self.horizon:
            message = "There are %d time step you ask the %dth" % (self.horizon, step)
            raise IndexError(message)
        return self.value
    def __lt__(self, other):
        return self.value < other
    def __gt__(self, other):
        return self.value > other
    def flatten(self) -> np.ndarray:
        # Broadcast the scalar over every scenario/time-step slot.
        return np.ones(self.horizon * self.nb_scn) * self.value
    @staticmethod
    def from_json(dict):
        pass  # not used. Deserialization is done by study elements themself
class NumpyNumericalValue(NumericalValue[np.ndarray], ABC):
    """
    Partial implementation backed by a numpy array; provides only the
    element-wise comparison methods.
    """
    def __lt__(self, other) -> bool:
        # True only when every element is strictly below `other`.
        return np.all(np.less(self.value, other))
    def __gt__(self, other) -> bool:
        # True only when every element is strictly above `other`.
        return np.all(np.greater(self.value, other))
class MatrixNumericalValue(NumpyNumericalValue):
    """
    Full matrix implementation with shape (nb_scn, horizon): one row per
    scenario, one column per time step.
    """
    def __getitem__(self, item) -> float:
        scn, step = item
        return self.value[scn, step]
    def flatten(self) -> np.ndarray:
        # Row-major flatten: scenario 0 first, then scenario 1, and so on.
        return self.value.flatten()
    @staticmethod
    def from_json(dict):
        pass  # not used. Deserialization is done by study elements themself
class RowNumericValue(NumpyNumericalValue):
    """
    Single-scenario implementation with shape (horizon, ); the one row is
    shared by every scenario.
    """
    def __getitem__(self, item) -> float:
        scn, step = item
        if scn >= self.nb_scn:
            message = "There are %d scenario you ask the %dth" % (self.nb_scn, scn)
            raise IndexError(message)
        return self.value[step]
    def flatten(self) -> np.ndarray:
        # Repeat the single row once per scenario.
        return np.tile(self.value, self.nb_scn)
    @staticmethod
    def from_json(dict):
        pass  # not used. Deserialization is done by study elements themself
class ColumnNumericValue(NumpyNumericalValue):
    """
    One-value-per-scenario implementation with shape (nb_scn, 1); each
    scenario's single value is held constant over the horizon.
    """
    def __getitem__(self, item) -> float:
        scn, step = item
        if step >= self.horizon:
            message = "There are %d time step you ask the %dth" % (self.horizon, step)
            raise IndexError(message)
        return self.value[scn, 0]
    def flatten(self) -> np.ndarray:
        # Each scenario value is repeated over the whole horizon.
        return np.repeat(self.value.flatten(), self.horizon)
    @staticmethod
    def from_json(dict):
        pass  # not used. Deserialization is done by study elements themself
class NumericalValueFactory:
    """
    Build the appropriate :class:`NumericalValue` implementation for raw input.

    Scalars, lists, pandas objects and numpy arrays are normalized into the
    concrete ``NumericalValue`` matching the study dimensions (``horizon``
    time steps and ``nb_scn`` scenarios).
    """
    def __init__(self, horizon: int, nb_scn: int):
        # horizon: number of time steps; nb_scn: number of scenarios
        self.horizon = horizon
        self.nb_scn = nb_scn
    def __eq__(self, other):
        if not isinstance(other, NumericalValueFactory):
            return False
        return other.horizon == self.horizon and other.nb_scn == self.nb_scn
    def create(
        self, value: Union[float, List[float], str, np.ndarray, NumericalValue]
    ) -> NumericalValue:
        """
        Convert *value* into a :class:`NumericalValue`.

        :param value: an already-built NumericalValue (returned as-is), a
            scalar, a list / pandas object, or a numpy array shaped
            (horizon,), (nb_scn, 1) or (nb_scn, horizon)
        :return: concrete NumericalValue instance
        :raises ValueError: when the array shape does not match the study
            dimensions, or the input type is unsupported
        """
        if isinstance(value, NumericalValue):
            return value
        # If data come from json serialized dictionary, use 'value' key as input
        if isinstance(value, dict) and "value" in value:
            value = value["value"]
        # Scalar input: one value shared by every scenario and time step.
        # isinstance (rather than the former exact `type(...) in [...]` test)
        # also accepts numeric subclasses such as numpy scalar types.
        if isinstance(value, (float, int, complex)):
            return ScalarNumericalValue(
                value=value, horizon=self.horizon, nb_scn=self.nb_scn
            )
        # Lists and pandas objects are converted to numpy arrays first.
        # (The previous membership test against typing.List was dead code:
        # no runtime type() ever equals typing.List.)
        if isinstance(value, (list, pd.DataFrame, pd.Series)):
            value = np.array(value)
        if isinstance(value, np.ndarray):
            # If scenario are not provided copy timeseries for each scenario
            if value.shape == (self.horizon,):
                return RowNumericValue(
                    value=value, horizon=self.horizon, nb_scn=self.nb_scn
                )
            # If horizon are not provide extend each scenario to full horizon
            if value.shape == (self.nb_scn, 1):
                return ColumnNumericValue(
                    value=value, horizon=self.horizon, nb_scn=self.nb_scn
                )
            # If perfect size
            if value.shape == (self.nb_scn, self.horizon):
                return MatrixNumericalValue(
                    value=value, horizon=self.horizon, nb_scn=self.nb_scn
                )
            # If any size pattern matches, raise error on quantity size given
            horizon_given = value.shape[0] if len(value.shape) == 1 else value.shape[1]
            sc_given = 1 if len(value.shape) == 1 else value.shape[0]
            raise ValueError(
                "Array must be: a number, an array like (horizon, ) or (nb_scn, 1) or (nb_scn, horizon). "
                "In your case horizon specified is %d and actual is %d. "
                "And nb_scn specified %d is whereas actual is %d"
                % (self.horizon, horizon_given, self.nb_scn, sc_given)
            )
        raise ValueError("Wrong source data for numerical value")
| [
"numpy.tile",
"numpy.ones",
"numpy.array",
"numpy.all",
"typing.TypeVar"
] | [((605, 617), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (612, 617), False, 'from typing import TypeVar, Generic, Union, List\n'), ((2553, 2579), 'numpy.all', 'np.all', (['(self.value < other)'], {}), '(self.value < other)\n', (2559, 2579), True, 'import numpy as np\n'), ((2633, 2659), 'numpy.all', 'np.all', (['(self.value > other)'], {}), '(self.value > other)\n', (2639, 2659), True, 'import numpy as np\n'), ((3502, 3534), 'numpy.tile', 'np.tile', (['self.value', 'self.nb_scn'], {}), '(self.value, self.nb_scn)\n', (3509, 3534), True, 'import numpy as np\n'), ((2159, 2194), 'numpy.ones', 'np.ones', (['(self.horizon * self.nb_scn)'], {}), '(self.horizon * self.nb_scn)\n', (2166, 2194), True, 'import numpy as np\n'), ((5321, 5336), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5329, 5336), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 19:35:29 2020
@author: altius
"""
import json
import os
import cv2
import matplotlib.pyplot as plt
import time
import numpy as np
# Pre-aligned OpenPose detections for the AIC training images, plus the
# matching image file list (pickled object arrays).
label1 = np.load('/datadrive/downloads/cocoapi-master/PythonAPI/openpose/tf-pose-estimation/data_configs_op/aic_pre_aligned_humans_coco.npy',allow_pickle=True)
file_names = np.load('/datadrive/downloads/cocoapi-master/PythonAPI/openpose/tf-pose-estimation/data_configs_op/files_aic_pre_aligned_humans_coco.npy')
# Local dataset paths.  NOTE(review): p_a and path_save are defined here but
# never used in the visible part of this script.
p_a='/mnt/sda1/downloads/cocoapi-master/PythonAPI/aligned_action_persons_mids/'
path = '/mnt/sda1/downloads/ai-challenger/ai_challenger_keypoint_train_20170902/'
jpeg_path = '/mnt/sda1/downloads/ai-challenger/ai_challenger_keypoint_train_20170902/keypoint_train_images_20170902'
file_name1 = 'keypoint_train_annotations_20170902.json'
path_save = '/mnt/sda1/downloads/cocoapi-master/PythonAPI/aic_persons_17_single/'
# Ground-truth AIC keypoint annotations (list of per-image dicts).
labels_raw = json.loads(open(os.path.join(path,file_name1)).read())
# Pre-allocated output buffer for up to 500k persons x 17 joints x (x, y, v).
labels=np.zeros((500000,17,3))
def aic_pck_amp(est_keys, true_keypoints):
    """Count estimated keypoints within the AIC PCK threshold.

    A keypoint counts as correct when its Euclidean distance (in the image
    plane) to the ground truth is below one fifth of the torso diameter,
    measured between ground-truth keypoints 3 and 9.

    :param est_keys: (n, >=2) array of estimated keypoints (x, y[, extra]).
    :param true_keypoints: (n, >=2) array of ground-truth keypoints.
    :return: number of keypoints within the threshold (numpy integer).
    """
    torso_diam = np.linalg.norm(true_keypoints[3, 0:2] - true_keypoints[9, 0:2])
    # Vectorized per-keypoint distances (replaces the per-row Python loop);
    # the unused `dist = 1000` placeholder from the original was removed.
    dist_all = np.linalg.norm(true_keypoints[:, 0:2] - est_keys[:, 0:2], axis=1)
    return np.sum(dist_all < torso_diam / 5)
# For every single-person AIC image, match it against the pre-aligned
# OpenPose detections, pick the largest detected person, remap the joints to
# the AIC ordering and record the PCK agreement in `distan`.
# NOTE(review): `c` and `file_name` are initialized but never used below.
distan=list()
c=0;
file_name=list()
for ind in range(0,len(labels_raw)):
    data_peak=labels_raw[ind]
    I = cv2.imread(os.path.join(jpeg_path,data_peak['image_id'])+'.jpg')
    if ind%100==0:
        print(ind)
    for j in range(len(data_peak['human_annotations'].keys())):
        try:
            # Only process images that loaded and contain exactly one person.
            if I is not None and len(data_peak['human_annotations'].keys())==1:
                next_one='human'+str(j+1)
                labels_temp=data_peak['keypoint_annotations'][next_one]
                labels_arr=np.copy(np.array(labels_temp).reshape(14,3))
                # Flip the visibility flag encoding (v -> 3 - v).
                labels_arr[:,2]=3-labels_arr[:,2]
                coord=data_peak['human_annotations'][next_one]
                # Find the detection-file index for this image.
                # NOTE(review): the 115-entry look-back window and the
                # [175:] path-prefix slice look dataset-specific -- confirm.
                if ind>0:
                    ind_found=-1
                    for d in range(max(0,ind-115),ind+1):
                        if file_names[d][175:]==(data_peak['image_id']+'.jpg'):
                            ind_found=d
                else:
                    ind_found=0
                # Require a match and mostly-visible ground-truth joints.
                if ind_found>-1 and np.mean(labels_arr[:,2])>1.9:
                    lab1=label1[ind_found]
                    # Measure each of up to 5 detected people by bounding-box
                    # extent so the largest one can be selected.
                    body_len=np.zeros((5))
                    for w in range(5):
                        try:
                            temp_label_dic=lab1['human'+str(w+1)]
                            temp_label=np.zeros((18,2))
                            for r in range(18):
                                try:
                                    temp_label[r,0]=temp_label_dic[r][0]
                                    temp_label[r,1]=temp_label_dic[r][1]
                                except:
                                    pass
                            minx=int(min(temp_label[:,1]))
                            maxx=int((max(temp_label[:,1])))
                            miny=int(min(temp_label[:,0]))
                            maxy=int((max(temp_label[:,0])))
                            im_size=max(maxx-minx,maxy-miny)
                            body_len[w]=im_size
                        except:
                            pass
                    # Re-extract the joints of the largest person.
                    max_size=np.argmax(body_len)
                    temp_label_dic=lab1['human'+str(max_size+1)]
                    temp_label=np.zeros((18,2))
                    for r in range(18):
                        try:
                            temp_label[r,0]=temp_label_dic[r][0]
                            temp_label[r,1]=temp_label_dic[r][1]
                        except:
                            pass
                    # Remap the 18 OpenPose joints onto the 14 AIC joints:
                    # joints 2..17 shift down by two, joint 1 fills slot 13,
                    # and AIC joint 12 is zeroed out of the comparison.
                    lab2=labels_arr[:,0:2]
                    lab2[12,:]=[0,0]
                    lab3=np.zeros((lab2.shape))
                    for q in range(2,lab3.shape[0]):
                        lab3[q-2,:]=temp_label[q,:]
                    lab3[13,:]=temp_label[1,:]
                    distan.append(aic_pck_amp(lab3,lab2))
        except:
            # NOTE(review): this bare except silently hides any per-sample
            # failure (missing keys, malformed annotations, ...).
            pass
# Visualize the last processed image with the selected person's joint indices
# drawn at their (x, y) positions.  NOTE(review): this relies on `I` and
# `temp_label` still holding values from the final loop iteration.
plt.imshow(I)
skeleton=temp_label
for ii in range(max(skeleton.shape)):
    # cv2.circle(img, center=tuple(skeleton[ii][0:2].astype(int)), radius=2, color=(0, 255, 0), thickness=20)
    cv2.putText(I,str(ii), tuple(skeleton[ii][0:2].astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0),thickness=5)
plt.imshow(I)
| [
"matplotlib.pyplot.imshow",
"numpy.mean",
"os.path.join",
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.linalg.norm",
"numpy.load"
] | [((215, 376), 'numpy.load', 'np.load', (['"""/datadrive/downloads/cocoapi-master/PythonAPI/openpose/tf-pose-estimation/data_configs_op/aic_pre_aligned_humans_coco.npy"""'], {'allow_pickle': '(True)'}), "(\n '/datadrive/downloads/cocoapi-master/PythonAPI/openpose/tf-pose-estimation/data_configs_op/aic_pre_aligned_humans_coco.npy'\n , allow_pickle=True)\n", (222, 376), True, 'import numpy as np\n'), ((379, 527), 'numpy.load', 'np.load', (['"""/datadrive/downloads/cocoapi-master/PythonAPI/openpose/tf-pose-estimation/data_configs_op/files_aic_pre_aligned_humans_coco.npy"""'], {}), "(\n '/datadrive/downloads/cocoapi-master/PythonAPI/openpose/tf-pose-estimation/data_configs_op/files_aic_pre_aligned_humans_coco.npy'\n )\n", (386, 527), True, 'import numpy as np\n'), ((1013, 1038), 'numpy.zeros', 'np.zeros', (['(500000, 17, 3)'], {}), '((500000, 17, 3))\n', (1021, 1038), True, 'import numpy as np\n'), ((4452, 4465), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I'], {}), '(I)\n', (4462, 4465), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4780), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I'], {}), '(I)\n', (4777, 4780), True, 'import matplotlib.pyplot as plt\n'), ((1109, 1172), 'numpy.linalg.norm', 'np.linalg.norm', (['(true_keypoints[3, 0:2] - true_keypoints[9, 0:2])'], {}), '(true_keypoints[3, 0:2] - true_keypoints[9, 0:2])\n', (1123, 1172), True, 'import numpy as np\n'), ((1375, 1408), 'numpy.sum', 'np.sum', (['(dist_all < torso_diam / 5)'], {}), '(dist_all < torso_diam / 5)\n', (1381, 1408), True, 'import numpy as np\n'), ((1269, 1320), 'numpy.linalg.norm', 'np.linalg.norm', (['(true_keypoint[x, :] - est_key[x, :])'], {}), '(true_keypoint[x, :] - est_key[x, :])\n', (1283, 1320), True, 'import numpy as np\n'), ((1528, 1574), 'os.path.join', 'os.path.join', (['jpeg_path', "data_peak['image_id']"], {}), "(jpeg_path, data_peak['image_id'])\n", (1540, 1574), False, 'import os\n'), ((966, 996), 'os.path.join', 'os.path.join', (['path', 'file_name1'], {}), '(path, 
file_name1)\n', (978, 996), False, 'import os\n'), ((2561, 2572), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2569, 2572), True, 'import numpy as np\n'), ((3545, 3564), 'numpy.argmax', 'np.argmax', (['body_len'], {}), '(body_len)\n', (3554, 3564), True, 'import numpy as np\n'), ((3699, 3716), 'numpy.zeros', 'np.zeros', (['(18, 2)'], {}), '((18, 2))\n', (3707, 3716), True, 'import numpy as np\n'), ((4148, 4168), 'numpy.zeros', 'np.zeros', (['lab2.shape'], {}), '(lab2.shape)\n', (4156, 4168), True, 'import numpy as np\n'), ((2437, 2462), 'numpy.mean', 'np.mean', (['labels_arr[:, 2]'], {}), '(labels_arr[:, 2])\n', (2444, 2462), True, 'import numpy as np\n'), ((1926, 1947), 'numpy.array', 'np.array', (['labels_temp'], {}), '(labels_temp)\n', (1934, 1947), True, 'import numpy as np\n'), ((2748, 2765), 'numpy.zeros', 'np.zeros', (['(18, 2)'], {}), '((18, 2))\n', (2756, 2765), True, 'import numpy as np\n')] |
from sklearn.model_selection import ParameterGrid
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.
    Parameters
    ----------
    filename: str
        Path to house prices dataset
    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    df = pd.read_csv(filename)
    # df.fillna(0, inplace=True)
    df = df.dropna().drop_duplicates()
    # One-hot encode zipcodes, dropping the first category to avoid collinearity.
    df = pd.get_dummies(df, columns=['zipcode'], drop_first=True)
    # Remove implausible rows: zero-priced houses and homes with less than
    # 150 sqft of living area per bedroom.
    df.drop(df[df['price'] == 0].index, inplace= True)
    # NOTE(review): comparing a numeric column to the string "nan" is a
    # no-op after dropna(); likely dead code -- confirm before removing.
    df.drop(df[df['price'] == "nan"].index, inplace= True)
    df.drop(df[df['sqft_living'] < df['bedrooms'] * 150].index, inplace=True)
    y = pd.Series(df['price'])
    # Flip the sign of any negative prices (index-aligned assignment).
    y[y < 0] = -y
    # Drop the response and non-predictive columns from the design matrix.
    df.drop(['price', 'id', 'date', 'sqft_living', 'long'], axis=1,inplace=True)
    df = np.abs(df)
    # Treat never-renovated houses (yr_renovated == 0) as renovated at build year.
    df['yr_renovated'] = df[['yr_renovated', 'yr_built']].max(axis=1)
    return (df, y)
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
                       output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name
    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem
    y : array-like of shape (n_samples, )
        Response vector to evaluate against
    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    # sqrt of the response's sum of squared deviations, reused per feature.
    y_sigma = np.sqrt(np.sum((y - y.mean()) ** 2))
    for (feature_name, x) in X.iteritems():
        # Pearson correlation: cov(x, y) / (std(x) * std(y)); the 1/n
        # normalization factors cancel between numerator and denominator.
        corr = (x - x.mean()).dot(y - y.mean()) / (
                np.sqrt(np.sum((x - x.mean()) ** 2)) * y_sigma)
        fig = go.Figure([go.Scatter(x=x, y=y, mode='markers')],
                        layout=go.Layout(
                            title=feature_name + " Pearson Correlation: " + str(
                                corr), xaxis_title=feature_name,
                            yaxis_title="the response"))
        # One PNG per feature, named after the feature.
        fig.write_image(output_path + "/" + feature_name + ".png")
if __name__ == '__main__':
    # Fixed seed so the random train/test splits are reproducible.
    np.random.seed(0)
    # Question 1 - Load and preprocessing of housing prices dataset
    X, y = load_data("../datasets/house_prices.csv")
    # Question 2 - Feature evaluation with respect to response
    feature_evaluation(X, y, "G:/My Drive/Courses/2B/IML")
    # Question 3 - Split samples into training- and testing sets.
    train_X, train_y, test_X, test_y = split_train_test(X, y, 0.75)
    # Question 4 - Fit model over increasing percentages of the overall training data
    # For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
    #   1) Sample p% of the overall training data
    #   2) Fit linear model (including intercept) over sampled set
    #   3) Test fitted model over test set
    #   4) Store average and variance of loss over test set
    # Then plot average loss as function of training size with error ribbon of size (mean-2*stds, mean+2*stds)
    averages = []
    stds = []
    p_ = np.arange(10,101)
    for p in p_:
        p_averages = np.array([])
        for i in range(10):
            # NOTE(review): X, y are re-bound here, shadowing the full
            # dataset loaded above; intentional but easy to misread.
            X, y, X2, y2 = split_train_test(train_X, train_y, train_proportion=p/100)
            model = LinearRegression() # 2
            model.fit(X.to_numpy(), y.to_numpy())
            p_averages = np.append(p_averages, model.loss(test_X.to_numpy(), test_y.to_numpy())) # 3
        # 4
        averages.append(p_averages.mean())
        stds.append(p_averages.std())
    stds = np.array(stds)
    averages = np.array(averages)
    p_for_graph = [str(p) + "%" for p in p_]
    # Mean-loss curve plus a grey +/- 2 std confidence ribbon.
    fig = go.Figure([go.Scatter(x=p_for_graph, y=averages, mode='markers+lines',showlegend=False),
                    go.Scatter(x=p_for_graph, y=averages + (2 * stds), fill=None, mode="lines", line=dict(color="lightgrey"), showlegend=False),
                    go.Scatter(x=p_for_graph, y=averages - (2 * stds), fill='tonexty', mode="lines", line=dict(color="lightgrey"), showlegend=False)],
              layout=go.Layout(
                  title="Average loss of model in response to % of data used for training, with error ribbon of size (mean-2*stds, mean+2*stds)", xaxis_title="% of data used",
                  yaxis_title="average loss with confidence interval of (mean-2*stds, mean+2*stds)"))
    fig.show()
| [
"pandas.Series",
"numpy.abs",
"plotly.graph_objects.Layout",
"pandas.read_csv",
"IMLearn.utils.split_train_test",
"numpy.array",
"IMLearn.learners.regressors.LinearRegression",
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"pandas.get_dummies",
"numpy.arange"
] | [((674, 695), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (685, 695), True, 'import pandas as pd\n'), ((779, 835), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['zipcode']", 'drop_first': '(True)'}), "(df, columns=['zipcode'], drop_first=True)\n", (793, 835), True, 'import pandas as pd\n'), ((1037, 1059), 'pandas.Series', 'pd.Series', (["df['price']"], {}), "(df['price'])\n", (1046, 1059), True, 'import pandas as pd\n'), ((1170, 1180), 'numpy.abs', 'np.abs', (['df'], {}), '(df)\n', (1176, 1180), True, 'import numpy as np\n'), ((2589, 2606), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2603, 2606), True, 'import numpy as np\n'), ((2957, 2985), 'IMLearn.utils.split_train_test', 'split_train_test', (['X', 'y', '(0.75)'], {}), '(X, y, 0.75)\n', (2973, 2985), False, 'from IMLearn.utils import split_train_test\n'), ((3530, 3548), 'numpy.arange', 'np.arange', (['(10)', '(101)'], {}), '(10, 101)\n', (3539, 3548), True, 'import numpy as np\n'), ((4013, 4027), 'numpy.array', 'np.array', (['stds'], {}), '(stds)\n', (4021, 4027), True, 'import numpy as np\n'), ((4043, 4061), 'numpy.array', 'np.array', (['averages'], {}), '(averages)\n', (4051, 4061), True, 'import numpy as np\n'), ((3586, 3598), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3594, 3598), True, 'import numpy as np\n'), ((3654, 3714), 'IMLearn.utils.split_train_test', 'split_train_test', (['train_X', 'train_y'], {'train_proportion': '(p / 100)'}), '(train_X, train_y, train_proportion=p / 100)\n', (3670, 3714), False, 'from IMLearn.utils import split_train_test\n'), ((3733, 3751), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3749, 3751), False, 'from IMLearn.learners.regressors import LinearRegression\n'), ((4128, 4205), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'p_for_graph', 'y': 'averages', 'mode': '"""markers+lines"""', 'showlegend': '(False)'}), "(x=p_for_graph, y=averages, 
mode='markers+lines', showlegend=False)\n", (4138, 4205), True, 'import plotly.graph_objects as go\n'), ((4536, 4801), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': '"""Average loss of model in response to % of data used for training, with error ribbon of size (mean-2*stds, mean+2*stds)"""', 'xaxis_title': '"""% of data used"""', 'yaxis_title': '"""average loss with confidence interval of (mean-2*stds, mean+2*stds)"""'}), "(title=\n 'Average loss of model in response to % of data used for training, with error ribbon of size (mean-2*stds, mean+2*stds)'\n , xaxis_title='% of data used', yaxis_title=\n 'average loss with confidence interval of (mean-2*stds, mean+2*stds)')\n", (4545, 4801), True, 'import plotly.graph_objects as go\n'), ((2204, 2240), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y', 'mode': '"""markers"""'}), "(x=x, y=y, mode='markers')\n", (2214, 2240), True, 'import plotly.graph_objects as go\n')] |
import torch
from torch import nn
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from ipdb import set_trace
from scipy.linalg import solve_continuous_are
from scipy.io import loadmat
from scipy.interpolate import interpn
from systems.manipulator4dof import Manipulator4DOF
from stable_baselines3 import A2C, PPO, DDPG
from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.sb2_compat.rmsprop_tf_like import RMSpropTFLike
# Physical parameters of the 4-link manipulator: per-link masses (kg) and lengths (m).
m = [5.4, 1.8, 0.6, 0.2]
l = [0.2, 0.5, 0.25, 0.125]
g = 9.81
# System description consumed by Manipulator4DOF.
# Q/R are quadratic state/control cost weights, `goal` is the target state
# (first joint at pi, everything else at rest), T/dt give the horizon and
# integration step, and x_limits/u_limits bound the state and input boxes.
# NOTE(review): the name `sys` shadows the stdlib `sys` module (not imported
# in this file, but confusing for maintainers) — consider renaming.
sys = {'m': m, 'l': l, 'g': g,\
	   'Q': np.diag([4,4,4,4,0.1,0.1,0.1,0.1]), 'R': np.diag([0.002,0.004,0.024,0.1440]),\
	   'goal': np.array([[np.pi],[0],[0],[0],[0],[0],[0],[0]]), 'u0': np.array([[0],[0],[0],[0]]),\
	   'T': 4, 'dt': 1e-3, 'gamma_': 0.997, 'X_DIMS': 8, 'U_DIMS': 4,\
	   'x_limits': np.array([[0, 2*np.pi],[-np.pi, np.pi],[-np.pi, np.pi],[-np.pi, np.pi],[-6, 6],[-6, 6],[-6, 6],[-6, 6]]),\
	   'u_limits': np.array([[-24, 24], [-15, 15], [-7.5, 7.5], [-1, 1]])}

fixed_start = False       # if True, every episode starts from the same initial state
normalized_actions = True # policy outputs in [-1, 1]; the env rescales to u_limits

env = Manipulator4DOF(sys, fixed_start=fixed_start, normalized_actions=normalized_actions)
check_env(env)  # sanity-check Gym API compliance (stable-baselines3 utility)
# Test dynamics: evaluate the three dynamics implementations (analytical,
# numerical, simscape-derived) on the same random states/inputs so their
# outputs (d1/d2/d3) and per-call wall-clock totals (t1/t2/t3) can be compared.
num_points = 100
states = np.zeros((sys['X_DIMS'], num_points))
inputs = np.zeros((sys['U_DIMS'], num_points))
d1 = np.zeros((sys['X_DIMS'], num_points))
d2 = np.zeros((sys['X_DIMS'], num_points))
d3 = np.zeros((sys['X_DIMS'], num_points))
t1 = 0
t2 = 0
t3 = 0
# BUG FIX: the loop variable was named `nn`, which rebound the `torch.nn`
# module imported at the top of the file to an int and broke the later
# `nn.ReLU` references when building policy_kwargs.
for pt in range(num_points):
	# Sample a uniformly random state and input inside the box limits.
	states[:,pt] = sys['x_limits'][:,0] + (sys['x_limits'][:,1] - sys['x_limits'][:,0])*np.random.rand(sys['X_DIMS'])
	inputs[:,pt] = sys['u_limits'][:,0] + (sys['u_limits'][:,1] - sys['u_limits'][:,0])*np.random.rand(sys['U_DIMS'])

	t1s = time.time()
	d1[:,pt:(pt+1)] = env.dyn(states[:,pt:(pt+1)], inputs[:,pt:(pt+1)])
	t1 += (time.time() - t1s)

	t2s = time.time()
	d2[:,pt:(pt+1)] = env.dyn_numerical(states[:,pt:(pt+1)], inputs[:,pt:(pt+1)])
	t2 += (time.time() - t2s)

	t3s = time.time()
	d3[:,pt:(pt+1)] = env.dyn_simscape(states[:,pt:(pt+1)], inputs[:,pt:(pt+1)])
	t3 += (time.time() - t3s)

set_trace()  # drop into the debugger to inspect timing/agreement interactively
# Load DP solution to compare
# filename = '~/Documents/MATLAB/iLQG/DP/data/manipulator4dof/decomp0/final.mat'
# policy_analytical = loadmat(filename)
# Test the policy
# obs = env.reset()
# start = obs
# for i in range(12000):
# action = interpn()
# obs, reward, done, info = env.step(action)
# if done:
# print('Start state :', start, ', Final state :', obs)
# obs = env.reset()
# start = obs
# Compute Policy and Value function numerically
# Resume-or-create logic: scan save_path for the latest 'model_<t>' checkpoint;
# if one exists, load it, otherwise construct a fresh model for `algorithm`.
algorithm = 'A2C'
if (fixed_start):
	save_path = os.path.join('examples/data/manipulator4dof_fixedstart', algorithm)
else:
	save_path = os.path.join('examples/data/manipulator4dof', algorithm)

log_path = os.path.join(save_path, 'tb_log')
files = [f for f in os.listdir(save_path) if os.path.isfile(os.path.join(save_path, f))]
save_timestep = 0
ff_latest = ''
# Checkpoint filenames look like 'model_<timesteps>.zip'; keep the largest.
for ff in files:
	if 'model' not in ff:
		continue
	tt = ff.split('_')[-1]
	tt = int(tt.split('.')[0])
	if (tt > save_timestep):
		save_timestep = tt
		ff_latest = ff

total_timesteps = 40000000
if ((save_timestep <= total_timesteps) and (save_timestep > 0)):
	# Resume from the most recent checkpoint.
	model = A2C.load(os.path.join(save_path, 'model_'+str(save_timestep)))
elif (algorithm == 'PPO'):
	model = PPO.load(os.path.join(save_path, 'model_'+str(save_timestep)))
elif (algorithm == 'DDPG'):
	model = DDPG.load(os.path.join(save_path, 'model_'+str(save_timestep)))
else:
	# Fresh model. policy_std only feeds A2C's log_std_init below.
	if (normalized_actions):
		policy_std = 0.1
	else:
		policy_std = 0.1 * sys['u_limits'][:,1]
	# NOTE(review): `nn.ReLU` requires `nn` to still be the torch.nn module
	# here — beware of earlier code rebinding the name `nn` (the dynamics-test
	# loop above is prone to this).
	if (algorithm == 'A2C'):
		policy_kwargs = dict(activation_fn=nn.ReLU, net_arch=[dict(pi=[64, 64, 64], vf=[64, 64, 64])], log_std_init=policy_std, optimizer_class=RMSpropTFLike, optimizer_kwargs=dict(eps=1e-5))
		model = A2C('MlpPolicy', env, gamma=sys['gamma_'], n_steps=50, tensorboard_log=log_path, verbose=1, policy_kwargs=policy_kwargs)
	elif (algorithm == 'PPO'):
		policy_kwargs = dict(activation_fn=nn.ReLU, net_arch=[dict(pi=[32, 32], vf=[32, 32])])
		model = PPO('MlpPolicy', env, gamma=sys['gamma_'], n_steps=env.horizon, clip_range_vf=None, clip_range=0.5, tensorboard_log=log_path, verbose=1, policy_kwargs=policy_kwargs)
	elif (algorithm == 'DDPG'):
		policy_kwargs = dict(activation_fn=nn.ReLU, net_arch=dict(pi=[16, 16], qf=[16, 16]))
		model = DDPG('MlpPolicy', env, gamma=sys['gamma_'], tensorboard_log=log_path, verbose=1, policy_kwargs=policy_kwargs)

# Train until total_timesteps, checkpointing after each learn() chunk.
# With save_every == total_timesteps this loop runs at most once.
save_every = total_timesteps
timesteps = save_timestep
log_steps = 4000
while timesteps < total_timesteps:
	# NOTE(review): `model.n_steps` is an on-policy attribute (A2C/PPO);
	# for DDPG this lookup presumably fails — verify before running with
	# algorithm='DDPG'.
	model.learn(total_timesteps=save_every, log_interval=round(log_steps/model.n_steps))
	timesteps = timesteps + save_every
	model.save(os.path.join(save_path, 'model_' + str(timesteps)))
# Test the learned policy: roll out `num_trajectories` episodes with the
# deterministic policy and record every visited state for phase-plane plots.
num_trajectories = 4
horizon = int(sys['T']/sys['dt'])  # hoisted: loop-invariant episode length
trajectories = np.zeros((sys['X_DIMS'], horizon, num_trajectories))
for t in range(num_trajectories):
	obs = env.reset()
	start = obs
	for i in range(horizon):
		action, _state = model.predict(obs, deterministic=True)
		obs, reward, done, info = env.step(action)
		trajectories[:,i,t] = obs
		if done:
			print('Start state :', start, ', Final state :', obs)
			break

# Phase-plane plots: one subplot per joint, angle th_j (state j) against
# angular velocity th_j-dot (state j+4). The four copy-pasted subplot
# sections are folded into a single loop.
fig = plt.figure()
colors = ['r', 'g', 'b', 'm']
for joint in range(4):
	ax = fig.add_subplot(4, 1, joint + 1)  # same layout as subplot(411)..(414)
	ax.set_xlabel('th%d' % (joint + 1))
	ax.set_ylabel('th%d-dot' % (joint + 1))
	for t in range(num_trajectories):
		plt.plot(trajectories[joint, :, t], trajectories[joint + 4, :, t], colors[t])
plt.show()
set_trace()
| [
"os.listdir",
"numpy.random.rand",
"stable_baselines3.common.env_checker.check_env",
"ipdb.set_trace",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"stable_baselines3.DDPG",
"systems.manipulator4dof.Manipulator4DOF",
"stabl... | [((1080, 1169), 'systems.manipulator4dof.Manipulator4DOF', 'Manipulator4DOF', (['sys'], {'fixed_start': 'fixed_start', 'normalized_actions': 'normalized_actions'}), '(sys, fixed_start=fixed_start, normalized_actions=\n normalized_actions)\n', (1095, 1169), False, 'from systems.manipulator4dof import Manipulator4DOF\n'), ((1165, 1179), 'stable_baselines3.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (1174, 1179), False, 'from stable_baselines3.common.env_checker import check_env\n'), ((1223, 1260), 'numpy.zeros', 'np.zeros', (["(sys['X_DIMS'], num_points)"], {}), "((sys['X_DIMS'], num_points))\n", (1231, 1260), True, 'import numpy as np\n'), ((1270, 1307), 'numpy.zeros', 'np.zeros', (["(sys['U_DIMS'], num_points)"], {}), "((sys['U_DIMS'], num_points))\n", (1278, 1307), True, 'import numpy as np\n'), ((1313, 1350), 'numpy.zeros', 'np.zeros', (["(sys['X_DIMS'], num_points)"], {}), "((sys['X_DIMS'], num_points))\n", (1321, 1350), True, 'import numpy as np\n'), ((1356, 1393), 'numpy.zeros', 'np.zeros', (["(sys['X_DIMS'], num_points)"], {}), "((sys['X_DIMS'], num_points))\n", (1364, 1393), True, 'import numpy as np\n'), ((1399, 1436), 'numpy.zeros', 'np.zeros', (["(sys['X_DIMS'], num_points)"], {}), "((sys['X_DIMS'], num_points))\n", (1407, 1436), True, 'import numpy as np\n'), ((2085, 2096), 'ipdb.set_trace', 'set_trace', ([], {}), '()\n', (2094, 2096), False, 'from ipdb import set_trace\n'), ((2777, 2810), 'os.path.join', 'os.path.join', (['save_path', '"""tb_log"""'], {}), "(save_path, 'tb_log')\n", (2789, 2810), False, 'import os\n'), ((5219, 5231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5229, 5231), True, 'import matplotlib.pyplot as plt\n'), ((5970, 5980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5978, 5980), True, 'import matplotlib.pyplot as plt\n'), ((5981, 5992), 'ipdb.set_trace', 'set_trace', ([], {}), '()\n', (5990, 5992), False, 'from ipdb import set_trace\n'), ((588, 629), 
'numpy.diag', 'np.diag', (['[4, 4, 4, 4, 0.1, 0.1, 0.1, 0.1]'], {}), '([4, 4, 4, 4, 0.1, 0.1, 0.1, 0.1])\n', (595, 629), True, 'import numpy as np\n'), ((629, 666), 'numpy.diag', 'np.diag', (['[0.002, 0.004, 0.024, 0.144]'], {}), '([0.002, 0.004, 0.024, 0.144])\n', (636, 666), True, 'import numpy as np\n'), ((679, 733), 'numpy.array', 'np.array', (['[[np.pi], [0], [0], [0], [0], [0], [0], [0]]'], {}), '([[np.pi], [0], [0], [0], [0], [0], [0], [0]])\n', (687, 733), True, 'import numpy as np\n'), ((734, 764), 'numpy.array', 'np.array', (['[[0], [0], [0], [0]]'], {}), '([[0], [0], [0], [0]])\n', (742, 764), True, 'import numpy as np\n'), ((848, 965), 'numpy.array', 'np.array', (['[[0, 2 * np.pi], [-np.pi, np.pi], [-np.pi, np.pi], [-np.pi, np.pi], [-6, 6],\n [-6, 6], [-6, 6], [-6, 6]]'], {}), '([[0, 2 * np.pi], [-np.pi, np.pi], [-np.pi, np.pi], [-np.pi, np.pi],\n [-6, 6], [-6, 6], [-6, 6], [-6, 6]])\n', (856, 965), True, 'import numpy as np\n'), ((971, 1025), 'numpy.array', 'np.array', (['[[-24, 24], [-15, 15], [-7.5, 7.5], [-1, 1]]'], {}), '([[-24, 24], [-15, 15], [-7.5, 7.5], [-1, 1]])\n', (979, 1025), True, 'import numpy as np\n'), ((1724, 1735), 'time.time', 'time.time', ([], {}), '()\n', (1733, 1735), False, 'import time\n'), ((1840, 1851), 'time.time', 'time.time', ([], {}), '()\n', (1849, 1851), False, 'import time\n'), ((1967, 1978), 'time.time', 'time.time', ([], {}), '()\n', (1976, 1978), False, 'import time\n'), ((2621, 2688), 'os.path.join', 'os.path.join', (['"""examples/data/manipulator4dof_fixedstart"""', 'algorithm'], {}), "('examples/data/manipulator4dof_fixedstart', algorithm)\n", (2633, 2688), False, 'import os\n'), ((2708, 2764), 'os.path.join', 'os.path.join', (['"""examples/data/manipulator4dof"""', 'algorithm'], {}), "('examples/data/manipulator4dof', algorithm)\n", (2720, 2764), False, 'import os\n'), ((5372, 5437), 'matplotlib.pyplot.plot', 'plt.plot', (['trajectories[0, :, t]', 'trajectories[4, :, t]', 'colors[t]'], {}), '(trajectories[0, :, 
t], trajectories[4, :, t], colors[t])\n', (5380, 5437), True, 'import matplotlib.pyplot as plt\n'), ((5549, 5614), 'matplotlib.pyplot.plot', 'plt.plot', (['trajectories[1, :, t]', 'trajectories[5, :, t]', 'colors[t]'], {}), '(trajectories[1, :, t], trajectories[5, :, t], colors[t])\n', (5557, 5614), True, 'import matplotlib.pyplot as plt\n'), ((5726, 5791), 'matplotlib.pyplot.plot', 'plt.plot', (['trajectories[2, :, t]', 'trajectories[6, :, t]', 'colors[t]'], {}), '(trajectories[2, :, t], trajectories[6, :, t], colors[t])\n', (5734, 5791), True, 'import matplotlib.pyplot as plt\n'), ((5903, 5968), 'matplotlib.pyplot.plot', 'plt.plot', (['trajectories[3, :, t]', 'trajectories[7, :, t]', 'colors[t]'], {}), '(trajectories[3, :, t], trajectories[7, :, t], colors[t])\n', (5911, 5968), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1824), 'time.time', 'time.time', ([], {}), '()\n', (1822, 1824), False, 'import time\n'), ((1939, 1950), 'time.time', 'time.time', ([], {}), '()\n', (1948, 1950), False, 'import time\n'), ((2065, 2076), 'time.time', 'time.time', ([], {}), '()\n', (2074, 2076), False, 'import time\n'), ((2831, 2852), 'os.listdir', 'os.listdir', (['save_path'], {}), '(save_path)\n', (2841, 2852), False, 'import os\n'), ((3820, 3945), 'stable_baselines3.A2C', 'A2C', (['"""MlpPolicy"""', 'env'], {'gamma': "sys['gamma_']", 'n_steps': '(50)', 'tensorboard_log': 'log_path', 'verbose': '(1)', 'policy_kwargs': 'policy_kwargs'}), "('MlpPolicy', env, gamma=sys['gamma_'], n_steps=50, tensorboard_log=\n log_path, verbose=1, policy_kwargs=policy_kwargs)\n", (3823, 3945), False, 'from stable_baselines3 import A2C, PPO, DDPG\n'), ((1572, 1601), 'numpy.random.rand', 'np.random.rand', (["sys['X_DIMS']"], {}), "(sys['X_DIMS'])\n", (1586, 1601), True, 'import numpy as np\n'), ((1687, 1716), 'numpy.random.rand', 'np.random.rand', (["sys['U_DIMS']"], {}), "(sys['U_DIMS'])\n", (1701, 1716), True, 'import numpy as np\n'), ((2871, 2897), 'os.path.join', 'os.path.join', 
(['save_path', 'f'], {}), '(save_path, f)\n', (2883, 2897), False, 'import os\n'), ((4068, 4241), 'stable_baselines3.PPO', 'PPO', (['"""MlpPolicy"""', 'env'], {'gamma': "sys['gamma_']", 'n_steps': 'env.horizon', 'clip_range_vf': 'None', 'clip_range': '(0.5)', 'tensorboard_log': 'log_path', 'verbose': '(1)', 'policy_kwargs': 'policy_kwargs'}), "('MlpPolicy', env, gamma=sys['gamma_'], n_steps=env.horizon,\n clip_range_vf=None, clip_range=0.5, tensorboard_log=log_path, verbose=1,\n policy_kwargs=policy_kwargs)\n", (4071, 4241), False, 'from stable_baselines3 import A2C, PPO, DDPG\n'), ((4360, 4473), 'stable_baselines3.DDPG', 'DDPG', (['"""MlpPolicy"""', 'env'], {'gamma': "sys['gamma_']", 'tensorboard_log': 'log_path', 'verbose': '(1)', 'policy_kwargs': 'policy_kwargs'}), "('MlpPolicy', env, gamma=sys['gamma_'], tensorboard_log=log_path,\n verbose=1, policy_kwargs=policy_kwargs)\n", (4364, 4473), False, 'from stable_baselines3 import A2C, PPO, DDPG\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
from pylab import *
# Authors <NAME> and <NAME>, Dec 2021
file = 'data.dat'

# Read header: line 1 holds the two grid dimensions ("# dim1 dim2"), lines
# 2-4 hold the x label, y label and plot title (each prefixed with '#').
# Using a context manager guarantees the file is closed even if parsing fails.
with open(file, 'r') as f:
    dimxy = f.readline().rstrip().lstrip(' #')
    aa = dimxy.split(" ")
    dim1 = int(aa[0])
    dim2 = int(aa[1])
    xlabel = f.readline().rstrip().lstrip('#')
    ylabel = f.readline().rstrip().lstrip('#')
    title = f.readline().rstrip().lstrip('#')

# Read data: skip the 4 header lines; column 3 holds the field values,
# laid out row-major as a dim1 x dim2 grid (transposed for plotting).
d = np.loadtxt(file, skiprows=4)
xx = d[:,2].reshape(dim1,dim2).transpose()

# Plot: labelled black contour lines over a semi-transparent filled colormap.
cc = plt.contour(xx, colors='black')
plt.clabel(cc, inline=True, fontsize=12)
plt.contourf(xx, cmap='RdBu_r', alpha=0.5)
plt.colorbar(orientation='vertical')
if dim1 == dim2:
    plt.gca().invert_xaxis()
    plt.gca().invert_yaxis()
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel, fontsize=14)
plt.title(title, fontsize=14)
# plt.show()
# plt.savefig (not pylab's wildcard-imported savefig) writes the figure.
plt.savefig('data.png')
| [
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.title",
"numpy.loadtxt"
] | [((470, 498), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'skiprows': '(4)'}), '(file, skiprows=4)\n', (480, 498), True, 'import numpy as np\n'), ((552, 583), 'matplotlib.pyplot.contour', 'plt.contour', (['xx'], {'colors': '"""black"""'}), "(xx, colors='black')\n", (563, 583), True, 'import matplotlib.pyplot as plt\n'), ((583, 623), 'matplotlib.pyplot.clabel', 'plt.clabel', (['cc'], {'inline': '(True)', 'fontsize': '(12)'}), '(cc, inline=True, fontsize=12)\n', (593, 623), True, 'import matplotlib.pyplot as plt\n'), ((626, 668), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx'], {'cmap': '"""RdBu_r"""', 'alpha': '(0.5)'}), "(xx, cmap='RdBu_r', alpha=0.5)\n", (638, 668), True, 'import matplotlib.pyplot as plt\n'), ((670, 706), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""vertical"""'}), "(orientation='vertical')\n", (682, 706), True, 'import matplotlib.pyplot as plt\n'), ((777, 808), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'fontsize': '(14)'}), '(xlabel, fontsize=14)\n', (787, 808), True, 'import matplotlib.pyplot as plt\n'), ((809, 840), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontsize': '(14)'}), '(ylabel, fontsize=14)\n', (819, 840), True, 'import matplotlib.pyplot as plt\n'), ((841, 870), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (850, 870), True, 'import matplotlib.pyplot as plt\n'), ((752, 761), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (759, 761), True, 'import matplotlib.pyplot as plt\n'), ((726, 735), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (733, 735), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
from numpy import alltrue, arange, array
from numpy.testing import assert_array_equal
# Chaco imports
from chaco.api import (
ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext
)
from chaco.base import rgba_dtype
from chaco.segment_plot import SegmentPlot
from chaco.default_colormaps import viridis
class SegmentPlotTest(unittest.TestCase):
    """Rendering smoke-tests for ``SegmentPlot``.

    Each test configures one variant of the plot (render style, data-driven
    color/width, alpha, selection) and checks that rendering it paints at
    least one pixel, i.e. the canvas does not stay entirely white.  The
    render/assert boilerplate that was copy-pasted into every test is
    factored into :meth:`check_renders_something` and
    :meth:`check_default_orientation`; all public test names are unchanged.
    """

    def setUp(self):
        self.size = (250, 250)

        # Index axis: 10 points, 0..9.
        index_data_source = ArrayDataSource(arange(10))
        index_range = DataRange1D()
        index_range.add(index_data_source)
        index_mapper = LinearMapper(range=index_range)

        # Value axis: 10 points, 1..10.
        value_data_source = ArrayDataSource(arange(1, 11))
        value_range = DataRange1D()
        value_range.add(value_data_source)
        value_mapper = LinearMapper(range=value_range)

        self.segment_plot = SegmentPlot(
            index=index_data_source,
            index_mapper=index_mapper,
            value=value_data_source,
            value_mapper=value_mapper,
            border_visible=False,
        )
        self.segment_plot.outer_bounds = list(self.size)

    def set_color_data(self):
        """Switch the plot to data-driven segment colors (viridis map)."""
        color_data_source = ArrayDataSource(arange(2, 7))
        color_range = DataRange1D()
        color_range.add(color_data_source)
        color_mapper = viridis(range=color_range)

        self.segment_plot.color_by_data = True
        self.segment_plot.color_data = color_data_source
        self.segment_plot.color_mapper = color_mapper

    def set_width_data(self):
        """Switch the plot to data-driven segment widths (screen widths 1-10)."""
        width_data_source = ArrayDataSource(arange(3, 8))
        width_range = DataRange1D()
        width_range.add(width_data_source)
        width_mapper = LinearMapper(low_pos=1, high_pos=10, range=width_range)

        self.segment_plot.width_by_data = True
        self.segment_plot.width_data = width_data_source
        self.segment_plot.width_mapper = width_mapper

    # ------------------------------------------------------------------
    # Helpers (fold the boilerplate previously repeated in every test)
    # ------------------------------------------------------------------

    def check_renders_something(self):
        """Render the plot offscreen and fail if the canvas stayed all-white."""
        gc = PlotGraphicsContext(self.size)
        gc.render_component(self.segment_plot)
        actual = gc.bmp_array[:, :, :]
        self.assertFalse(alltrue(actual == 255))

    def check_default_orientation(self):
        """Check origin and the index/value to screen-axis mapper wiring."""
        self.assertEqual(self.segment_plot.origin, 'bottom left')
        self.assertIs(
            self.segment_plot.x_mapper, self.segment_plot.index_mapper
        )
        self.assertIs(
            self.segment_plot.y_mapper, self.segment_plot.value_mapper
        )

    # ------------------------------------------------------------------
    # Render styles
    # ------------------------------------------------------------------

    def test_segment(self):
        self.check_default_orientation()
        self.check_renders_something()

    def test_segment_orthogonal(self):
        self.segment_plot.render_style = 'orthogonal'
        self.check_default_orientation()
        self.check_renders_something()

    def test_segment_quad(self):
        self.segment_plot.render_style = 'quad'
        self.check_default_orientation()
        self.check_renders_something()

    def test_segment_cubic(self):
        self.segment_plot.render_style = 'cubic'
        self.check_default_orientation()
        self.check_renders_something()

    # ------------------------------------------------------------------
    # Data-driven color
    # ------------------------------------------------------------------

    def test_segment_color(self):
        self.set_color_data()
        self.check_renders_something()

    def test_segment_color_orthogonal(self):
        self.segment_plot.render_style = 'orthogonal'
        self.set_color_data()
        self.check_renders_something()

    def test_segment_color_quad(self):
        self.segment_plot.render_style = 'quad'
        self.set_color_data()
        self.check_renders_something()

    def test_segment_color_cubic(self):
        self.segment_plot.render_style = 'cubic'
        self.set_color_data()
        self.check_renders_something()

    # ------------------------------------------------------------------
    # Data-driven width
    # ------------------------------------------------------------------

    def test_segment_width(self):
        self.set_width_data()
        self.check_renders_something()

    def test_segment_width_orthogonal(self):
        self.segment_plot.render_style = 'orthogonal'
        self.set_width_data()
        self.check_renders_something()

    def test_segment_width_quad(self):
        self.segment_plot.render_style = 'quad'
        self.set_width_data()
        self.check_renders_something()

    def test_segment_width_cubic(self):
        self.segment_plot.render_style = 'cubic'
        self.set_width_data()
        self.check_renders_something()

    # ------------------------------------------------------------------
    # Data-driven width and color combined
    # ------------------------------------------------------------------

    def test_segment_width_color(self):
        self.set_width_data()
        self.set_color_data()
        self.check_renders_something()

    def test_segment_width_orthogonal_color(self):
        self.segment_plot.render_style = 'orthogonal'
        self.set_width_data()
        self.set_color_data()
        self.check_renders_something()

    def test_segment_width_quad_color(self):
        self.segment_plot.render_style = 'quad'
        self.set_width_data()
        self.set_color_data()
        self.check_renders_something()

    def test_segment_width_cubic_color(self):
        self.segment_plot.render_style = 'cubic'
        self.set_width_data()
        self.set_color_data()
        self.check_renders_something()

    # ------------------------------------------------------------------
    # Alpha blending
    # ------------------------------------------------------------------

    def test_segment_alpha(self):
        self.segment_plot.alpha = 0.5
        self.check_renders_something()

    def test_segment_orthogonal_alpha(self):
        self.segment_plot.render_style = 'orthogonal'
        self.segment_plot.alpha = 0.5
        self.check_renders_something()

    def test_segment_quad_alpha(self):
        self.segment_plot.render_style = 'quad'
        self.segment_plot.alpha = 0.5
        self.check_renders_something()

    def test_segment_cubic_alpha(self):
        self.segment_plot.render_style = 'cubic'
        self.segment_plot.alpha = 0.5
        self.check_renders_something()

    # ------------------------------------------------------------------
    # Selections
    # ------------------------------------------------------------------

    def test_segment_selection(self):
        mask = array([True, True, False, False, True])
        self.segment_plot.index.metadata['selections'] = [mask]

        # Selected segments take the selection color; unselected segments keep
        # the base color with alpha dimmed to 30%.
        black = self.segment_plot.color_
        yellow = self.segment_plot.selection_color_
        expected_colors = array([yellow, yellow, black, black, yellow])
        expected_colors = expected_colors.astype('float32').view(rgba_dtype)
        expected_colors.shape = (5, )
        expected_colors['a'][~mask] *= 0.3

        assert_array_equal(mask, self.segment_plot.selected_mask)
        assert_array_equal(expected_colors, self.segment_plot.effective_colors)

        self.check_renders_something()

    def test_segment_selection_color(self):
        mask = array([True, True, False, False, True])
        self.segment_plot.index.metadata['selections'] = [mask]
        self.set_color_data()

        # With data-driven colors the expected colors come from the color
        # mapper; unselected segments are again dimmed to 30% alpha.
        color_data = self.segment_plot.color_data.get_data()
        colors = self.segment_plot.color_mapper.map_screen(color_data)
        expected_colors = colors.astype('float32').view(rgba_dtype)
        expected_colors.shape = (5, )
        expected_colors['a'][~mask] *= 0.3

        assert_array_equal(mask, self.segment_plot.selected_mask)
        assert_array_equal(expected_colors, self.segment_plot.effective_colors)

        self.check_renders_something()
| [
"chaco.default_colormaps.viridis",
"chaco.segment_plot.SegmentPlot",
"numpy.alltrue",
"numpy.arange",
"chaco.api.LinearMapper",
"chaco.api.DataRange1D",
"chaco.api.PlotGraphicsContext",
"numpy.array",
"numpy.testing.assert_array_equal"
] | [((509, 522), 'chaco.api.DataRange1D', 'DataRange1D', ([], {}), '()\n', (520, 522), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((589, 620), 'chaco.api.LinearMapper', 'LinearMapper', ([], {'range': 'index_range'}), '(range=index_range)\n', (601, 620), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((703, 716), 'chaco.api.DataRange1D', 'DataRange1D', ([], {}), '()\n', (714, 716), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((783, 814), 'chaco.api.LinearMapper', 'LinearMapper', ([], {'range': 'value_range'}), '(range=value_range)\n', (795, 814), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((844, 986), 'chaco.segment_plot.SegmentPlot', 'SegmentPlot', ([], {'index': 'index_data_source', 'index_mapper': 'index_mapper', 'value': 'value_data_source', 'value_mapper': 'value_mapper', 'border_visible': '(False)'}), '(index=index_data_source, index_mapper=index_mapper, value=\n value_data_source, value_mapper=value_mapper, border_visible=False)\n', (855, 986), False, 'from chaco.segment_plot import SegmentPlot\n'), ((1221, 1234), 'chaco.api.DataRange1D', 'DataRange1D', ([], {}), '()\n', (1232, 1234), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((1301, 1327), 'chaco.default_colormaps.viridis', 'viridis', ([], {'range': 'color_range'}), '(range=color_range)\n', (1308, 1327), False, 'from chaco.default_colormaps import viridis\n'), ((1598, 1611), 'chaco.api.DataRange1D', 'DataRange1D', ([], {}), '()\n', (1609, 1611), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((1678, 1733), 'chaco.api.LinearMapper', 'LinearMapper', ([], {'low_pos': '(1)', 'high_pos': '(10)', 'range': 'width_range'}), '(low_pos=1, high_pos=10, range=width_range)\n', (1690, 1733), False, 
'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((2210, 2240), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (2229, 2240), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((2759, 2789), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (2778, 2789), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((3296, 3326), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (3315, 3326), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((3835, 3865), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (3854, 3865), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((4080, 4110), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (4099, 4110), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((4390, 4420), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (4409, 4420), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((4688, 4718), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (4707, 4718), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((4988, 5018), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (5007, 5018), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((5233, 5263), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (5252, 5263), False, 
'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((5543, 5573), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (5562, 5573), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((5841, 5871), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (5860, 5871), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((6141, 6171), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (6160, 6171), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((6422, 6452), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (6441, 6452), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((6768, 6798), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (6787, 6798), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((7102, 7132), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (7121, 7132), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((7438, 7468), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (7457, 7468), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((7691, 7721), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (7710, 7721), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((8009, 8039), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (8028, 8039), False, 
'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((8315, 8345), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (8334, 8345), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((8623, 8653), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (8642, 8653), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((8843, 8882), 'numpy.array', 'array', (['[True, True, False, False, True]'], {}), '([True, True, False, False, True])\n', (8848, 8882), False, 'from numpy import alltrue, arange, array\n'), ((9067, 9112), 'numpy.array', 'array', (['[yellow, yellow, black, black, yellow]'], {}), '([yellow, yellow, black, black, yellow])\n', (9072, 9112), False, 'from numpy import alltrue, arange, array\n'), ((9280, 9337), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['mask', 'self.segment_plot.selected_mask'], {}), '(mask, self.segment_plot.selected_mask)\n', (9298, 9337), False, 'from numpy.testing import assert_array_equal\n'), ((9346, 9417), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected_colors', 'self.segment_plot.effective_colors'], {}), '(expected_colors, self.segment_plot.effective_colors)\n', (9364, 9417), False, 'from numpy.testing import assert_array_equal\n'), ((9432, 9462), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (9451, 9462), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((9658, 9697), 'numpy.array', 'array', (['[True, True, False, False, True]'], {}), '([True, True, False, False, True])\n', (9663, 9697), False, 'from numpy import alltrue, arange, array\n'), ((10083, 10140), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['mask', 'self.segment_plot.selected_mask'], {}), '(mask, 
self.segment_plot.selected_mask)\n', (10101, 10140), False, 'from numpy.testing import assert_array_equal\n'), ((10149, 10220), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected_colors', 'self.segment_plot.effective_colors'], {}), '(expected_colors, self.segment_plot.effective_colors)\n', (10167, 10220), False, 'from numpy.testing import assert_array_equal\n'), ((10235, 10265), 'chaco.api.PlotGraphicsContext', 'PlotGraphicsContext', (['self.size'], {}), '(self.size)\n', (10254, 10265), False, 'from chaco.api import ArrayDataSource, DataRange1D, LinearMapper, PlotGraphicsContext\n'), ((475, 485), 'numpy.arange', 'arange', (['(10)'], {}), '(10)\n', (481, 485), False, 'from numpy import alltrue, arange, array\n'), ((666, 679), 'numpy.arange', 'arange', (['(1)', '(11)'], {}), '(1, 11)\n', (672, 679), False, 'from numpy import alltrue, arange, array\n'), ((1185, 1197), 'numpy.arange', 'arange', (['(2)', '(7)'], {}), '(2, 7)\n', (1191, 1197), False, 'from numpy import alltrue, arange, array\n'), ((1562, 1574), 'numpy.arange', 'arange', (['(3)', '(8)'], {}), '(3, 8)\n', (1568, 1574), False, 'from numpy import alltrue, arange, array\n'), ((2352, 2374), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (2359, 2374), False, 'from numpy import alltrue, arange, array\n'), ((2901, 2923), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (2908, 2923), False, 'from numpy import alltrue, arange, array\n'), ((3438, 3460), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (3445, 3460), False, 'from numpy import alltrue, arange, array\n'), ((3977, 3999), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (3984, 3999), False, 'from numpy import alltrue, arange, array\n'), ((4222, 4244), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (4229, 4244), False, 'from numpy import alltrue, arange, array\n'), ((4532, 4554), 'numpy.alltrue', 
'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (4539, 4554), False, 'from numpy import alltrue, arange, array\n'), ((4830, 4852), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (4837, 4852), False, 'from numpy import alltrue, arange, array\n'), ((5130, 5152), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (5137, 5152), False, 'from numpy import alltrue, arange, array\n'), ((5375, 5397), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (5382, 5397), False, 'from numpy import alltrue, arange, array\n'), ((5685, 5707), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (5692, 5707), False, 'from numpy import alltrue, arange, array\n'), ((5983, 6005), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (5990, 6005), False, 'from numpy import alltrue, arange, array\n'), ((6283, 6305), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (6290, 6305), False, 'from numpy import alltrue, arange, array\n'), ((6564, 6586), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (6571, 6586), False, 'from numpy import alltrue, arange, array\n'), ((6910, 6932), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (6917, 6932), False, 'from numpy import alltrue, arange, array\n'), ((7244, 7266), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (7251, 7266), False, 'from numpy import alltrue, arange, array\n'), ((7580, 7602), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (7587, 7602), False, 'from numpy import alltrue, arange, array\n'), ((7833, 7855), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (7840, 7855), False, 'from numpy import alltrue, arange, array\n'), ((8151, 8173), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (8158, 8173), False, 'from numpy 
import alltrue, arange, array\n'), ((8457, 8479), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (8464, 8479), False, 'from numpy import alltrue, arange, array\n'), ((8765, 8787), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (8772, 8787), False, 'from numpy import alltrue, arange, array\n'), ((9574, 9596), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (9581, 9596), False, 'from numpy import alltrue, arange, array\n'), ((10377, 10399), 'numpy.alltrue', 'alltrue', (['(actual == 255)'], {}), '(actual == 255)\n', (10384, 10399), False, 'from numpy import alltrue, arange, array\n')] |
#!/usr/bin/env python
from numpy import array
# Input data files (paths relative to the examples directory)
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_multiclass.dat'

# Mark both input attributes as continuous (i.e. not nominal)
feattypes = array([False, False])

parameter_list = [[traindat, testdat, label_traindat, feattypes]]

def multiclass_cartree_modular(train=traindat, test=testdat, labels=label_traindat, ft=feattypes):
	"""Train a CART decision tree on a multiclass problem and classify the test set.

	Returns a (tree, predicted_labels) pair, or None when Shogun is unavailable.
	"""
	try:
		from modshogun import RealFeatures, MulticlassLabels, CSVFile, CARTree, PT_MULTICLASS
	except ImportError:
		print("Could not import Shogun modules")
		return
	# Wrap the CSV inputs into Shogun feature/label containers.
	train_feats = RealFeatures(CSVFile(train))
	test_feats = RealFeatures(CSVFile(test))
	labels_obj = MulticlassLabels(CSVFile(labels))
	# Build the CART tree with 5-fold cross-validation pruning enabled.
	tree = CARTree(ft, 5, PT_MULTICLASS, True)
	tree.set_labels(labels_obj)
	tree.train(train_feats)
	# Classify the held-out data.
	predictions = tree.apply_multiclass(test_feats).get_labels()
	return tree, predictions
# Script entry point: run the example once with the default parameter set.
if __name__ == '__main__':
	print('CARTree')
	multiclass_cartree_modular(*parameter_list[0])
| [
"numpy.array",
"modshogun.CARTree",
"modshogun.CSVFile"
] | [((250, 271), 'numpy.array', 'array', (['[False, False]'], {}), '([False, False])\n', (255, 271), False, 'from numpy import array\n'), ((844, 879), 'modshogun.CARTree', 'CARTree', (['ft', '(5)', 'PT_MULTICLASS', '(True)'], {}), '(ft, 5, PT_MULTICLASS, True)\n', (851, 879), False, 'from modshogun import RealFeatures, MulticlassLabels, CSVFile, CARTree, PT_MULTICLASS\n'), ((676, 690), 'modshogun.CSVFile', 'CSVFile', (['train'], {}), '(train)\n', (683, 690), False, 'from modshogun import RealFeatures, MulticlassLabels, CSVFile, CARTree, PT_MULTICLASS\n'), ((717, 730), 'modshogun.CSVFile', 'CSVFile', (['test'], {}), '(test)\n', (724, 730), False, 'from modshogun import RealFeatures, MulticlassLabels, CSVFile, CARTree, PT_MULTICLASS\n'), ((763, 778), 'modshogun.CSVFile', 'CSVFile', (['labels'], {}), '(labels)\n', (770, 778), False, 'from modshogun import RealFeatures, MulticlassLabels, CSVFile, CARTree, PT_MULTICLASS\n')] |
"""
Test utilities for LKPY tests.
"""
import os
import os.path
import logging
from contextlib import contextmanager
import numpy as np
from .. import matrix
import pytest
from lenskit.datasets import MovieLens, ML100K
# Module-level logger for the test helpers.
_log = logging.getLogger(__name__)
# Shared dataset handles used across the test suite.
ml_test = MovieLens('ml-latest-small')
ml100k = ML100K()
def ml_sample():
    """Return the ratings restricted to the 500 most-rated items of ml-latest-small."""
    all_ratings = ml_test.ratings
    per_item = all_ratings.groupby('item').rating.count()
    top_items = per_item.nlargest(500)
    by_item = all_ratings.set_index('item')
    kept = by_item.loc[top_items.index, :]
    _log.info('top 500 items yield %d of %d ratings', len(kept), len(by_item))
    return kept.reset_index()
def rand_csr(nrows=100, ncols=50, nnz=1000, values=True):
    "Generate a random CSR for testing."
    # Draw nnz distinct flat positions, then split them into row/column indices.
    flat = np.random.choice(np.arange(ncols * nrows, dtype=np.int32), nnz, False)
    row_idx = np.mod(flat, nrows, dtype=np.int32)
    col_idx = np.floor_divide(flat, nrows, dtype=np.int32)
    vals = np.random.randn(nnz) if values else None
    return matrix.CSR.from_coo(row_idx, col_idx, vals, (nrows, ncols))
@contextmanager
def rand_seed(seed):
    """Context manager that seeds NumPy's RNG and restores the prior state on exit."""
    saved = np.random.get_state()
    try:
        np.random.seed(seed)
        yield
    finally:
        np.random.set_state(saved)
def repeated(n=50):
    """
    Decorator to run a test multiple times. Useful for randomized tests.
    Example::
        @repeated
        def test_something_with_random_values():
            pass
    Args:
        n(int):
            The number of iterations. If the decorator is used without
            parentheses, this will be the function itself, which will be
            run the default number of times (50).
    Environment Variables:
        LK_TEST_ITERATION_MULT(float):
            A multiplier for the number of test iterations. This is useful
            when debugging tests, to cause a test to be run more times than
            the default.
    """
    import functools  # local import keeps the module's import block untouched

    mult = float(os.environ.get('LK_TEST_ITERATION_MULT', '1'))

    def wrap(proc):
        # functools.wraps preserves the wrapped test's __name__/docstring so
        # pytest reports it correctly (the original wrapper hid them).
        @functools.wraps(proc)
        def run(*args, **kwargs):
            _log.info('running %s for %d iterations', proc.__name__, n * mult)
            for i in range(int(n * mult)):
                proc(*args, **kwargs)
        return run

    if hasattr(n, '__call__'):
        # Used as a bare @repeated: n is actually the test function itself.
        proc = n
        n = 50
        return wrap(proc)
    else:
        return wrap
# Marker for tests that require Numba JIT compilation to be enabled.
wantjit = pytest.mark.skipif('NUMBA_DISABLE_JIT' in os.environ,
                   reason='JIT required')
| [
"logging.getLogger",
"numpy.random.get_state",
"numpy.random.set_state",
"lenskit.datasets.MovieLens",
"numpy.floor_divide",
"os.environ.get",
"lenskit.datasets.ML100K",
"numpy.random.randn",
"numpy.random.seed",
"pytest.mark.skipif",
"numpy.mod",
"numpy.arange"
] | [((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((270, 298), 'lenskit.datasets.MovieLens', 'MovieLens', (['"""ml-latest-small"""'], {}), "('ml-latest-small')\n", (279, 298), False, 'from lenskit.datasets import MovieLens, ML100K\n'), ((308, 316), 'lenskit.datasets.ML100K', 'ML100K', ([], {}), '()\n', (314, 316), False, 'from lenskit.datasets import MovieLens, ML100K\n'), ((2385, 2461), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('NUMBA_DISABLE_JIT' in os.environ)"], {'reason': '"""JIT required"""'}), "('NUMBA_DISABLE_JIT' in os.environ, reason='JIT required')\n", (2403, 2461), False, 'import pytest\n'), ((848, 885), 'numpy.mod', 'np.mod', (['coords', 'nrows'], {'dtype': 'np.int32'}), '(coords, nrows, dtype=np.int32)\n', (854, 885), True, 'import numpy as np\n'), ((897, 943), 'numpy.floor_divide', 'np.floor_divide', (['coords', 'nrows'], {'dtype': 'np.int32'}), '(coords, nrows, dtype=np.int32)\n', (912, 943), True, 'import numpy as np\n'), ((1141, 1162), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1160, 1162), True, 'import numpy as np\n'), ((1950, 1995), 'os.environ.get', 'os.environ.get', (['"""LK_TEST_ITERATION_MULT"""', '"""1"""'], {}), "('LK_TEST_ITERATION_MULT', '1')\n", (1964, 1995), False, 'import os\n'), ((783, 823), 'numpy.arange', 'np.arange', (['(ncols * nrows)'], {'dtype': 'np.int32'}), '(ncols * nrows, dtype=np.int32)\n', (792, 823), True, 'import numpy as np\n'), ((974, 994), 'numpy.random.randn', 'np.random.randn', (['nnz'], {}), '(nnz)\n', (989, 994), True, 'import numpy as np\n'), ((1180, 1200), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1194, 1200), True, 'import numpy as np\n'), ((1236, 1262), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (1255, 1262), True, 'import numpy as np\n')] |
"""Extract fft features
Reference: https://www.kaggle.com/gpreda/lanl-earthquake-eda-and-prediction
"""
import sys
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import competition as cc
from common import stop_watch
# Directory of training CSVs, selected by the first CLI argument.
# NOTE(review): evaluated at import time, so importing this module without a
# command-line argument raises IndexError.
TRAIN_CSV_DIRECTORY_PATH = cc.INPUT_PATH / sys.argv[1]
TRAIN_CSV_LIST = list(TRAIN_CSV_DIRECTORY_PATH.glob('**/*.csv'))
@stop_watch
def extract_features(csv_list, feature_dir_path):
    """Compute FFT summary statistics for each segment CSV and write one feature CSV.

    For every input file the acoustic signal is Fourier-transformed and, for both
    the real and imaginary parts of the spectrum, mean/std/max/min are recorded
    over the full spectrum and over the last 5000/15000 samples.  Column order in
    the output is identical to the original hand-unrolled implementation.

    :param csv_list: list of segment CSV paths as strings (split on "/").
    :param feature_dir_path: output directory (a Path); created if missing.
    """
    df = pd.DataFrame()
    Path.mkdir(feature_dir_path, exist_ok=True, parents=True)
    for index, each_csv in enumerate(tqdm(sorted(csv_list))):
        seg = pd.read_csv(each_csv, dtype=cc.DTYPES)
        # Segment id is the file name without directory and extension.
        seg_id = each_csv.split("/")[-1].split(".")[0]
        df.loc[index, "seg_id"] = seg_id
        xc = pd.Series(seg['acoustic_data'].values)
        zc = np.fft.fft(xc)
        spectra = {"R": np.real(zc), "I": np.imag(zc)}
        # Full-spectrum statistics (Rmean..Rmin, then Imean..Imin).
        for prefix in ("R", "I"):
            part = spectra[prefix]
            df.loc[index, prefix + 'mean'] = part.mean()
            df.loc[index, prefix + 'std'] = part.std()
            df.loc[index, prefix + 'max'] = part.max()
            df.loc[index, prefix + 'min'] = part.min()
        # Trailing-window statistics; loop order reproduces the original columns:
        # Rmean_last_5000, Rmean_last_15000, Rstd_last_5000, ... then the I block.
        for prefix in ("R", "I"):
            part = spectra[prefix]
            for stat in ("mean", "std", "max", "min"):
                for window in (5000, 15000):
                    column = '{}{}_last_{}'.format(prefix, stat, window)
                    df.loc[index, column] = getattr(part[-window:], stat)()
    print("Aggregation output is belows:")
    print(df.head(3))
    df.to_csv(feature_dir_path / "{}.csv".format(cc.PREF), index=False)
if __name__ == "__main__":
    # Feature output directory mirrors the input directory name (CLI argument 1).
    train_csv_path = cc.FEATURE_PATH / "{}".format(sys.argv[1])
    # Path objects are converted to strings because extract_features splits on "/".
    train_csv_l = [str(item) for item in TRAIN_CSV_LIST]
    extract_features(train_csv_l, train_csv_path)
    # Repeat for the fixed test split.
    test_csv_path = cc.FEATURE_PATH / "test"
    test_csv_l = [str(item) for item in cc.TEST_CSV_LIST]
    extract_features(test_csv_l, test_csv_path)
| [
"pandas.Series",
"pandas.read_csv",
"numpy.fft.fft",
"numpy.real",
"pathlib.Path.mkdir",
"pandas.DataFrame",
"numpy.imag"
] | [((456, 470), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (468, 470), True, 'import pandas as pd\n'), ((475, 532), 'pathlib.Path.mkdir', 'Path.mkdir', (['feature_dir_path'], {'exist_ok': '(True)', 'parents': '(True)'}), '(feature_dir_path, exist_ok=True, parents=True)\n', (485, 532), False, 'from pathlib import Path\n'), ((609, 647), 'pandas.read_csv', 'pd.read_csv', (['each_csv'], {'dtype': 'cc.DTYPES'}), '(each_csv, dtype=cc.DTYPES)\n', (620, 647), True, 'import pandas as pd\n'), ((757, 795), 'pandas.Series', 'pd.Series', (["seg['acoustic_data'].values"], {}), "(seg['acoustic_data'].values)\n", (766, 795), True, 'import pandas as pd\n'), ((809, 823), 'numpy.fft.fft', 'np.fft.fft', (['xc'], {}), '(xc)\n', (819, 823), True, 'import numpy as np\n'), ((842, 853), 'numpy.real', 'np.real', (['zc'], {}), '(zc)\n', (849, 853), True, 'import numpy as np\n'), ((872, 883), 'numpy.imag', 'np.imag', (['zc'], {}), '(zc)\n', (879, 883), True, 'import numpy as np\n')] |
import csv
import os
import pickle
from typing import List
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import PandasTools
def save_features(path: str, features: List[np.ndarray]) -> None:
    """Write *features* to ``path`` as a compressed ``.npz`` archive.

    The arrays are stored under the array name ``"features"``, which is the
    name :func:`load_features` looks up for ``.npz`` inputs.

    :param path: Destination ``.npz`` file.
    :param features: One 1D numpy array of features per molecule.
    """
    np.savez_compressed(path, features=features)
def load_features(path: str) -> np.ndarray:
    """
    Loads features saved in a variety of formats.

    Supported formats:

    * :code:`.npz` compressed (assumes features are saved with name "features")
    * .npy
    * :code:`.csv` / :code:`.txt` (assumes comma-separated features with a header and with one line per molecule)
    * :code:`.pkl` / :code:`.pckl` / :code:`.pickle` containing a sparse numpy array

    .. note::
       All formats assume that the SMILES loaded elsewhere in the code are in the same
       order as the features loaded here.

    :param path: Path to a file containing features.
    :return: A 2D numpy array of size :code:`(num_molecules, features_size)` containing the features.
    """
    extension = os.path.splitext(path)[1]
    # Guard-clause dispatch on the file extension.
    if extension == '.npz':
        return np.load(path)['features']
    if extension == '.npy':
        return np.load(path)
    if extension in ['.csv', '.txt']:
        with open(path) as f:
            reader = csv.reader(f)
            next(reader)  # skip header
            return np.array([[float(value) for value in row] for row in reader])
    if extension in ['.pkl', '.pckl', '.pickle']:
        # Densify each sparse row and drop singleton dimensions.
        with open(path, 'rb') as f:
            return np.array([np.squeeze(np.array(feat.todense())) for feat in pickle.load(f)])
    raise ValueError(f'Features path extension {extension} not supported.')
def load_valid_atom_features(path: str, smiles: List[str]) -> List[np.ndarray]:
    """
    Loads atom-wise features saved in a variety of formats.
    Supported formats:
    * :code:`.npz` descriptors are saved as 2D array for each molecule in the order of that in the data.csv
    * :code:`.pkl` / :code:`.pckl` / :code:`.pickle` containing a pandas dataframe with smiles as index and numpy array of descriptors as columns
    * :code:`.sdf` containing all mol blocks with descriptors as entries
    :param path: Path to file containing atomwise features.
    :param smiles: SMILES strings used to reorder/validate the SDF entries.
    :return: A list of 2D array.
    """
    extension = os.path.splitext(path)[1]
    if extension == '.npz':
        # One array per archive entry, in the archive's own key order.
        container = np.load(path)
        features = [container[key] for key in container]
    elif extension in ['.pkl', '.pckl', '.pickle']:
        features_df = pd.read_pickle(path)
        # Each row holds per-atom arrays; stack 1D columns (or concatenate 2D
        # ones) so every molecule yields a single 2D array.
        if features_df.iloc[0, 0].ndim == 1:
            features = features_df.apply(lambda x: np.stack(x.tolist(), axis=1), axis=1).tolist()
        elif features_df.iloc[0, 0].ndim == 2:
            features = features_df.apply(lambda x: np.concatenate(x.tolist(), axis=1), axis=1).tolist()
        else:
            raise ValueError(f'Atom descriptors input {path} format not supported')
    elif extension == '.sdf':
        # Index the SDF records by SMILES and drop duplicate molecules.
        features_df = PandasTools.LoadSDF(path).drop(['ID', 'ROMol'], axis=1).set_index('SMILES')
        features_df = features_df[~features_df.index.duplicated()]
        # locate atomic descriptors columns (comma-separated string entries)
        features_df = features_df.iloc[:, features_df.iloc[0, :].apply(lambda x: isinstance(x, str) and ',' in x).to_list()]
        # Align rows to the caller's SMILES order; missing molecules become NaN.
        features_df = features_df.reindex(smiles)
        if features_df.isnull().any().any():
            raise ValueError('Invalid custom atomic descriptors file, Nan found in data')
        # Parse each comma-separated string into a float array.
        features_df = features_df.applymap(lambda x: np.array(x.replace('\r', '').replace('\n', '').split(',')).astype(float))
        # Truncate by number of atoms
        num_atoms = {x: Chem.MolFromSmiles(x).GetNumAtoms() for x in features_df.index.to_list()}
        def truncate_arrays(r):
            # Clip every per-atom column of row r to that molecule's atom count.
            return r.apply(lambda x: x[:num_atoms[r.name]])
        features_df = features_df.apply(lambda x: truncate_arrays(x), axis=1)
        features = features_df.apply(lambda x: np.stack(x.tolist(), axis=1), axis=1).tolist()
    else:
        raise ValueError(f'Extension "{extension}" is not supported.')
    return features
return features
| [
"pandas.read_pickle",
"os.path.splitext",
"rdkit.Chem.MolFromSmiles",
"pickle.load",
"rdkit.Chem.PandasTools.LoadSDF",
"numpy.savez_compressed",
"numpy.load",
"csv.reader"
] | [((491, 535), 'numpy.savez_compressed', 'np.savez_compressed', (['path'], {'features': 'features'}), '(path, features=features)\n', (510, 535), True, 'import numpy as np\n'), ((1280, 1302), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1296, 1302), False, 'import os\n'), ((2582, 2604), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (2598, 2604), False, 'import os\n'), ((2657, 2670), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (2664, 2670), True, 'import numpy as np\n'), ((1354, 1367), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1361, 1367), True, 'import numpy as np\n'), ((1429, 1442), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1436, 1442), True, 'import numpy as np\n'), ((2803, 2823), 'pandas.read_pickle', 'pd.read_pickle', (['path'], {}), '(path)\n', (2817, 2823), True, 'import pandas as pd\n'), ((1534, 1547), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1544, 1547), False, 'import csv\n'), ((3959, 3980), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['x'], {}), '(x)\n', (3977, 3980), False, 'from rdkit import Chem\n'), ((3269, 3294), 'rdkit.Chem.PandasTools.LoadSDF', 'PandasTools.LoadSDF', (['path'], {}), '(path)\n', (3288, 3294), False, 'from rdkit.Chem import PandasTools\n'), ((1843, 1857), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1854, 1857), False, 'import pickle\n')] |
"""
Implement optics algorithms for optical phase tomography using GPU
<NAME> <EMAIL>
<NAME> <EMAIL>
October 22, 2018
"""
import numpy as np
import arrayfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
# Short local aliases for the numpy/arrayfire dtypes configured in
# opticaltomography.settings.
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
    """
    Bag of tunable parameters for the tomography solver.

    Attributes are grouped by concern: optimizer choice, FISTA options,
    regularization (total variation / lasso), value constraints, pupil
    (aberration) updates, and gradient batching.
    """
    def __init__(self):
        # --- optimizer ---
        self.method = "FISTA"
        self.stepsize = 1e-2
        self.max_iter = 20
        self.error = []
        self.reg_term = 0.0  # L2-norm regularization weight
        # --- FISTA options ---
        self.fista_global_update = False
        self.restart = False
        # --- total-variation regularization ---
        self.total_variation = False
        self.reg_tv = 1.0  # TV weight (lambda)
        self.max_iter_tv = 15
        self.order_tv = 1
        self.total_variation_gpu = False
        # --- lasso (L1) regularization ---
        self.lasso = False
        self.reg_lasso = 1.0
        # --- constraints on the reconstructed values ---
        self.positivity_real = (False, "larger")
        self.positivity_imag = (False, "larger")
        self.pure_real = False
        self.pure_imag = False
        # --- aberration (pupil) correction ---
        self.pupil_update = False
        self.pupil_global_update = False
        self.pupil_step_size = 1.0
        self.pupil_update_method = "gradient"
        # --- batch gradient update ---
        self.batch_size = 1
        # --- randomized measurement ordering ---
        self.random_order = False
class PhaseObject3D:
    """
    Container for a 3D phase object to be reconstructed.

    Depending on the scattering model, one of the following derived quantities
    is used (each computed on demand by a convert* method):
    - Refractive index (RI)
    - Transmittance function (Trans)
    - PhaseContrast
    - Scattering potential (V)

    shape: shape of object to be reconstructed in (x,y,z), tuple
    voxel_size: size of each voxel in (x,y,z), tuple
    RI_obj: refractive index of object (Optional; defaults to a uniform volume at RI)
    RI: background refractive index (Optional, default 1.0)
    slice_separation: For multislice algorithms, how far apart are slices separated, array (Optional)
    """
    def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, slice_separation = None):
        assert len(shape) == 3, "shape should be 3 dimensional!"
        self.shape = shape
        # Default object: homogeneous background of index RI everywhere.
        self.RI_obj = RI * np.ones(shape, dtype = np_complex_datatype) if RI_obj is None else RI_obj.astype(np_complex_datatype)
        self.RI = RI
        # Only voxel_size[0] is used laterally — presumably x and y voxel sizes
        # are equal; voxel_size[1] is ignored (TODO confirm).
        self.pixel_size = voxel_size[0]
        self.pixel_size_z = voxel_size[2]
        if slice_separation is not None:
            #for discontinuous slices: one separation per gap between adjacent layers
            assert len(slice_separation) == shape[2]-1, "number of separations should match with number of layers!"
            self.slice_separation = np.asarray(slice_separation).astype(np_float_datatype)
        else:
            #for continuous slices: uniform axial spacing equal to the voxel depth
            self.slice_separation = self.pixel_size_z * np.ones((shape[2]-1,), dtype = np_float_datatype)
    def convertRItoTrans(self, wavelength):
        # Per-slice transmittance t = exp(i * k0 * (n - n0) * dz).
        k0 = 2.0 * np.pi / wavelength
        self.trans_obj = np.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)
    def convertRItoPhaseContrast(self):
        # Phase contrast is the deviation of the index from the background.
        self.contrast_obj = self.RI_obj - self.RI
    def convertRItoV(self, wavelength):
        # Scattering potential V = k0^2 * (n0^2 - n^2).
        k0 = 2.0 * np.pi / wavelength
        self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)
    def convertVtoRI(self, wavelength):
        # Inverse of convertRItoV: recover the complex index from V by solving
        # the quadratic in RI_obj_real^2 (non-negative root taken).
        k0 = 2.0 * np.pi / wavelength
        B = -1.0 * (self.RI**2 - self.V_obj.real/k0**2)
        C = -1.0 * (-1.0 * self.V_obj.imag/k0**2/2.0)**2
        RI_obj_real = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
        RI_obj_imag = -0.5 * self.V_obj.imag/k0**2/RI_obj_real
        self.RI_obj = RI_obj_real + 1.0j * RI_obj_imag
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illumination angles in x, default = [0] (on axis)
fy_illu_list: illumination angles in y
    rotation_angle_list: angles of rotation in tomography
propagation_distance_list: defocus distances for each illumination
"""
    def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
        """
        Wire up all helper objects for the solver.

        Required kwargs: "wavelength" and "na"; the whole kwargs dict is also
        forwarded to Defocus and later to the scattering model.
        NOTE(review): the list defaults are mutable; safe only while no caller
        mutates them in place.
        """
        self.phase_obj_3d = phase_obj_3d
        self.wavelength = kwargs["wavelength"]
        #Rotation angles and objects
        self.rot_angles = rotation_angle_list
        self.number_rot = len(self.rot_angles)
        self.rotation_pad = kwargs.get("rotation_pad", True)
        #Illumination angles (paired element-wise, so the lists must match in length)
        assert len(fx_illu_list) == len(fy_illu_list)
        self.fx_illu_list = fx_illu_list
        self.fy_illu_list = fy_illu_list
        self.number_illum = len(self.fx_illu_list)
        #Aberration object (pupil model shared by all measurements)
        self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,\
                                          self.wavelength, kwargs["na"], pad = False)
        #Defocus distances and object
        self.prop_distances = propagation_distance_list
        self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
        self.number_defocus = len(self.prop_distances)
        #Scattering models and algorithms (both algorithm names map to the same
        #first-order routine; it branches on configs.method internally)
        self._opticsmodel = {"MultiTrans": MultiTransmittance,
                             "MultiPhaseContrast": MultiPhaseContrast,
                            }
        self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
                            "FISTA": self._solveFirstOrderGradient
                           }
        self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
"""
Define scattering method for tomography
model: scattering models, it can be one of the followings:
"MultiTrans", "MultiPhaseContrast"(Used in the paper)
"""
self.scat_model = model
if hasattr(self, '_scattering_obj'):
del self._scattering_obj
if model == "MultiTrans":
self.phase_obj_3d.convertRItoTrans(self.wavelength)
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.trans_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1, \
flag_gpu_inout = True, flag_inplace = True)
elif model == "MultiPhaseContrast":
if not hasattr(self.phase_obj_3d, 'contrast_obj'):
self.phase_obj_3d.convertRItoPhaseContrast()
self._x = self.phase_obj_3d.contrast_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
else:
if not hasattr(self.phase_obj_3d, 'V_obj'):
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.V_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
    def forwardPredict(self, field = False):
        """
        Uses current object in the phase_obj_3d to predict the amplitude of the exit wave
        Before calling, make sure correct object is contained

        field: when True, return complex fields; otherwise return amplitudes.
        Returns predictions stacked with the last two axes being
        (illumination, rotation); when the scattering model produces
        back-scatter, a (forward, backward) pair is returned.
        """
        obj_gpu = af.to_array(self._x)
        with contexttimer.Timer() as timer:  # NOTE(review): timer value is never reported
            forward_scattered_predict= []
            if self._scattering_obj.back_scatter:
                back_scattered_predict = []
            for rot_idx in range(self.number_rot):
                forward_scattered_predict.append([])
                if self._scattering_obj.back_scatter:
                    back_scattered_predict.append([])
                # Rotate the volume (in place) into this tomographic orientation.
                if self.rot_angles[rot_idx] != 0:
                    self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
                for illu_idx in range(self.number_illum):
                    fx_illu = self.fx_illu_list[illu_idx]
                    fy_illu = self.fy_illu_list[illu_idx]
                    fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
                    if field:
                        forward_scattered_predict[rot_idx].append(np.array(fields["forward_scattered_field"]))
                        if self._scattering_obj.back_scatter:
                            back_scattered_predict[rot_idx].append(np.array(fields["back_scattered_field"]))
                    else:
                        forward_scattered_predict[rot_idx].append(np.abs(fields["forward_scattered_field"]))
                        if self._scattering_obj.back_scatter:
                            back_scattered_predict[rot_idx].append(np.abs(fields["back_scattered_field"]))
                # Undo the rotation so the next iteration starts from the original pose.
                if self.rot_angles[rot_idx] != 0:
                    self._rot_obj.rotate(obj_gpu, -1.0*self.rot_angles[rot_idx])
            # Stack the nested [rotation][illumination] lists into arrays with
            # trailing (illumination, rotation) axes; the extra leading axis in
            # the 3-D case is presumably the defocus stack (TODO confirm).
            if len(forward_scattered_predict[0][0].shape)==2:
                forward_scattered_predict = np.array(forward_scattered_predict).transpose(2, 3, 1, 0)
            elif len(forward_scattered_predict[0][0].shape)==3:
                forward_scattered_predict = np.array(forward_scattered_predict).transpose(2, 3, 4, 1, 0)
            if self._scattering_obj.back_scatter:
                if len(back_scattered_predict[0][0].shape)==2:
                    back_scattered_predict = np.array(back_scattered_predict).transpose(2, 3, 1, 0)
                elif len(back_scattered_predict[0][0].shape)==3:
                    back_scattered_predict = np.array(back_scattered_predict).transpose(2, 3, 4, 1, 0)
                return forward_scattered_predict, back_scattered_predict
            else:
                return forward_scattered_predict
    def checkGradient(self, delta = 1e-4):
        """
        Compare the numerical gradient of the data-fidelity cost against the
        analytical (adjoint) gradient at one random voxel and one random
        illumination angle. Only works for 64 bit data type.

        delta: finite-difference step for the numerical gradient.
        Results are printed, not returned.
        """
        assert af_float_datatype == af.Dtype.f64, "This will only be accurate if 64 bit datatype is used!"
        shape = self.phase_obj_3d.shape
        # Random voxel at which both gradients are evaluated.
        point = (np.random.randint(shape[0]), np.random.randint(shape[1]), np.random.randint(shape[2]))
        illu_idx = np.random.randint(len(self.fx_illu_list))
        fx_illu = self.fx_illu_list[illu_idx]
        fy_illu = self.fy_illu_list[illu_idx]
        x = np.ones(shape, dtype = np_complex_datatype)
        # Random target amplitude; half-sized when the defocus operator pads.
        if self._defocus_obj.pad:
            amplitude = af.randu(shape[0]//2, shape[1]//2, dtype = af_float_datatype)
        else:
            amplitude = af.randu(shape[0], shape[1], dtype = af_float_datatype)
        print("testing the gradient at point : ", point)
        def func(x0):
            # Data-fidelity cost: sum of squared amplitude residuals.
            fields = self._scattering_obj.forward(x0, fx_illu, fy_illu)
            field_scattered = self._aberration_obj.forward(fields["forward_scattered_field"])
            field_measure = self._defocus_obj.forward(field_scattered, self.prop_distances)
            residual = af.abs(field_measure) - amplitude
            function_value = af.sum(residual*af.conjg(residual)).real
            return function_value
        numerical_gradient = calculateNumericalGradient(func, x, point, delta = delta)
        # Analytical gradient via one forward pass plus the adjoint path.
        fields = self._scattering_obj.forward(x, fx_illu, fy_illu)
        forward_scattered_field = fields["forward_scattered_field"]
        cache = fields["cache"]
        forward_scattered_field = self._aberration_obj.forward(forward_scattered_field)
        field_measure = self._defocus_obj.forward(forward_scattered_field, self.prop_distances)
        analytical_gradient = self._computeGradient(field_measure, amplitude, cache)[point]
        print("numerical gradient: %5.5e + %5.5e j" %(numerical_gradient.real, numerical_gradient.imag))
        print("analytical gradient: %5.5e + %5.5e j" %(analytical_gradient.real, analytical_gradient.imag))
def _forwardMeasure(self, fx_illu, fy_illu, obj = None):
"""
From an illumination angle, this function computes the exit wave.
fx_illu, fy_illu: illumination angle in x and y (scalars)
obj: object to be passed through (Optional, default pick from phase_obj_3d)
"""
if obj is None:
fields = self._scattering_obj.forward(self._x, fx_illu, fy_illu)
else:
fields = self._scattering_obj.forward(obj, fx_illu, fy_illu)
field_scattered = self._aberration_obj.forward(fields["forward_scattered_field"])
field_scattered = self._defocus_obj.forward(field_scattered, self.prop_distances)
fields["forward_scattered_field"] = field_scattered
if self._scattering_obj.back_scatter:
field_scattered = self._aberration_obj.forward(fields["back_scattered_field"])
field_scattered = self._defocus_obj.forward(field_scattered, self.prop_distances)
fields["back_scattered_field"] = field_scattered
return fields
    def _computeGradient(self, field_measure, amplitude, cache):
        """
        Backpropagate the amplitude-measurement residual into a volume gradient.

        field_measure: exit wave computed in forward model
        amplitude: amplitude measured
        cache: exit wave at each layer, saved previously
        """
        # Residual of the amplitude cost: u - a * exp(i * arg(u)).
        field_bp = field_measure - amplitude*af.exp(1.0j*af.arg(field_measure))
        # Undo defocus and aberration, then let the scattering model turn the
        # backpropagated field plus cached layers into a volume gradient.
        field_bp = self._defocus_obj.adjoint(field_bp, self.prop_distances)
        field_bp = self._aberration_obj.adjoint(field_bp)
        gradient = self._scattering_obj.adjoint(field_bp, cache)
        return gradient["gradient"]
def _initialization(self,configs, x_init = None):
"""
Initialize algorithm
configs: configs object from class AlgorithmConfigs
x_init: initial guess of object
"""
if x_init is None:
if self.scat_model is "MultiTrans":
self._x[:, :, :] = 1.0
else:
self._x[:, :, :] = 0.0
else:
self._x[:, :, :] = x_init
    def _solveFirstOrderGradient(self, configs, amplitudes, verbose):
        """
        MAIN part of the solver, runs the FISTA algorithm
        configs: configs object from class AlgorithmConfigs
        amplitudes: all measurements
        verbose: boolean variable to print verbosely
        """
        # Plain first-order gradient descent unless FISTA (accelerated
        # proximal gradient) is explicitly requested.
        flag_FISTA = False
        if configs.method == "FISTA":
            flag_FISTA = True

        # update multiple angles at a time
        batch_update = False
        if configs.fista_global_update or configs.batch_size != 1:
            # Accumulator for gradients over a mini-batch of measurements.
            gradient_batch = af.constant(0.0, self.phase_obj_3d.shape[0],\
                                         self.phase_obj_3d.shape[1],\
                                         self.phase_obj_3d.shape[2], dtype = af_complex_datatype)
            batch_update = True
            if configs.fista_global_update:
                # batch_size == 0 is used as the sentinel for "all measurements".
                configs.batch_size = 0
        #TODO: what if num_batch is not an integer
        if configs.batch_size == 0:
            num_batch = 1
        else:
            # Batch over illumination angles when there is a single rotation,
            # otherwise batch over rotations.
            if self.number_rot < 2:
                num_batch = self.number_illum // configs.batch_size
            else:
                num_batch = self.number_rot // configs.batch_size

        stepsize = configs.stepsize
        max_iter = configs.max_iter
        reg_term = configs.reg_term
        configs.error = []
        # Working copy of the object estimate on the GPU.
        obj_gpu = af.constant(0.0, self.phase_obj_3d.shape[0],\
                              self.phase_obj_3d.shape[1],\
                              self.phase_obj_3d.shape[2], dtype = af_complex_datatype)

        #Initialization for FISTA update
        if flag_FISTA:
            restart = configs.restart
            y_k = self._x.copy()
            t_k = 1.0

        #Start of iterative algorithm
        with contexttimer.Timer() as timer:
            if verbose:
                print("---- Start of the %5s algorithm ----" %(self.scat_model))
            for iteration in range(max_iter):
                cost = 0.0
                obj_gpu[:] = af.to_array(self._x)
                # Optionally shuffle the measurement order each epoch.
                if configs.random_order:
                    rot_order = np.random.permutation(range(self.number_rot))
                    illu_order = np.random.permutation(range(self.number_illum))
                else:
                    rot_order = range(self.number_rot)
                    illu_order = range(self.number_illum)
                for batch_idx in range(num_batch):
                    if batch_update:
                        gradient_batch[:,:,:] = 0.0

                    # Pick the measurement indices for this mini-batch.
                    if configs.batch_size == 0:
                        rot_indices = rot_order
                        illu_indices = illu_order
                    else:
                        if self.number_rot < 2:
                            rot_indices = rot_order
                            illu_indices = illu_order[batch_idx * configs.batch_size : (batch_idx+1) * configs.batch_size]
                        else:
                            illu_indices = illu_order
                            rot_indices = rot_order[batch_idx * configs.batch_size : (batch_idx+1) * configs.batch_size]

                    for rot_idx in rot_indices:
                        # Rotate the object
                        if self.rot_angles[rot_idx] != 0:
                            self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
                            if batch_update:
                                self._rot_obj.rotate(gradient_batch, self.rot_angles[rot_idx])
                        for illu_idx in illu_indices:
                            #forward scattering
                            fx_illu = self.fx_illu_list[illu_idx]
                            fy_illu = self.fy_illu_list[illu_idx]
                            fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
                            field_measure = fields["forward_scattered_field"]
                            cache = fields["cache"]
                            #calculate error
                            amplitude = af.to_array(amplitudes[:,:,:,illu_idx, rot_idx])
                            residual = af.abs(field_measure) - amplitude
                            cost += af.sum(residual*af.conjg(residual)).real
                            #calculate gradient
                            if batch_update:
                                gradient_batch[:, :, :] += self._computeGradient(field_measure, amplitude, cache)
                            else:
                                obj_gpu[:, :, :] -= stepsize * self._computeGradient(field_measure, amplitude, cache)
                            # Release references so the GPU arrays can be freed.
                            field_measure = None
                            cache = None
                            amplitude = None
                        # Rotate back to the canonical frame before the next rotation.
                        if self.rot_angles[rot_idx] != 0:
                            self._rot_obj.rotate(obj_gpu, -1.0*self.rot_angles[rot_idx])
                            if batch_update:
                                self._rot_obj.rotate_adj(gradient_batch, self.rot_angles[rot_idx])
                    # Apply the accumulated batch gradient in one step.
                    if batch_update:
                        obj_gpu[:, :, :] -= stepsize * gradient_batch
                    # Divergence guard: shrink the stepsize and bail out.
                    if np.isnan(obj_gpu).sum() > 0:
                        stepsize *= 0.5
                        print("WARNING: Gradient update diverges! Resetting stepsize to %3.2f" %(stepsize))
                        return obj_gpu

                    # L2 regularizer
                    obj_gpu[:, :, :] -= stepsize * reg_term * obj_gpu

                #record total error
                configs.error.append(cost + reg_term * af.sum(obj_gpu*af.conjg(obj_gpu)).real)

                if flag_FISTA:
                    #check convergence
                    if iteration > 0:
                        if configs.error[-1] > configs.error[-2]:
                            if restart:
                                # Restart the momentum and roll back the iterate.
                                t_k = 1.0
                                self._x[:, :, :] = y_k
                                print("WARNING: FISTA Restart! Error: %5.5f" %(np.log10(configs.error[-1])))
                                continue
                            else:
                                print("WARNING: Error increased! Error: %5.5f" %(np.log10(configs.error[-1])))

                    #FISTA auxiliary variable
                    y_k1 = np.array(self._regularizer_obj.applyRegularizer(obj_gpu))

                    #FISTA update
                    t_k1 = 0.5*(1.0 + (1.0 + 4.0*t_k**2)**0.5)
                    beta = (t_k - 1.0) / t_k1
                    self._x[:, :, :] = y_k1 + beta * (y_k1 - y_k)
                    t_k = t_k1
                    y_k = y_k1.copy()
                else:
                    #check convergence
                    self._x[:, :, :] = np.array(obj_gpu)
                    if iteration > 0:
                        if configs.error[-1] > configs.error[-2]:
                            # Plain gradient descent: decay the stepsize on error increase.
                            print("WARNING: Error increased! Error: %5.5f" %(np.log10(configs.error[-1])))
                            stepsize *= 0.8

                if verbose:
                    print("iteration: %d/%d, error: %5.5f, elapsed time: %5.2f seconds" %(iteration+1, max_iter, np.log10(configs.error[-1]), timer.elapsed))
        return self._x
    def solve(self, configs, amplitudes, x_init = None, verbose = True):
        """
        function to solve for the tomography problem
        configs: configs object from class AlgorithmConfigs
        amplitudes: measurements in amplitude not INTENSITY, ordered by (x,y,illumination,defocus,rotation)
        x_init: initial guess for object
        verbose: boolean variable to print verbosely
        """
        self._initialization(configs, x_init)
        # Configure the (optional) pupil recovery performed alongside the solve.
        self._aberration_obj.setUpdateParams(flag_update = configs.pupil_update,\
                                             pupil_step_size = configs.pupil_step_size,\
                                             update_method = configs.pupil_update_method,\
                                             global_update = configs.pupil_global_update,\
                                             measurement_num = self.number_illum*self.number_rot)
        self._regularizer_obj = Regularizer(configs, verbose)
        # Pad singleton measurement axes so `amplitudes` is always indexable
        # as (x, y, defocus, illumination, rotation).
        if self.number_defocus < 2:
            amplitudes = amplitudes[:,:, np.newaxis]
        if self.number_illum < 2:
            amplitudes = amplitudes[:,:,:, np.newaxis]
        if self.number_rot < 2:
            amplitudes = amplitudes[:,:,:,:, np.newaxis]
        return self._algorithms[configs.method](configs, amplitudes, verbose) | [
"arrayfire.constant",
"numpy.log10",
"numpy.array",
"arrayfire.conjg",
"opticaltomography.opticsmodel.Aberration",
"contexttimer.Timer",
"arrayfire.randu",
"numpy.asarray",
"numpy.exp",
"opticaltomography.opticsutil.ImageRotation",
"numpy.abs",
"numpy.ones",
"arrayfire.arg",
"numpy.any",
... | [((3729, 3792), 'numpy.exp', 'np.exp', (['(1.0j * k0 * (self.RI_obj - self.RI) * self.pixel_size_z)'], {}), '(1.0j * k0 * (self.RI_obj - self.RI) * self.pixel_size_z)\n', (3735, 3792), True, 'import numpy as np\n'), ((5695, 5800), 'opticaltomography.opticsmodel.Aberration', 'Aberration', (['phase_obj_3d.shape[:2]', 'phase_obj_3d.pixel_size', 'self.wavelength', "kwargs['na']"], {'pad': '(False)'}), "(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, self.wavelength,\n kwargs['na'], pad=False)\n", (5705, 5800), False, 'from opticaltomography.opticsmodel import Defocus, Aberration\n'), ((5969, 6035), 'opticaltomography.opticsmodel.Defocus', 'Defocus', (['phase_obj_3d.shape[:2]', 'phase_obj_3d.pixel_size'], {}), '(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)\n', (5976, 6035), False, 'from opticaltomography.opticsmodel import Defocus, Aberration\n'), ((8742, 8762), 'arrayfire.to_array', 'af.to_array', (['self._x'], {}), '(self._x)\n', (8753, 8762), True, 'import arrayfire as af\n'), ((11756, 11797), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np_complex_datatype'}), '(shape, dtype=np_complex_datatype)\n', (11763, 11797), True, 'import numpy as np\n'), ((12587, 12642), 'opticaltomography.opticsutil.calculateNumericalGradient', 'calculateNumericalGradient', (['func', 'x', 'point'], {'delta': 'delta'}), '(func, x, point, delta=delta)\n', (12613, 12642), False, 'from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient\n'), ((17029, 17160), 'arrayfire.constant', 'af.constant', (['(0.0)', 'self.phase_obj_3d.shape[0]', 'self.phase_obj_3d.shape[1]', 'self.phase_obj_3d.shape[2]'], {'dtype': 'af_complex_datatype'}), '(0.0, self.phase_obj_3d.shape[0], self.phase_obj_3d.shape[1],\n self.phase_obj_3d.shape[2], dtype=af_complex_datatype)\n', (17040, 17160), True, 'import arrayfire as af\n'), ((24258, 24287), 'opticaltomography.regularizers.Regularizer', 'Regularizer', (['configs', 'verbose'], {}), '(configs, verbose)\n', (24269, 
24287), False, 'from opticaltomography.regularizers import Regularizer\n'), ((7194, 7224), 'numpy.any', 'np.any', (['(self.rot_angles != [0])'], {}), '(self.rot_angles != [0])\n', (7200, 7224), True, 'import numpy as np\n'), ((8776, 8796), 'contexttimer.Timer', 'contexttimer.Timer', ([], {}), '()\n', (8794, 8796), False, 'import contexttimer\n'), ((11490, 11517), 'numpy.random.randint', 'np.random.randint', (['shape[0]'], {}), '(shape[0])\n', (11507, 11517), True, 'import numpy as np\n'), ((11519, 11546), 'numpy.random.randint', 'np.random.randint', (['shape[1]'], {}), '(shape[1])\n', (11536, 11546), True, 'import numpy as np\n'), ((11548, 11575), 'numpy.random.randint', 'np.random.randint', (['shape[2]'], {}), '(shape[2])\n', (11565, 11575), True, 'import numpy as np\n'), ((11858, 11921), 'arrayfire.randu', 'af.randu', (['(shape[0] // 2)', '(shape[1] // 2)'], {'dtype': 'af_float_datatype'}), '(shape[0] // 2, shape[1] // 2, dtype=af_float_datatype)\n', (11866, 11921), True, 'import arrayfire as af\n'), ((11958, 12011), 'arrayfire.randu', 'af.randu', (['shape[0]', 'shape[1]'], {'dtype': 'af_float_datatype'}), '(shape[0], shape[1], dtype=af_float_datatype)\n', (11966, 12011), True, 'import arrayfire as af\n'), ((16186, 16317), 'arrayfire.constant', 'af.constant', (['(0.0)', 'self.phase_obj_3d.shape[0]', 'self.phase_obj_3d.shape[1]', 'self.phase_obj_3d.shape[2]'], {'dtype': 'af_complex_datatype'}), '(0.0, self.phase_obj_3d.shape[0], self.phase_obj_3d.shape[1],\n self.phase_obj_3d.shape[2], dtype=af_complex_datatype)\n', (16197, 16317), True, 'import arrayfire as af\n'), ((17488, 17508), 'contexttimer.Timer', 'contexttimer.Timer', ([], {}), '()\n', (17506, 17508), False, 'import contexttimer\n'), ((2929, 2970), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np_complex_datatype'}), '(shape, dtype=np_complex_datatype)\n', (2936, 2970), True, 'import numpy as np\n'), ((3547, 3596), 'numpy.ones', 'np.ones', (['(shape[2] - 1,)'], {'dtype': 'np_float_datatype'}), 
'((shape[2] - 1,), dtype=np_float_datatype)\n', (3554, 3596), True, 'import numpy as np\n'), ((7261, 7387), 'opticaltomography.opticsutil.ImageRotation', 'ImageRotation', (['self.phase_obj_3d.shape'], {'axis': '(0)', 'pad': 'self.rotation_pad', 'pad_value': '(1)', 'flag_gpu_inout': '(True)', 'flag_inplace': '(True)'}), '(self.phase_obj_3d.shape, axis=0, pad=self.rotation_pad,\n pad_value=1, flag_gpu_inout=True, flag_inplace=True)\n', (7274, 7387), False, 'from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient\n'), ((7688, 7718), 'numpy.any', 'np.any', (['(self.rot_angles != [0])'], {}), '(self.rot_angles != [0])\n', (7694, 7718), True, 'import numpy as np\n'), ((8141, 8171), 'numpy.any', 'np.any', (['(self.rot_angles != [0])'], {}), '(self.rot_angles != [0])\n', (8147, 8171), True, 'import numpy as np\n'), ((12409, 12430), 'arrayfire.abs', 'af.abs', (['field_measure'], {}), '(field_measure)\n', (12415, 12430), True, 'import arrayfire as af\n'), ((17754, 17774), 'arrayfire.to_array', 'af.to_array', (['self._x'], {}), '(self._x)\n', (17765, 17774), True, 'import arrayfire as af\n'), ((3387, 3415), 'numpy.asarray', 'np.asarray', (['slice_separation'], {}), '(slice_separation)\n', (3397, 3415), True, 'import numpy as np\n'), ((7755, 7881), 'opticaltomography.opticsutil.ImageRotation', 'ImageRotation', (['self.phase_obj_3d.shape'], {'axis': '(0)', 'pad': 'self.rotation_pad', 'pad_value': '(0)', 'flag_gpu_inout': '(True)', 'flag_inplace': '(True)'}), '(self.phase_obj_3d.shape, axis=0, pad=self.rotation_pad,\n pad_value=0, flag_gpu_inout=True, flag_inplace=True)\n', (7768, 7881), False, 'from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient\n'), ((8208, 8334), 'opticaltomography.opticsutil.ImageRotation', 'ImageRotation', (['self.phase_obj_3d.shape'], {'axis': '(0)', 'pad': 'self.rotation_pad', 'pad_value': '(0)', 'flag_gpu_inout': '(True)', 'flag_inplace': '(True)'}), '(self.phase_obj_3d.shape, axis=0, 
pad=self.rotation_pad,\n pad_value=0, flag_gpu_inout=True, flag_inplace=True)\n', (8221, 8334), False, 'from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient\n'), ((10426, 10461), 'numpy.array', 'np.array', (['forward_scattered_predict'], {}), '(forward_scattered_predict)\n', (10434, 10461), True, 'import numpy as np\n'), ((22772, 22789), 'numpy.array', 'np.array', (['obj_gpu'], {}), '(obj_gpu)\n', (22780, 22789), True, 'import numpy as np\n'), ((10584, 10619), 'numpy.array', 'np.array', (['forward_scattered_predict'], {}), '(forward_scattered_predict)\n', (10592, 10619), True, 'import numpy as np\n'), ((10791, 10823), 'numpy.array', 'np.array', (['back_scattered_predict'], {}), '(back_scattered_predict)\n', (10799, 10823), True, 'import numpy as np\n'), ((12493, 12511), 'arrayfire.conjg', 'af.conjg', (['residual'], {}), '(residual)\n', (12501, 12511), True, 'import arrayfire as af\n'), ((14885, 14906), 'arrayfire.arg', 'af.arg', (['field_measure'], {}), '(field_measure)\n', (14891, 14906), True, 'import arrayfire as af\n'), ((9644, 9687), 'numpy.array', 'np.array', (["fields['forward_scattered_field']"], {}), "(fields['forward_scattered_field'])\n", (9652, 9687), True, 'import numpy as np\n'), ((9952, 9993), 'numpy.abs', 'np.abs', (["fields['forward_scattered_field']"], {}), "(fields['forward_scattered_field'])\n", (9958, 9993), True, 'import numpy as np\n'), ((10948, 10980), 'numpy.array', 'np.array', (['back_scattered_predict'], {}), '(back_scattered_predict)\n', (10956, 10980), True, 'import numpy as np\n'), ((19901, 19952), 'arrayfire.to_array', 'af.to_array', (['amplitudes[:, :, :, illu_idx, rot_idx]'], {}), '(amplitudes[:, :, :, illu_idx, rot_idx])\n', (19912, 19952), True, 'import arrayfire as af\n'), ((21093, 21110), 'numpy.isnan', 'np.isnan', (['obj_gpu'], {}), '(obj_gpu)\n', (21101, 21110), True, 'import numpy as np\n'), ((9818, 9858), 'numpy.array', 'np.array', (["fields['back_scattered_field']"], {}), 
"(fields['back_scattered_field'])\n", (9826, 9858), True, 'import numpy as np\n'), ((10124, 10162), 'numpy.abs', 'np.abs', (["fields['back_scattered_field']"], {}), "(fields['back_scattered_field'])\n", (10130, 10162), True, 'import numpy as np\n'), ((20010, 20031), 'arrayfire.abs', 'af.abs', (['field_measure'], {}), '(field_measure)\n', (20016, 20031), True, 'import arrayfire as af\n'), ((23211, 23238), 'numpy.log10', 'np.log10', (['configs.error[-1]'], {}), '(configs.error[-1])\n', (23219, 23238), True, 'import numpy as np\n'), ((22971, 22998), 'numpy.log10', 'np.log10', (['configs.error[-1]'], {}), '(configs.error[-1])\n', (22979, 22998), True, 'import numpy as np\n'), ((20120, 20138), 'arrayfire.conjg', 'af.conjg', (['residual'], {}), '(residual)\n', (20128, 20138), True, 'import arrayfire as af\n'), ((21509, 21526), 'arrayfire.conjg', 'af.conjg', (['obj_gpu'], {}), '(obj_gpu)\n', (21517, 21526), True, 'import arrayfire as af\n'), ((21954, 21981), 'numpy.log10', 'np.log10', (['configs.error[-1]'], {}), '(configs.error[-1])\n', (21962, 21981), True, 'import numpy as np\n'), ((22140, 22167), 'numpy.log10', 'np.log10', (['configs.error[-1]'], {}), '(configs.error[-1])\n', (22148, 22167), True, 'import numpy as np\n')] |
from tsp.TSPGame import TSPGame as Game
from tsp.NNetShell import NNetShell
from TSPMCTS import TSPMCTS
import numpy as np
from utils import *
# Hyper-parameters for the self-play loop; dotdict gives attribute-style access.
args = dotdict({
    'numEps': 5,              # Number of complete self-play games to simulate during a new iteration.
    'numMCTSSims': 20,        # Number of games moves for MCTS to simulate.
    'cpuct': 1,               # Exploration constant in the PUCT formula -- presumably; confirm against TSPMCTS.
})
if __name__ == "__main__":
    # Self-play driver: play args.numEps episodes of a 10-city TSP game with
    # an MCTS policy backed by a neural net, and tally wins/losses.
    game = Game(10)
    nnet = NNetShell(game)
    mcts = TSPMCTS(args, game, nnet)
    actions = [game.start_node]  # tour built so far, seeded with the start city
    wins, losses = 0, 0
    player = 1
    for i in range(args.numEps):
        board = game.getInitBoard()
        # Play one episode until the game signals a terminal state (non-zero).
        while game.getGameEnded(board, player) == 0:
            canon = game.getCanonicalForm(board, player)
            ap = mcts.getActionProb(canon, temp=1)  # visit-count action distribution
            # Greedy argmax despite temp=1 -- NOTE(review): confirm sampling
            # was not intended here.
            action = np.argmax(ap)
            actions.append(action)
            valid_moves = game.getValidMoves(canon, player)
            if valid_moves[action] == 0:
                # MCTS proposed an illegal move; abort the whole run.
                exit(1)
            board, player = game.getNextState(board, player, action)
        print('my', actions)
        result = game.getGameEnded(board, player)
        print('-----',result,'------')
        actions = [game.start_node]  # reset the tour for the next episode
        # +1 is counted as a win; any other terminal value as a loss.
        if game.getGameEnded(board, player) == 1:
            wins += 1
        else:
            losses += 1
    print('wins', wins)
    print('losses', losses)
| [
"numpy.argmax",
"TSPMCTS.TSPMCTS",
"tsp.TSPGame.TSPGame",
"tsp.NNetShell.NNetShell"
] | [((400, 408), 'tsp.TSPGame.TSPGame', 'Game', (['(10)'], {}), '(10)\n', (404, 408), True, 'from tsp.TSPGame import TSPGame as Game\n'), ((420, 435), 'tsp.NNetShell.NNetShell', 'NNetShell', (['game'], {}), '(game)\n', (429, 435), False, 'from tsp.NNetShell import NNetShell\n'), ((447, 472), 'TSPMCTS.TSPMCTS', 'TSPMCTS', (['args', 'game', 'nnet'], {}), '(args, game, nnet)\n', (454, 472), False, 'from TSPMCTS import TSPMCTS\n'), ((795, 808), 'numpy.argmax', 'np.argmax', (['ap'], {}), '(ap)\n', (804, 808), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Run an environment with the chosen policy.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import os
import random
import socket
import uuid
from builtins import input
import numpy as np
import h5py
import _init_paths # NOQA
from robovat import envs
from robovat import policies
from robovat.io import hdf5_utils
from robovat.io.episode_generation import generate_episodes
from robovat.simulation.simulator import Simulator
from robovat.utils import time_utils
from robovat.utils.logging import logger
from robovat.utils.yaml_config import YamlConfig
def parse_args():
    """Parse command-line arguments.

    Returns:
        args: The parsed arguments.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--env', dest='env', type=str, required=True,
                        help='The environment.')
    parser.add_argument('--policy', dest='policy', type=str, default=None,
                        help='The policy.')
    parser.add_argument('--env_config', dest='env_config', type=str,
                        default=None,
                        help='The configuration file for the environment.')
    parser.add_argument('--policy_config', dest='policy_config', type=str,
                        default=None,
                        help='The configuration file for the policy.')
    parser.add_argument('--config_bindings', dest='config_bindings', type=str,
                        default=None,
                        help='The configuration bindings.')
    parser.add_argument('--use_simulator', dest='use_simulator', type=int,
                        default=1,
                        help='Run experiments in the simulation is it is True.')
    parser.add_argument('--assets', dest='assets_dir', type=str,
                        default='./assets',
                        help='The assets directory.')
    parser.add_argument('--output', dest='output_dir', type=str, default=None,
                        help='The output directory to save the episode history.')
    parser.add_argument('--num_steps', dest='num_steps', type=int, default=None,
                        help='Maximum number of time steps for each episode.')
    parser.add_argument('--num_episodes', dest='num_episodes', type=int,
                        default=None,
                        help='Maximum number of episodes.')
    parser.add_argument('--num_episodes_per_file', dest='num_episodes_per_file',
                        type=int, default=1000,
                        help='The maximum number of episodes saved in each file.')
    parser.add_argument('--debug', dest='debug', type=int, default=0,
                        help='True for debugging, False otherwise.')
    parser.add_argument('--worker_id', dest='worker_id', type=int, default=0,
                        help='The worker ID for running multiple simulations in parallel.')
    parser.add_argument('--seed', dest='seed', type=int, default=None,
                        help='None for random; any fixed integers for deterministic.')
    # NOTE(review): argparse with type=bool treats every non-empty string as
    # True; kept as-is to preserve the original behavior.
    parser.add_argument('--pause', dest='pause', type=bool, default=False,
                        help='Whether to pause between episodes.')
    parser.add_argument('--timeout', dest='timeout', type=float, default=120,
                        help='Seconds of timeout for an episode.')

    args = parser.parse_args()
    return args
def parse_config_files_and_bindings(args):
    """Load the environment/policy YAML configs and apply optional bindings.

    Args:
        args: Parsed CLI arguments with `env_config`, `policy_config` and
            `config_bindings` attributes (any of which may be None).

    Returns:
        Tuple (env_config, policy_config); each is an easydict or None when
        the corresponding config file was not provided.
    """
    if args.env_config is None:
        env_config = None
    else:
        env_config = YamlConfig(args.env_config).as_easydict()

    if args.policy_config is None:
        policy_config = None
    else:
        policy_config = YamlConfig(args.policy_config).as_easydict()

    if args.config_bindings is not None:
        parsed_bindings = ast.literal_eval(args.config_bindings)
        logger.info('Config Bindings: %r', parsed_bindings)
        # Fix: the original unconditionally called .update() and raised
        # AttributeError when bindings were supplied without one of the
        # config files. Only update configs that actually exist.
        if env_config is not None:
            env_config.update(parsed_bindings)
        if policy_config is not None:
            policy_config.update(parsed_bindings)

    return env_config, policy_config
def main():
    """Run the chosen environment with the chosen policy and save episodes.

    Parses the CLI arguments, builds the (optionally simulated) environment
    and policy, then streams generated episodes into chunked HDF5 files
    under `<output>/<hostname>/<worker_id>/`.
    """
    args = parse_args()

    # Configuration.
    env_config, policy_config = parse_config_files_and_bindings(args)

    # Set the random seed.
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)

    # Simulator.
    if args.use_simulator:
        simulator = Simulator(worker_id=args.worker_id,
                              use_visualizer=bool(args.debug),
                              assets_dir=args.assets_dir)
    else:
        simulator = None

    # Environment.
    env_class = getattr(envs, args.env)
    env = env_class(simulator=simulator,
                    config=env_config,
                    debug=args.debug)

    # Policy.
    policy_class = getattr(policies, args.policy)
    policy = policy_class(env=env, config=policy_config)

    # Output directory.
    if args.output_dir is not None:
        hostname = socket.gethostname()
        hostname = hostname.split('.')[0]
        output_dir = os.path.abspath(args.output_dir)
        # Fix: the original used `args.key`, which parse_args never defines
        # (AttributeError at runtime); `worker_id` is the per-worker index.
        output_dir = os.path.join(output_dir, hostname, '%02d' % (args.worker_id))
        if not os.path.isdir(output_dir):
            logger.info('Making output directory %s...', output_dir)
            os.makedirs(output_dir)

    # Generate and write episodes.
    logger.info('Start running...')
    num_episodes_this_file = 0
    for episode_index, episode in generate_episodes(
            env,
            policy,
            num_steps=args.num_steps,
            num_episodes=args.num_episodes,
            timeout=args.timeout,
            debug=args.debug):

        if args.output_dir:
            # Create a file for saving the episode data.
            if num_episodes_this_file == 0:
                timestamp = time_utils.get_timestamp_as_string()
                filename = 'episodes_%s.hdf5' % (timestamp)
                output_path = os.path.join(output_dir, filename)
                logger.info('Created a new file %s...', output_path)

            # Append the episode to the file under a unique group name.
            logger.info('Saving episode %d to file %s (%d / %d)...',
                        episode_index,
                        output_path,
                        num_episodes_this_file,
                        args.num_episodes_per_file)

            with h5py.File(output_path, 'a') as fout:
                name = str(uuid.uuid4())
                group = fout.create_group(name)
                hdf5_utils.write_data_to_hdf5(group, episode)

        # Roll over to a fresh file every num_episodes_per_file episodes.
        num_episodes_this_file += 1
        num_episodes_this_file %= args.num_episodes_per_file

        if args.pause:
            input('Press [Enter] to start a new episode.')
# Script entry point: run episode generation with the parsed CLI options.
if __name__ == '__main__':
    main()
| [
"robovat.io.hdf5_utils.write_data_to_hdf5",
"builtins.input",
"robovat.utils.time_utils.get_timestamp_as_string",
"argparse.ArgumentParser",
"os.makedirs",
"robovat.utils.yaml_config.YamlConfig",
"os.path.join",
"random.seed",
"h5py.File",
"ast.literal_eval",
"robovat.utils.logging.logger.info",... | [((794, 819), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (817, 819), False, 'import argparse\n'), ((5216, 5247), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Start running..."""'], {}), "('Start running...')\n", (5227, 5247), False, 'from robovat.utils.logging import logger\n'), ((5331, 5464), 'robovat.io.episode_generation.generate_episodes', 'generate_episodes', (['env', 'policy'], {'num_steps': 'args.num_steps', 'num_episodes': 'args.num_episodes', 'timeout': 'args.timeout', 'debug': 'args.debug'}), '(env, policy, num_steps=args.num_steps, num_episodes=args.\n num_episodes, timeout=args.timeout, debug=args.debug)\n', (5348, 5464), False, 'from robovat.io.episode_generation import generate_episodes\n'), ((3719, 3757), 'ast.literal_eval', 'ast.literal_eval', (['args.config_bindings'], {}), '(args.config_bindings)\n', (3735, 3757), False, 'import ast\n'), ((3766, 3817), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Config Bindings: %r"""', 'parsed_bindings'], {}), "('Config Bindings: %r', parsed_bindings)\n", (3777, 3817), False, 'from robovat.utils.logging import logger\n'), ((4141, 4163), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (4152, 4163), False, 'import random\n'), ((4172, 4197), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4186, 4197), True, 'import numpy as np\n'), ((4835, 4855), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4853, 4855), False, 'import socket\n'), ((4919, 4951), 'os.path.abspath', 'os.path.abspath', (['args.output_dir'], {}), '(args.output_dir)\n', (4934, 4951), False, 'import os\n'), ((4973, 5026), 'os.path.join', 'os.path.join', (['output_dir', 'hostname', "('%02d' % args.key)"], {}), "(output_dir, hostname, '%02d' % args.key)\n", (4985, 5026), False, 'import os\n'), ((5044, 5069), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (5057, 5069), 
False, 'import os\n'), ((5083, 5139), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Making output directory %s..."""', 'output_dir'], {}), "('Making output directory %s...', output_dir)\n", (5094, 5139), False, 'from robovat.utils.logging import logger\n'), ((5152, 5175), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (5163, 5175), False, 'import os\n'), ((5982, 6122), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Saving episode %d to file %s (%d / %d)..."""', 'episode_index', 'output_path', 'num_episodes_this_file', 'args.num_episodes_per_file'], {}), "('Saving episode %d to file %s (%d / %d)...', episode_index,\n output_path, num_episodes_this_file, args.num_episodes_per_file)\n", (5993, 6122), False, 'from robovat.utils.logging import logger\n'), ((6555, 6601), 'builtins.input', 'input', (['"""Press [Enter] to start a new episode."""'], {}), "('Press [Enter] to start a new episode.')\n", (6560, 6601), False, 'from builtins import input\n'), ((3465, 3492), 'robovat.utils.yaml_config.YamlConfig', 'YamlConfig', (['args.env_config'], {}), '(args.env_config)\n', (3475, 3492), False, 'from robovat.utils.yaml_config import YamlConfig\n'), ((3606, 3636), 'robovat.utils.yaml_config.YamlConfig', 'YamlConfig', (['args.policy_config'], {}), '(args.policy_config)\n', (3616, 3636), False, 'from robovat.utils.yaml_config import YamlConfig\n'), ((5692, 5728), 'robovat.utils.time_utils.get_timestamp_as_string', 'time_utils.get_timestamp_as_string', ([], {}), '()\n', (5726, 5728), False, 'from robovat.utils import time_utils\n'), ((5819, 5853), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (5831, 5853), False, 'import os\n'), ((5870, 5922), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Created a new file %s..."""', 'output_path'], {}), "('Created a new file %s...', output_path)\n", (5881, 5922), False, 'from robovat.utils.logging import logger\n'), ((6233, 6260), 
'h5py.File', 'h5py.File', (['output_path', '"""a"""'], {}), "(output_path, 'a')\n", (6242, 6260), False, 'import h5py\n'), ((6375, 6420), 'robovat.io.hdf5_utils.write_data_to_hdf5', 'hdf5_utils.write_data_to_hdf5', (['group', 'episode'], {}), '(group, episode)\n', (6404, 6420), False, 'from robovat.io import hdf5_utils\n'), ((6297, 6309), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6307, 6309), False, 'import uuid\n')] |
import numpy as np
import yaml
import os, sys
import copy
from functools import reduce
import random
from timeloop_env import TimeloopEnv
from multiprocessing.pool import Pool
from multiprocessing import cpu_count
import shutil
from functools import cmp_to_key, partial
class GammaTimeloopEnv(object):
def __init__(self, num_pes=256, l2_size=10800, l1_size=512, fitness_obj=['latency'], report_dir='./report', use_pool=True):
self.fitness_obj = fitness_obj
self.num_pes = num_pes
self.l1_size = l1_size
self.l2_size = l2_size
self.loc_to_dim_note = {0: 'K', 1: 'C', 2: 'Y', 3: 'X', 4: 'R', 5: 'S'}
self.dim_note = ['K', 'C', 'Y', 'X', 'R', 'S']
self.len_dimension = len(self.dim_note)
self.timeloop_configfile_path = './out_config'
self.report_dir = report_dir
self.timeloop_env = TimeloopEnv(config_path=self.timeloop_configfile_path)
self.use_pool = use_pool
def set_dimension(self, dimension):
self.dimension = dimension
self.dimension_dict = self.get_dimension_dict(dimension)
self.dimension_factor = self.get_dimension_factors(self.dimension_dict)
def get_dimension_dict(self, dim_value):
return {note: value for note, value in zip(self.dim_note, dim_value)}
def get_factors(self, n):
return list(reduce(list.__add__,
([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))
def get_dimension_factors(self, dimension_dict):
dimension_factors = dict()
for key, value in dimension_dict.items():
factors = self.get_factors(value)
dimension_factors[key] = factors
return dimension_factors
    def mutate_tiles(self, pops, parents, alpha=0.5, num_mu_loc=1):
        """With probability alpha, replace each individual by a deep copy of a
        random parent whose L2/L1 tile sizes are re-sampled at num_mu_loc
        randomly chosen dimensions. Mutates `pops` in place and returns it."""
        len_parents = len(parents)
        for i in range(len(pops)):
            if random.random() < alpha:
                sel_parent = random.randint(0, len_parents - 1)
                indv = copy.deepcopy(parents[sel_parent])
                # References into indv: in-place edits below mutate the copy.
                l2_tile_size = indv['l2_tile_size']
                l1_tile_size = indv['l1_tile_size']
                for _ in range(num_mu_loc):
                    pick_loc = random.randint(0, self.len_dimension - 1)
                    pick_dim = self.loc_to_dim_note[pick_loc]
                    dim_value = self.dimension_dict[pick_dim]
                    factors = self.dimension_factor[pick_dim]
                    # Draw the L2 factor from all divisors, then the L1 factor
                    # from divisors of the remaining quotient so l2 * l1 still
                    # divides the full dimension.
                    pick_factor_l2 = np.random.choice(factors)
                    pick_factor_l1 = np.random.choice(self.get_factors(dim_value//pick_factor_l2))
                    l2_tile_size[pick_loc] = pick_factor_l2
                    l1_tile_size[pick_loc] = pick_factor_l1
                pops[i] = indv
        return pops
    def mutate_par(self, pops, parents, alpha=0.5):
        """With probability alpha, replace each individual by a deep copy of a
        random parent with one of its two parallel dimensions re-sampled.
        Mutates `pops` in place and returns it."""
        len_parents = len(parents)
        for i in range(len(pops)):
            if random.random() < alpha:
                sel_parent = random.randint(0, len_parents - 1)
                indv = copy.deepcopy(parents[sel_parent])
                pick_loc = random.randint(0, 1)  # which of the two par dims to mutate
                par_dims = indv['par_dims']
                par_dims[pick_loc] = np.random.choice(self.dim_note)
                pops[i] = indv
        return pops
    def mutate_order(self, pops, parents, alpha=0.5):
        """With probability alpha, replace each individual by a deep copy of a
        random parent with two positions swapped in either its L2 or its L1
        loop-order string. Mutates `pops` in place and returns it."""
        len_parents = len(parents)
        for i in range(len(pops)):
            if random.random() < alpha:
                sel_parent = random.randint(0, len_parents - 1)
                indv = copy.deepcopy(parents[sel_parent])
                # Choose which memory level's loop order to perturb (50/50).
                if random.random()<0.5:
                    pick = 'l2_loop_order'
                else:
                    pick = 'l1_loop_order'
                loop_order = indv[pick]
                loop_order = list(loop_order)
                # Sample two distinct positions and swap them.
                idxs = random.sample(set(np.arange(0, self.len_dimension)), 2)
                loop_order[idxs[0]], loop_order[idxs[1]] = loop_order[idxs[1]], loop_order[idxs[0]]
                indv[pick] = ''.join(loop_order)
                pops[i] = indv
        return pops
def init_indv(self):
indv = {'l2_tile_size': [1]*6,
'l1_tile_size': [1]*6,
'l2_loop_order': 'KCYXRS',
'l1_loop_order': 'KCYXRS',
'par_dims': ['K', 'C']}
return indv
def init_pops(self, num_pops):
# return [self.init_indv() for _ in range(num_pops)], np.random.randint(0, 100, (num_pops, len(self.fitness_obj)))
return [self.init_indv() for _ in range(num_pops)], np.ones((num_pops, len(self.fitness_obj))) * np.NINF
def sort_rank_func(self, cand1, cand2, delta=1e-2):
def helper(item1, item2, is_last=False):
margin = abs((item1+item2) /2 * delta) if not is_last else 0
if margin == float('Inf'):
margin = 0
if item1 >= item2 + margin:
return 1
elif item1 +margin < item2:
return -1
else:
return 0
fitness_len = len(cand1) - 1
for i in range(fitness_len):
ret = helper(cand1[i], cand2[i], is_last=(i==fitness_len-1))
if ret != 0:
return ret
def select_parents(self, pops, fitness, num_parents, num_elites, num_pops, use_soft_margin=True):
fitness_list = [tuple(list(ar)+[-i]) for i, ar in enumerate(fitness)]
if not use_soft_margin:
sort_rank_func = partial(self.sort_rank_func, delta=0)
else:
sort_rank_func = self.sort_rank_func
fitness_list = sorted(fitness_list, key=cmp_to_key(sort_rank_func), reverse=True)
idx = [int(-ar[-1]) for ar in fitness_list]
new_pop = [pops[i] for i in idx][:num_pops]
new_fitness = fitness[idx][:num_pops]
parents = copy.deepcopy(new_pop[:num_parents])
elites = copy.deepcopy(new_pop[:num_elites])
elites_fitness = copy.deepcopy(new_fitness[:num_elites])
return new_pop, new_fitness, parents, elites, elites_fitness
    def thread_fun(self, indv, pool_idx=0):
        """Evaluate one individual: write its genome out as a Timeloop config
        in the per-worker pool slot, run Timeloop, and return the fitness.

        indv: genome dict with tile sizes, loop orders and parallel dims.
        pool_idx: which pool sandbox directory to use (for parallel eval).
        """
        self.timeloop_env.create_timeloop_config(self.dimension, self.l2_size, self.l1_size, self.num_pes,
                                                 indv['l2_tile_size'], indv['l1_tile_size'], indv['l2_loop_order'],
                                                 indv['l1_loop_order'], indv['par_dims'], pool_idx=pool_idx)
        # Presumably a sequence of values matching self.fitness_obj --
        # confirm against TimeloopEnv.run_timeloop.
        fit = self.timeloop_env.run_timeloop(pool_idx=pool_idx, fitness_obj=self.fitness_obj)
        return fit
def evaluate(self, pops, fitness, pool=None):
if not pool:
for i, indv in enumerate(pops):
fitness[i] = self.thread_fun(indv)
else:
rets = pool.starmap(self.thread_fun, zip(pops, np.arange(len(pops))))
fitness = np.array(rets)
return fitness
def create_timeloop_report(self, indv, dir_path='./report'):
fitness = self.thread_fun(indv, pool_idx=0)
os.makedirs(dir_path, exist_ok=True)
os.system(f'cp -d -r {os.path.join(self.timeloop_configfile_path, "pool-0")}/* {dir_path}')
with open(os.path.join(dir_path,'Gamma-Timeloop.txt'), 'w') as fd:
fd.write(f'Achieved Fitness: {fitness}')
fd.write(f'GammaTimeloop-style Sol: {self.get_genome(indv)}')
fd.write(f'Gamma-style Sol: {self.get_maestro_style_genome(indv)}')
    def run(self, dimension, num_pops=100, num_gens=100, elite_ratio=0.05, parents_ratio=0.4):
        """Run the genetic search for ``num_gens`` generations.

        Args:
            dimension: problem dimension, forwarded to ``set_dimension``.
            num_pops: population size per generation.
            num_gens: number of generations to evolve.
            elite_ratio: fraction of the population kept unchanged as elites.
            parents_ratio: fraction of the population used as parents.
        """
        self.set_dimension(dimension)
        num_parents = int(num_pops*parents_ratio)
        # Always keep at least one elite so the best solution is never lost.
        num_elites = max(1, int(num_pops*elite_ratio))
        pops, fitness = self.init_pops(num_pops)
        if self.use_pool:
            # One worker (and one timeloop config dir) per individual.
            pool = Pool(num_pops)
            self.timeloop_env.create_pool_env(num_pops)
        else:
            pool = None
        for g in range(num_gens):
            if g == 0:
                # Initial ranking to seed parents/elites before any mutation.
                pops, fitness, parents, elites, elites_fitness = self.select_parents(pops, fitness, num_parents, num_elites, num_pops)
            if g == 0:
                # First generation mutates with alpha=1, later ones with 0.5
                # (alpha is forwarded to the mutate_* helpers).
                alpha = 1
            else:
                alpha = 0.5
            pops = self.mutate_par(pops, parents, alpha=alpha)
            pops = self.mutate_order(pops, parents, alpha=alpha)
            pops = self.mutate_tiles(pops, parents, alpha=alpha)
            fitness = self.evaluate(pops, fitness, pool)
            # Elites compete with the mutated population for the next round.
            pops = elites + pops
            fitness = np.concatenate((elites_fitness, fitness), axis=0)
            pops, fitness, parents, elites, elites_fitness = self.select_parents(pops, fitness, num_parents, num_elites, num_pops)
            # select_parents sorts best-first, so index 0 is the current best.
            best_idx = 0
            best_sol = pops[best_idx]
            print(f'[Gen{g}] fitness: {fitness[best_idx]} Sol: {self.get_genome(best_sol)}')
        print(f'Achieved Fitness: {fitness[best_idx]}')
        print(f'GammaTimeloop-style Sol: {self.get_genome(best_sol)}')
        print(f'Gamma-style Sol: {self.get_maestro_style_genome(best_sol)}')
        self.create_timeloop_report(best_sol, dir_path=self.report_dir)
        self.clean_timeloop_output_files()
def get_genome(self, indv):
l2_tile_size, l1_tile_size = indv['l2_tile_size'], indv['l1_tile_size']
l2_loop_order, l1_loop_order = indv['l2_loop_order'],indv['l1_loop_order']
l2_par, l1_par = indv['par_dims']
l2_tile_dict = self.get_dimension_dict(l2_tile_size)
l1_tile_dict = self.get_dimension_dict(l1_tile_size)
genome_l2 = [[l2_par, self.num_pes]] + [[d, l2_tile_dict[d]] for d in l2_loop_order]
genome_l1 = [[l1_par, 1]] + [[d, l1_tile_dict[d]] for d in l1_loop_order]
genome = genome_l2 + genome_l1
return genome
    def get_maestro_style_genome(self, indv):
        """Encode ``indv`` as a Gamma/MAESTRO-style genome.

        Differs from ``get_genome``: L2 tile sizes are the product of the L2
        and L1 tiles, and the L1 parallel dim's tile count becomes the L1
        cluster size.
        """
        l2_tile_size, l1_tile_size = indv['l2_tile_size'], indv['l1_tile_size']
        # Presumably the Gamma/MAESTRO encoding wants absolute (L2*L1) tile
        # sizes, hence the elementwise product -- TODO confirm.
        l2_tile_size = [l2 * l1 for l2, l1 in zip(l2_tile_size, l1_tile_size)]
        l2_loop_order, l1_loop_order = indv['l2_loop_order'],indv['l1_loop_order']
        l2_par, l1_par = indv['par_dims']
        l2_tile_dict = self.get_dimension_dict(l2_tile_size)
        l1_tile_dict = self.get_dimension_dict(l1_tile_size)
        # The L1 tile count along the parallel dim becomes the cluster size;
        # read it before resetting that dim's tile size to 1.
        l1_cluster_size = l1_tile_dict[l1_par]
        l1_tile_dict[l1_par] = 1
        l2_cluster_size = self.num_pes // l1_cluster_size
        # Split the L2 parallel dim across the clusters, clamped to >= 1.
        l2_tile_dict[l2_par] = max(1, l2_tile_dict[l2_par] // l2_cluster_size)
        genome_l2 = [[l2_par, self.num_pes]] + [[d, l2_tile_dict[d]] for d in l2_loop_order]
        genome_l1 = [[l1_par, l1_cluster_size]] + [[d, l1_tile_dict[d]] for d in l1_loop_order]
        genome = genome_l2 + genome_l1
        return genome
def clean_timeloop_output_files(self):
# out_prefix = "./timeloop-model."
# output_file_names = []
# output_file_names.append(out_prefix + "accelergy.log")
# output_file_names.append(out_prefix + ".log")
# output_file_names.append(out_prefix + "ART.yaml")
# output_file_names.append(out_prefix + "ART_summary.yaml")
# output_file_names.append(out_prefix + "ERT.yaml")
# output_file_names.append(out_prefix + "ERT_summary.yaml")
# output_file_names.append(out_prefix + "flattened_architecture.yaml")
# output_file_names.append(out_prefix + "map+stats.xml")
# output_file_names.append(out_prefix + "map.txt")
# output_file_names.append(out_prefix + "stats.txt")
# for f in output_file_names:
# if os.path.exists(f):
# os.remove(f)
shutil.rmtree(self.timeloop_configfile_path)
| [
"functools.cmp_to_key",
"copy.deepcopy",
"os.makedirs",
"multiprocessing.pool.Pool",
"numpy.random.choice",
"os.path.join",
"numpy.array",
"timeloop_env.TimeloopEnv",
"functools.partial",
"numpy.concatenate",
"shutil.rmtree",
"random.random",
"random.randint",
"numpy.arange"
] | [((867, 921), 'timeloop_env.TimeloopEnv', 'TimeloopEnv', ([], {'config_path': 'self.timeloop_configfile_path'}), '(config_path=self.timeloop_configfile_path)\n', (878, 921), False, 'from timeloop_env import TimeloopEnv\n'), ((5814, 5850), 'copy.deepcopy', 'copy.deepcopy', (['new_pop[:num_parents]'], {}), '(new_pop[:num_parents])\n', (5827, 5850), False, 'import copy\n'), ((5868, 5903), 'copy.deepcopy', 'copy.deepcopy', (['new_pop[:num_elites]'], {}), '(new_pop[:num_elites])\n', (5881, 5903), False, 'import copy\n'), ((5929, 5968), 'copy.deepcopy', 'copy.deepcopy', (['new_fitness[:num_elites]'], {}), '(new_fitness[:num_elites])\n', (5942, 5968), False, 'import copy\n'), ((6977, 7013), 'os.makedirs', 'os.makedirs', (['dir_path'], {'exist_ok': '(True)'}), '(dir_path, exist_ok=True)\n', (6988, 7013), False, 'import os, sys\n'), ((11481, 11525), 'shutil.rmtree', 'shutil.rmtree', (['self.timeloop_configfile_path'], {}), '(self.timeloop_configfile_path)\n', (11494, 11525), False, 'import shutil\n'), ((5455, 5492), 'functools.partial', 'partial', (['self.sort_rank_func'], {'delta': '(0)'}), '(self.sort_rank_func, delta=0)\n', (5462, 5492), False, 'from functools import cmp_to_key, partial\n'), ((6813, 6827), 'numpy.array', 'np.array', (['rets'], {}), '(rets)\n', (6821, 6827), True, 'import numpy as np\n'), ((7729, 7743), 'multiprocessing.pool.Pool', 'Pool', (['num_pops'], {}), '(num_pops)\n', (7733, 7743), False, 'from multiprocessing.pool import Pool\n'), ((8431, 8480), 'numpy.concatenate', 'np.concatenate', (['(elites_fitness, fitness)'], {'axis': '(0)'}), '((elites_fitness, fitness), axis=0)\n', (8445, 8480), True, 'import numpy as np\n'), ((1885, 1900), 'random.random', 'random.random', ([], {}), '()\n', (1898, 1900), False, 'import random\n'), ((1939, 1973), 'random.randint', 'random.randint', (['(0)', '(len_parents - 1)'], {}), '(0, len_parents - 1)\n', (1953, 1973), False, 'import random\n'), ((1997, 2031), 'copy.deepcopy', 'copy.deepcopy', 
(['parents[sel_parent]'], {}), '(parents[sel_parent])\n', (2010, 2031), False, 'import copy\n'), ((2911, 2926), 'random.random', 'random.random', ([], {}), '()\n', (2924, 2926), False, 'import random\n'), ((2965, 2999), 'random.randint', 'random.randint', (['(0)', '(len_parents - 1)'], {}), '(0, len_parents - 1)\n', (2979, 2999), False, 'import random\n'), ((3023, 3057), 'copy.deepcopy', 'copy.deepcopy', (['parents[sel_parent]'], {}), '(parents[sel_parent])\n', (3036, 3057), False, 'import copy\n'), ((3085, 3105), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3099, 3105), False, 'import random\n'), ((3187, 3218), 'numpy.random.choice', 'np.random.choice', (['self.dim_note'], {}), '(self.dim_note)\n', (3203, 3218), True, 'import numpy as np\n'), ((3410, 3425), 'random.random', 'random.random', ([], {}), '()\n', (3423, 3425), False, 'import random\n'), ((3464, 3498), 'random.randint', 'random.randint', (['(0)', '(len_parents - 1)'], {}), '(0, len_parents - 1)\n', (3478, 3498), False, 'import random\n'), ((3522, 3556), 'copy.deepcopy', 'copy.deepcopy', (['parents[sel_parent]'], {}), '(parents[sel_parent])\n', (3535, 3556), False, 'import copy\n'), ((5604, 5630), 'functools.cmp_to_key', 'cmp_to_key', (['sort_rank_func'], {}), '(sort_rank_func)\n', (5614, 5630), False, 'from functools import cmp_to_key, partial\n'), ((7132, 7176), 'os.path.join', 'os.path.join', (['dir_path', '"""Gamma-Timeloop.txt"""'], {}), "(dir_path, 'Gamma-Timeloop.txt')\n", (7144, 7176), False, 'import os, sys\n'), ((2211, 2252), 'random.randint', 'random.randint', (['(0)', '(self.len_dimension - 1)'], {}), '(0, self.len_dimension - 1)\n', (2225, 2252), False, 'import random\n'), ((2477, 2502), 'numpy.random.choice', 'np.random.choice', (['factors'], {}), '(factors)\n', (2493, 2502), True, 'import numpy as np\n'), ((3576, 3591), 'random.random', 'random.random', ([], {}), '()\n', (3589, 3591), False, 'import random\n'), ((7044, 7097), 'os.path.join', 'os.path.join', 
(['self.timeloop_configfile_path', '"""pool-0"""'], {}), "(self.timeloop_configfile_path, 'pool-0')\n", (7056, 7097), False, 'import os, sys\n'), ((3832, 3864), 'numpy.arange', 'np.arange', (['(0)', 'self.len_dimension'], {}), '(0, self.len_dimension)\n', (3841, 3864), True, 'import numpy as np\n')] |
from impedance.preprocessing import readFile, readGamry
import numpy as np
# store some global test data
frequencies = np.array([0.0031623, 0.0039811, 0.0050119, 0.0063096,
0.0079433, 0.01, 0.012589, 0.015849, 0.019953,
0.025119, 0.031623, 0.039811, 0.050119, 0.063096,
0.079433, 0.1, 0.12589, 0.15849, 0.19953, 0.25119,
0.31623, 0.39811, 0.50119, 0.63096, 0.79433, 1.0,
1.2589, 1.5849, 1.9953, 2.5119, 3.1623, 3.9811,
5.0119, 6.3096, 7.9433, 10.0, 12.589, 15.849,
19.953, 25.119, 31.623, 39.811, 50.119, 63.096,
79.433, 100.0, 125.89, 158.49, 199.53, 251.19,
316.23, 398.11, 501.19, 630.96, 794.33, 1000.0,
1258.9, 1584.9, 1995.3, 2511.9, 3162.3, 3981.1,
5011.9, 6309.6, 7943.3, 10000.0])
real = [0.0494998977640506, 0.04776559257398882, 0.04613581757142157,
0.044596836301773274, 0.043142303861239205, 0.04181678802099761,
0.040606740929859095, 0.03951932383144551, 0.03856629404354767,
0.03773445890891119, 0.037013908851197805, 0.03639992299924442,
0.035883179203561086, 0.03544780816944048, 0.03506693121253139,
0.034721707243418394, 0.03440366308110683, 0.03410783765793668,
0.033821109172144787, 0.03353616639243526, 0.0332524554516705,
0.03295910966053001, 0.03265642728210896, 0.03232796211965539,
0.03197349380289498, 0.03158436174556338, 0.031069936132208306,
0.030461419854177326, 0.029900714166654168, 0.029379111339927506,
0.028614488514401064, 0.027877380810968015, 0.027051941695755265,
0.02622642987302172, 0.02539677675995668, 0.024674033206038913,
0.023984220630662276, 0.023376189861574193, 0.022795788586331325,
0.022290491192888506, 0.02183347892172112, 0.021423948245372654,
0.021044983846558948, 0.02061274834162727, 0.02020959510042839,
0.019760492004316906, 0.019397188854563818, 0.01898347057349932,
0.018562859805406066, 0.018173948838613962, 0.017777098024495532,
0.017382944047369668, 0.017027408256891644, 0.016664493440403796,
0.016338702344109557, 0.0160611742499297, 0.01580888106340524,
0.015584763288620133, 0.015355525008021014, 0.0151995284094296,
0.015171093447136087, 0.0151260119032158, 0.015086882844244285,
0.015276246310902308, 0.015467639396989145, 0.015771482660485933]
imag = [-0.020438698544418925, -0.0182856893045487, -0.016343158966700824,
-0.014589168660649915, -0.01300096361736358, -0.011573009182824043,
-0.010282133623145187, -0.009113366697002839, -0.00804494958277692,
-0.007075702921918925, -0.006209940124316647, -0.005450664199993216,
-0.004804611324614652, -0.0042630212172992624, -0.003816723014957778,
-0.003465230467686932, -0.0031936182833490197, -0.0029843274850640607,
-0.0028420187384119175, -0.0027510821389620833, -0.0027092774650327093,
-0.002716402585530142, -0.0027688021541761596, -0.0028687505233332576,
-0.002995332546857452, -0.0031633863009665544, -0.0034345232421858604,
-0.003652697342055591, -0.00389594513544332, -0.0041496368125138496,
-0.0043563647278047945, -0.004528514961203703, -0.004623972802104744,
-0.00463483440841946, -0.004562544489738368, -0.0044183840649258165,
-0.004213943600562558, -0.00397620055979716, -0.0037290248504921668,
-0.0035578892246933775, -0.0033509582749051627, -0.0031826464281827804,
-0.0030507184111723995, -0.0029386920239828154, -0.002848034411523496,
-0.0027583877127425357, -0.0026767011351060705, -0.002575856490231119,
-0.002455805016755156, -0.0023163152672671405, -0.002149498808757098,
-0.0019492643145405137, -0.0017151675874650793, -0.0014357936694323731,
-0.001109438368794195, -0.0007287022309982213, -0.0002827724289657194,
0.00024224721030238663, 0.0008560734952241664, 0.0015811469785105114,
0.002452846099159856, 0.003488131035300228, 0.004712940823286973,
0.006239444322658155, 0.008031686651315248, 0.010157474564938236]
Z_correct = np.array(real) + 1j*np.array(imag)
f_gamry = np.array([2.000156e+05, 1.589531e+05, 1.262344e+05, 1.002656e+05,
7.964063e+04, 6.332812e+04, 5.029688e+04, 3.998437e+04,
3.173438e+04, 2.526563e+04, 2.001562e+04, 1.589062e+04,
1.270313e+04, 1.007813e+04, 8.015625e+03, 6.328125e+03,
5.009191e+03, 3.998162e+03, 3.170956e+03, 2.527573e+03,
2.015625e+03, 1.577524e+03, 1.265625e+03, 9.982640e+02,
7.968750e+02, 6.277902e+02, 5.055147e+02, 3.979953e+02,
3.155048e+02, 2.524038e+02, 1.986229e+02, 1.583615e+02,
1.255580e+02, 1.004464e+02, 7.990057e+01, 6.334460e+01,
4.986702e+01, 3.972458e+01, 3.167230e+01, 2.493351e+01,
1.986229e+01, 1.583615e+01, 1.240079e+01, 9.931140e+00,
7.944915e+00, 6.317385e+00, 5.008013e+00, 4.020154e+00,
3.158693e+00, 2.504006e+00, 1.998082e+00, 1.584686e+00,
1.266892e+00, 9.990410e-01, 7.923428e-01, 6.334460e-01,
5.040323e-01, 4.006410e-01, 3.188775e-01, 2.520161e-01,
2.003205e-01, 1.588983e-01, 1.263477e-01, 1.003747e-01,
7.971940e-02, 6.325910e-02, 5.024120e-02, 3.992760e-02,
3.171520e-02, 2.518810e-02, 2.000640e-02, 1.588980e-02])
Zr_gamry = np.array([825.8584, 1100.361, 1401.721, 1739.625, 2087.403,
2422.298, 2720.257, 2982.016, 3212.336, 3359.629,
3499.298, 3598.306, 3688.117, 3766.628, 3808.92,
3842.264, 3902.565, 3927.298, 3944.01, 3987.966,
3998.507, 4029.045, 4044.939, 4077.349, 4068.979,
4072.986, 4078.837, 4107.241, 4130.96, 4143.088,
4164.664, 4183.986, 4206.823, 4225.685, 4230.309,
4228.707, 4242.562, 4250.716, 4219.722, 4208.409,
4203.486, 4213.595, 4241.487, 4258.891, 4295.819,
4297.472, 4313.771, 4361.165, 4408.525, 4430.184,
4495.299, 4571.314, 4632.138, 4753.051, 4889.047,
5038.293, 5218.515, 5444.164, 5926.305, 6461.792,
7343.626, 7986.202, 8435.127, 8973.929, 10123.93,
10823.63, 11628.01, 12514.52, 13482.75, 14713.85,
15701.81, 17007.49])
Zi_gamry = np.array([-1367.239, -1502.195, -1621.813, -1672.93, -1668.395,
-1620.144, -1506.859, -1407.856, -1266.296, -1091.802,
-947.1432, -813.0331, -704.9116, -606.206, -516.6904,
-439.2898, -382.0586, -327.8677, -279.8773, -247.0336,
-214.7129, -187.785, -163.6504, -146.0875, -128.724,
-114.1715, -105.0351, -95.6213, -88.8763, -82.36904,
-77.56557, -73.49171, -70.95162, -68.13791, -69.87909,
-68.70265, -73.79854, -78.35508, -81.94554, -94.86475,
-104.9407, -122.1189, -142.8908, -164.5738, -195.5508,
-228.205, -275.2808, -321.6813, -387.5287, -470.5144,
-558.4168, -655.9384, -773.8778, -913.8754, -1069.582,
-1243.894, -1440.5, -1644.846, -1891.697, -2170.397,
-2427.713, -2737.648, -3059.258, -3423.424, -3800.406,
-4165.968, -4477.789, -4931.03, -5301.367, -5703.416,
-6161.72, -6635.557])
# Reference complex impedance assembled from the real/imaginary Gamry columns.
Z_gamry = Zr_gamry + 1j*Zi_gamry
def test_readFile():
    """readFile must reproduce the reference frequency and impedance arrays."""
    freqs, impedances = readFile('./data/exampleData.csv')
    freqs_ok = (freqs == frequencies).all()
    impedances_ok = (impedances == Z_correct).all()
    assert freqs_ok and impedances_ok
def test_readGamry():
    """readGamry must reproduce the reference Gamry frequency/impedance data."""
    freqs, impedances = readGamry('./data/Chalco-in-buffer-50mV.DTA')
    freqs_ok = (freqs == f_gamry).all()
    impedances_ok = (impedances == Z_gamry).all()
    assert freqs_ok and impedances_ok
| [
"numpy.array",
"impedance.preprocessing.readFile",
"impedance.preprocessing.readGamry"
] | [((120, 723), 'numpy.array', 'np.array', (['[0.0031623, 0.0039811, 0.0050119, 0.0063096, 0.0079433, 0.01, 0.012589, \n 0.015849, 0.019953, 0.025119, 0.031623, 0.039811, 0.050119, 0.063096, \n 0.079433, 0.1, 0.12589, 0.15849, 0.19953, 0.25119, 0.31623, 0.39811, \n 0.50119, 0.63096, 0.79433, 1.0, 1.2589, 1.5849, 1.9953, 2.5119, 3.1623,\n 3.9811, 5.0119, 6.3096, 7.9433, 10.0, 12.589, 15.849, 19.953, 25.119, \n 31.623, 39.811, 50.119, 63.096, 79.433, 100.0, 125.89, 158.49, 199.53, \n 251.19, 316.23, 398.11, 501.19, 630.96, 794.33, 1000.0, 1258.9, 1584.9,\n 1995.3, 2511.9, 3162.3, 3981.1, 5011.9, 6309.6, 7943.3, 10000.0]'], {}), '([0.0031623, 0.0039811, 0.0050119, 0.0063096, 0.0079433, 0.01, \n 0.012589, 0.015849, 0.019953, 0.025119, 0.031623, 0.039811, 0.050119, \n 0.063096, 0.079433, 0.1, 0.12589, 0.15849, 0.19953, 0.25119, 0.31623, \n 0.39811, 0.50119, 0.63096, 0.79433, 1.0, 1.2589, 1.5849, 1.9953, 2.5119,\n 3.1623, 3.9811, 5.0119, 6.3096, 7.9433, 10.0, 12.589, 15.849, 19.953, \n 25.119, 31.623, 39.811, 50.119, 63.096, 79.433, 100.0, 125.89, 158.49, \n 199.53, 251.19, 316.23, 398.11, 501.19, 630.96, 794.33, 1000.0, 1258.9,\n 1584.9, 1995.3, 2511.9, 3162.3, 3981.1, 5011.9, 6309.6, 7943.3, 10000.0])\n', (128, 723), True, 'import numpy as np\n'), ((4319, 5109), 'numpy.array', 'np.array', (['[200015.6, 158953.1, 126234.4, 100265.6, 79640.63, 63328.12, 50296.88, \n 39984.37, 31734.38, 25265.63, 20015.62, 15890.62, 12703.13, 10078.13, \n 8015.625, 6328.125, 5009.191, 3998.162, 3170.956, 2527.573, 2015.625, \n 1577.524, 1265.625, 998.264, 796.875, 627.7902, 505.5147, 397.9953, \n 315.5048, 252.4038, 198.6229, 158.3615, 125.558, 100.4464, 79.90057, \n 63.3446, 49.86702, 39.72458, 31.6723, 24.93351, 19.86229, 15.83615, \n 12.40079, 9.93114, 7.944915, 6.317385, 5.008013, 4.020154, 3.158693, \n 2.504006, 1.998082, 1.584686, 1.266892, 0.999041, 0.7923428, 0.633446, \n 0.5040323, 0.400641, 0.3188775, 0.2520161, 0.2003205, 0.1588983, \n 0.1263477, 0.1003747, 0.0797194, 
0.0632591, 0.0502412, 0.0399276, \n 0.0317152, 0.0251881, 0.0200064, 0.0158898]'], {}), '([200015.6, 158953.1, 126234.4, 100265.6, 79640.63, 63328.12, \n 50296.88, 39984.37, 31734.38, 25265.63, 20015.62, 15890.62, 12703.13, \n 10078.13, 8015.625, 6328.125, 5009.191, 3998.162, 3170.956, 2527.573, \n 2015.625, 1577.524, 1265.625, 998.264, 796.875, 627.7902, 505.5147, \n 397.9953, 315.5048, 252.4038, 198.6229, 158.3615, 125.558, 100.4464, \n 79.90057, 63.3446, 49.86702, 39.72458, 31.6723, 24.93351, 19.86229, \n 15.83615, 12.40079, 9.93114, 7.944915, 6.317385, 5.008013, 4.020154, \n 3.158693, 2.504006, 1.998082, 1.584686, 1.266892, 0.999041, 0.7923428, \n 0.633446, 0.5040323, 0.400641, 0.3188775, 0.2520161, 0.2003205, \n 0.1588983, 0.1263477, 0.1003747, 0.0797194, 0.0632591, 0.0502412, \n 0.0399276, 0.0317152, 0.0251881, 0.0200064, 0.0158898])\n', (4327, 5109), True, 'import numpy as np\n'), ((5690, 6467), 'numpy.array', 'np.array', (['[825.8584, 1100.361, 1401.721, 1739.625, 2087.403, 2422.298, 2720.257, \n 2982.016, 3212.336, 3359.629, 3499.298, 3598.306, 3688.117, 3766.628, \n 3808.92, 3842.264, 3902.565, 3927.298, 3944.01, 3987.966, 3998.507, \n 4029.045, 4044.939, 4077.349, 4068.979, 4072.986, 4078.837, 4107.241, \n 4130.96, 4143.088, 4164.664, 4183.986, 4206.823, 4225.685, 4230.309, \n 4228.707, 4242.562, 4250.716, 4219.722, 4208.409, 4203.486, 4213.595, \n 4241.487, 4258.891, 4295.819, 4297.472, 4313.771, 4361.165, 4408.525, \n 4430.184, 4495.299, 4571.314, 4632.138, 4753.051, 4889.047, 5038.293, \n 5218.515, 5444.164, 5926.305, 6461.792, 7343.626, 7986.202, 8435.127, \n 8973.929, 10123.93, 10823.63, 11628.01, 12514.52, 13482.75, 14713.85, \n 15701.81, 17007.49]'], {}), '([825.8584, 1100.361, 1401.721, 1739.625, 2087.403, 2422.298, \n 2720.257, 2982.016, 3212.336, 3359.629, 3499.298, 3598.306, 3688.117, \n 3766.628, 3808.92, 3842.264, 3902.565, 3927.298, 3944.01, 3987.966, \n 3998.507, 4029.045, 4044.939, 4077.349, 4068.979, 4072.986, 4078.837, \n 4107.241, 
4130.96, 4143.088, 4164.664, 4183.986, 4206.823, 4225.685, \n 4230.309, 4228.707, 4242.562, 4250.716, 4219.722, 4208.409, 4203.486, \n 4213.595, 4241.487, 4258.891, 4295.819, 4297.472, 4313.771, 4361.165, \n 4408.525, 4430.184, 4495.299, 4571.314, 4632.138, 4753.051, 4889.047, \n 5038.293, 5218.515, 5444.164, 5926.305, 6461.792, 7343.626, 7986.202, \n 8435.127, 8973.929, 10123.93, 10823.63, 11628.01, 12514.52, 13482.75, \n 14713.85, 15701.81, 17007.49])\n', (5698, 6467), True, 'import numpy as np\n'), ((6724, 7569), 'numpy.array', 'np.array', (['[-1367.239, -1502.195, -1621.813, -1672.93, -1668.395, -1620.144, -1506.859,\n -1407.856, -1266.296, -1091.802, -947.1432, -813.0331, -704.9116, -\n 606.206, -516.6904, -439.2898, -382.0586, -327.8677, -279.8773, -\n 247.0336, -214.7129, -187.785, -163.6504, -146.0875, -128.724, -\n 114.1715, -105.0351, -95.6213, -88.8763, -82.36904, -77.56557, -\n 73.49171, -70.95162, -68.13791, -69.87909, -68.70265, -73.79854, -\n 78.35508, -81.94554, -94.86475, -104.9407, -122.1189, -142.8908, -\n 164.5738, -195.5508, -228.205, -275.2808, -321.6813, -387.5287, -\n 470.5144, -558.4168, -655.9384, -773.8778, -913.8754, -1069.582, -\n 1243.894, -1440.5, -1644.846, -1891.697, -2170.397, -2427.713, -\n 2737.648, -3059.258, -3423.424, -3800.406, -4165.968, -4477.789, -\n 4931.03, -5301.367, -5703.416, -6161.72, -6635.557]'], {}), '([-1367.239, -1502.195, -1621.813, -1672.93, -1668.395, -1620.144, \n -1506.859, -1407.856, -1266.296, -1091.802, -947.1432, -813.0331, -\n 704.9116, -606.206, -516.6904, -439.2898, -382.0586, -327.8677, -\n 279.8773, -247.0336, -214.7129, -187.785, -163.6504, -146.0875, -\n 128.724, -114.1715, -105.0351, -95.6213, -88.8763, -82.36904, -77.56557,\n -73.49171, -70.95162, -68.13791, -69.87909, -68.70265, -73.79854, -\n 78.35508, -81.94554, -94.86475, -104.9407, -122.1189, -142.8908, -\n 164.5738, -195.5508, -228.205, -275.2808, -321.6813, -387.5287, -\n 470.5144, -558.4168, -655.9384, -773.8778, -913.8754, -1069.582, 
-\n 1243.894, -1440.5, -1644.846, -1891.697, -2170.397, -2427.713, -\n 2737.648, -3059.258, -3423.424, -3800.406, -4165.968, -4477.789, -\n 4931.03, -5301.367, -5703.416, -6161.72, -6635.557])\n', (6732, 7569), True, 'import numpy as np\n'), ((4273, 4287), 'numpy.array', 'np.array', (['real'], {}), '(real)\n', (4281, 4287), True, 'import numpy as np\n'), ((7879, 7913), 'impedance.preprocessing.readFile', 'readFile', (['"""./data/exampleData.csv"""'], {}), "('./data/exampleData.csv')\n", (7887, 7913), False, 'from impedance.preprocessing import readFile, readGamry\n'), ((8014, 8059), 'impedance.preprocessing.readGamry', 'readGamry', (['"""./data/Chalco-in-buffer-50mV.DTA"""'], {}), "('./data/Chalco-in-buffer-50mV.DTA')\n", (8023, 8059), False, 'from impedance.preprocessing import readFile, readGamry\n'), ((4293, 4307), 'numpy.array', 'np.array', (['imag'], {}), '(imag)\n', (4301, 4307), True, 'import numpy as np\n')] |
import numpy as np
from astropy import units as u
from sklearn.ensemble import RandomForestClassifier
from .regressor_classifier_base import RegressorClassifierBase
def proba_drifting(x):
    """
    gives more weight to outliers -- i.e. close to 0 and 1

    Quintic polynomial built from the constraints
    f(0)=0, f(0.5)=0.5, f(1)=1 and f'(0)=0, f'(0.5)=1, f'(1)=0.
    """
    cubic = 10 * x**3
    quartic = 15 * x**4
    quintic = 6 * x**5
    return cubic - quartic + quintic
class EventClassifier(RegressorClassifierBase):
    """Event-level classifier with one model per camera type.

    Per-telescope probabilities are sharpened with ``proba_drifting`` and
    combined into one prediction per event by a weighted average.
    """

    def __init__(self, classifier=RandomForestClassifier,
                 cam_id_list=("cam"), **kwargs):
        # NOTE(review): ("cam") is a plain string, not a 1-tuple -- confirm
        # whether ("cam",) was intended before changing the default.
        super().__init__(model=classifier, cam_id_list=cam_id_list, **kwargs)

    def predict_proba_by_event(self, X):
        """Return per-event class probabilities for every event in ``X``.

        ``X`` is an iterable of events; each event maps a cam_id to the
        telescope feature rows fed to that camera's model.
        """
        predict_proba = []
        for evt in X:
            per_cam_probas = []
            tel_weights = []
            for cam_id, tels in evt.items():
                # Bug fix: the old code prepended each camera's probabilities
                # (np.append(these_probas, tel_probas)) while the weights were
                # appended, pairing probabilities with the wrong weights
                # whenever an event mixed camera types. Collect in forward
                # order and concatenate once instead.
                per_cam_probas.append(self.model_dict[cam_id].predict_proba(tels))
                try:
                    # if a `namedtuple` is provided, we can weight the different images
                    # using some of the provided features
                    tel_weights += [t.sum_signal_cam / t.impact_dist for t in tels]
                except AttributeError:
                    # otherwise give every image the same weight
                    tel_weights += [1] * len(tels)
            tel_probas = np.concatenate(per_cam_probas, axis=0)
            predict_proba.append(np.average(proba_drifting(tel_probas),
                                            weights=tel_weights, axis=0))
        return np.array(predict_proba)

    def predict_by_event(self, X):
        """Return the most probable class for every event in ``X``."""
        proba = self.predict_proba_by_event(X)
        predictions = self.classes_[np.argmax(proba, axis=1)]
        return predictions
| [
"numpy.append",
"numpy.array",
"numpy.argmax"
] | [((1691, 1714), 'numpy.array', 'np.array', (['predict_proba'], {}), '(predict_proba)\n', (1699, 1714), True, 'import numpy as np\n'), ((1834, 1858), 'numpy.argmax', 'np.argmax', (['proba'], {'axis': '(1)'}), '(proba, axis=1)\n', (1843, 1858), True, 'import numpy as np\n'), ((1012, 1055), 'numpy.append', 'np.append', (['these_probas', 'tel_probas'], {'axis': '(0)'}), '(these_probas, tel_probas, axis=0)\n', (1021, 1055), True, 'import numpy as np\n')] |
import sys
from PyQt5.QtWidgets import (
QMainWindow, QApplication, QAction, qApp, QFormLayout, QVBoxLayout,
QHBoxLayout, QTabWidget, QWidget, QSizePolicy
)
from PyQt5 import uic
import matplotlib.pyplot as plt
from .cellular_automaton import CellularAutomaton
import numpy as np
import threading
class MainWindow(QMainWindow):
    """Main application window; widgets are loaded from a Qt Designer file."""
    def __init__(self):
        super(MainWindow, self).__init__()
        # The .ui file defines all form_* and button_* widgets used below.
        uic.loadUi('./epydemic/forms/ePydemic.ui', self)
        self.button_simulate.clicked.connect(self.worker)
        self.show()
    def worker(self):
        """Run the simulation on a daemon thread so the UI stays responsive."""
        thread = threading.Thread(target=self.start_simulation, daemon=True)
        # Disable the button to prevent overlapping runs; re-enabled at the end
        # of start_simulation.
        self.button_simulate.setEnabled(False)
        thread.start()
    def start_simulation(self):
        """Read parameters from the form, run the automaton, plot the stats.

        NOTE(review): this executes on a worker thread but calls statusBar()
        and matplotlib directly; Qt widgets and pyplot are generally not
        thread-safe -- confirm or route UI updates through signals/slots.
        """
        current_iteration = 0
        n_iterations = int(self.form_n_iterations.text())
        height = int(self.form_height.text())
        width = int(self.form_width.text())
        p_cure = float(self.form_p_cure.text())
        p_death_disease = float(self.form_p_death_disease.text())
        p_death_other = float(self.form_p_death_other.text())
        beta = float(self.form_beta.text())
        d_max = 200  # NOTE(review): unused local; the constructor passes the literal 200
        ca = CellularAutomaton(
            height=height,
            width=width,
            p_cure=p_cure,
            p_death_disease=p_death_disease,
            p_death_other=p_death_other,
            beta=beta,
            d_max=200
        )
        # One slot per time step, plus the initial state at t=0.
        data_i = np.empty(n_iterations+1)
        data_s = np.empty(n_iterations+1)
        data_r = np.empty(n_iterations+1)
        while(current_iteration <= n_iterations):
            self.statusBar().showMessage(
                f"STATUS: RUNNING | t={current_iteration}")
            # Counts plotted later as Infected / Susceptible / Recovered.
            i_count, s_count, r_count = ca.stats()
            data_i[current_iteration] = i_count
            data_s[current_iteration] = s_count
            data_r[current_iteration] = r_count
            ca.step()
            current_iteration += 1
        self.statusBar().showMessage(
            f"STATUS: RUN COMPLETE | t={current_iteration-1}")
        self.plot_stats(data_i, data_s, data_r, n_iterations, width * height)
        self.button_simulate.setEnabled(True)
    def plot_stats(self, data_i, data_s, data_r, n_iterations, n_individuals):
        """Plot infected/recovered/susceptible counts over time with pyplot."""
        t = range(0, len(data_i))
        plt.ticklabel_format(style='sci', scilimits=(0, 3), useMathText=True)
        plt.title('Number of individuals')
        plt.plot(t, data_i, 'k--', label='Infected')
        plt.plot(t, data_r, 'k:', label='Recovered')
        plt.plot(t, data_s, 'k', label='Susceptible')
        plt.legend(loc='upper right')
        plt.ylabel('Individuals')
        plt.xlabel('Time')
        plt.xlim(0, n_iterations)
        plt.ylim(0, n_individuals)
        plt.show()
def main():
    """Create the Qt application, show the main window, run the event loop."""
    app = QApplication(sys.argv)
    # Keep a reference so the window is not garbage-collected while the
    # event loop runs.
    window = MainWindow()
    sys.exit(app.exec_())
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"PyQt5.uic.loadUi",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.empty",
"matplotlib.pyplot.ticklabel_format",
"PyQt5.QtWidgets.QApplication",
"threading.Thread",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplot... | [((2786, 2808), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2798, 2808), False, 'from PyQt5.QtWidgets import QMainWindow, QApplication, QAction, qApp, QFormLayout, QVBoxLayout, QHBoxLayout, QTabWidget, QWidget, QSizePolicy\n'), ((414, 462), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""./epydemic/forms/ePydemic.ui"""', 'self'], {}), "('./epydemic/forms/ePydemic.ui', self)\n", (424, 462), False, 'from PyQt5 import uic\n'), ((583, 642), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.start_simulation', 'daemon': '(True)'}), '(target=self.start_simulation, daemon=True)\n', (599, 642), False, 'import threading\n'), ((1436, 1462), 'numpy.empty', 'np.empty', (['(n_iterations + 1)'], {}), '(n_iterations + 1)\n', (1444, 1462), True, 'import numpy as np\n'), ((1478, 1504), 'numpy.empty', 'np.empty', (['(n_iterations + 1)'], {}), '(n_iterations + 1)\n', (1486, 1504), True, 'import numpy as np\n'), ((1520, 1546), 'numpy.empty', 'np.empty', (['(n_iterations + 1)'], {}), '(n_iterations + 1)\n', (1528, 1546), True, 'import numpy as np\n'), ((2301, 2370), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'scilimits': '(0, 3)', 'useMathText': '(True)'}), "(style='sci', scilimits=(0, 3), useMathText=True)\n", (2321, 2370), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2413), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of individuals"""'], {}), "('Number of individuals')\n", (2388, 2413), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2466), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data_i', '"""k--"""'], {'label': '"""Infected"""'}), "(t, data_i, 'k--', label='Infected')\n", (2430, 2466), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2519), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data_r', '"""k:"""'], {'label': '"""Recovered"""'}), "(t, data_r, 'k:', label='Recovered')\n", (2483, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2573), 
'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data_s', '"""k"""'], {'label': '"""Susceptible"""'}), "(t, data_s, 'k', label='Susceptible')\n", (2536, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2582, 2611), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2592, 2611), True, 'import matplotlib.pyplot as plt\n'), ((2620, 2645), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Individuals"""'], {}), "('Individuals')\n", (2630, 2645), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2672), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2664, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2706), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'n_iterations'], {}), '(0, n_iterations)\n', (2689, 2706), True, 'import matplotlib.pyplot as plt\n'), ((2715, 2741), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'n_individuals'], {}), '(0, n_individuals)\n', (2723, 2741), True, 'import matplotlib.pyplot as plt\n'), ((2751, 2761), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2759, 2761), True, 'import matplotlib.pyplot as plt\n')] |
"""
Tests related to miepy.interface
"""
import numpy as np
import miepy
import pytest
nm = 1e-9
wavelength = 600*nm
k = 2*np.pi/wavelength
radius = 75*nm
medium = miepy.materials.water()
material = miepy.materials.Ag()
width = 200*nm
polarization = [1,0]
zpos = 400*nm
@pytest.mark.parametrize("s1,s2,rtol", [
    (miepy.sources.gaussian_beam(width=width, polarization=polarization, center=[0,0,-zpos]),
    miepy.sources.gaussian_beam(width=width, polarization=polarization), 0),
    (miepy.sources.plane_wave(polarization=polarization),
    miepy.sources.plane_wave(polarization=polarization), 1e-4)
])
def test_interface_z_translation(s1, s2, rtol):
    """
    Moving the source and particle is identical to moving the interface (cross-section comparison)
    """
    def cross_sections(source, particle_z, interface):
        # One-sphere cluster; return its cross-sections as an array.
        cluster = miepy.sphere_cluster(position=[0, 0, particle_z],
                                     radius=radius,
                                     material=material,
                                     medium=medium,
                                     lmax=2,
                                     source=source,
                                     interface=interface,
                                     wavelength=wavelength)
        return np.array(cluster.cross_sections())

    # Case 1: interface at z=0, particle and source shifted down by zpos.
    shifted = cross_sections(s1, -zpos,
                             miepy.interface(miepy.constant_material(index=1.7)))
    # Case 2: particle at the origin, interface shifted up by zpos.
    centered = cross_sections(s2, 0,
                              miepy.interface(miepy.constant_material(index=1.7), z=zpos))
    assert np.allclose(shifted, centered, atol=0, rtol=rtol)
@pytest.mark.parametrize("source,rtol", [
    (miepy.sources.gaussian_beam(width=width, polarization=polarization), 1e-15),
    (miepy.sources.plane_wave(polarization=polarization), 0)
])
def test_index_matched_interface(source, rtol):
    """
    An interface that is index-matched with the medium is identical to not having an interface (cross-section comparison)
    """
    # NOTE(review): the parametrized ``rtol`` is never used -- the assert
    # below hardcodes rtol=1e-15. Confirm whether rtol=rtol was intended.
    interface = miepy.interface(medium, z=zpos)
    cluster = miepy.sphere_cluster(position=[0,0,0],
                                 radius=radius,
                                 material=material,
                                 medium=medium,
                                 lmax=2,
                                 source=source,
                                 interface=interface,
                                 wavelength=wavelength)
    C1 = np.array(cluster.cross_sections())
    # Same cluster without any interface; index matching should make the
    # two cross-section sets agree.
    cluster = miepy.sphere_cluster(position=[0,0,0],
                                 radius=radius,
                                 material=material,
                                 medium=medium,
                                 lmax=2,
                                 source=source,
                                 wavelength=wavelength)
    C2 = np.array(cluster.cross_sections())
    assert np.allclose(C1, C2, atol=0, rtol=1e-15)
| [
"miepy.sources.plane_wave",
"numpy.allclose",
"miepy.materials.water",
"miepy.sources.gaussian_beam",
"miepy.materials.Ag",
"miepy.constant_material",
"miepy.sphere_cluster",
"miepy.interface"
] | [((166, 189), 'miepy.materials.water', 'miepy.materials.water', ([], {}), '()\n', (187, 189), False, 'import miepy\n'), ((201, 221), 'miepy.materials.Ag', 'miepy.materials.Ag', ([], {}), '()\n', (219, 221), False, 'import miepy\n'), ((868, 1033), 'miepy.sphere_cluster', 'miepy.sphere_cluster', ([], {'position': '[0, 0, -zpos]', 'radius': 'radius', 'material': 'material', 'medium': 'medium', 'lmax': '(2)', 'source': 's1', 'interface': 'interface', 'wavelength': 'wavelength'}), '(position=[0, 0, -zpos], radius=radius, material=\n material, medium=medium, lmax=2, source=s1, interface=interface,\n wavelength=wavelength)\n', (888, 1033), False, 'import miepy\n'), ((1404, 1565), 'miepy.sphere_cluster', 'miepy.sphere_cluster', ([], {'position': '[0, 0, 0]', 'radius': 'radius', 'material': 'material', 'medium': 'medium', 'lmax': '(2)', 'source': 's2', 'interface': 'interface', 'wavelength': 'wavelength'}), '(position=[0, 0, 0], radius=radius, material=material,\n medium=medium, lmax=2, source=s2, interface=interface, wavelength=\n wavelength)\n', (1424, 1565), False, 'import miepy\n'), ((1856, 1894), 'numpy.allclose', 'np.allclose', (['C1', 'C2'], {'atol': '(0)', 'rtol': 'rtol'}), '(C1, C2, atol=0, rtol=rtol)\n', (1867, 1894), True, 'import numpy as np\n'), ((2286, 2317), 'miepy.interface', 'miepy.interface', (['medium'], {'z': 'zpos'}), '(medium, z=zpos)\n', (2301, 2317), False, 'import miepy\n'), ((2332, 2497), 'miepy.sphere_cluster', 'miepy.sphere_cluster', ([], {'position': '[0, 0, 0]', 'radius': 'radius', 'material': 'material', 'medium': 'medium', 'lmax': '(2)', 'source': 'source', 'interface': 'interface', 'wavelength': 'wavelength'}), '(position=[0, 0, 0], radius=radius, material=material,\n medium=medium, lmax=2, source=source, interface=interface, wavelength=\n wavelength)\n', (2352, 2497), False, 'import miepy\n'), ((2792, 2931), 'miepy.sphere_cluster', 'miepy.sphere_cluster', ([], {'position': '[0, 0, 0]', 'radius': 'radius', 'material': 'material', 
'medium': 'medium', 'lmax': '(2)', 'source': 'source', 'wavelength': 'wavelength'}), '(position=[0, 0, 0], radius=radius, material=material,\n medium=medium, lmax=2, source=source, wavelength=wavelength)\n', (2812, 2931), False, 'import miepy\n'), ((3192, 3231), 'numpy.allclose', 'np.allclose', (['C1', 'C2'], {'atol': '(0)', 'rtol': '(1e-15)'}), '(C1, C2, atol=0, rtol=1e-15)\n', (3203, 3231), True, 'import numpy as np\n'), ((818, 852), 'miepy.constant_material', 'miepy.constant_material', ([], {'index': '(1.7)'}), '(index=1.7)\n', (841, 852), False, 'import miepy\n'), ((1346, 1380), 'miepy.constant_material', 'miepy.constant_material', ([], {'index': '(1.7)'}), '(index=1.7)\n', (1369, 1380), False, 'import miepy\n'), ((321, 415), 'miepy.sources.gaussian_beam', 'miepy.sources.gaussian_beam', ([], {'width': 'width', 'polarization': 'polarization', 'center': '[0, 0, -zpos]'}), '(width=width, polarization=polarization, center=\n [0, 0, -zpos])\n', (348, 415), False, 'import miepy\n'), ((420, 487), 'miepy.sources.gaussian_beam', 'miepy.sources.gaussian_beam', ([], {'width': 'width', 'polarization': 'polarization'}), '(width=width, polarization=polarization)\n', (447, 487), False, 'import miepy\n'), ((498, 549), 'miepy.sources.plane_wave', 'miepy.sources.plane_wave', ([], {'polarization': 'polarization'}), '(polarization=polarization)\n', (522, 549), False, 'import miepy\n'), ((561, 612), 'miepy.sources.plane_wave', 'miepy.sources.plane_wave', ([], {'polarization': 'polarization'}), '(polarization=polarization)\n', (585, 612), False, 'import miepy\n'), ((1943, 2010), 'miepy.sources.gaussian_beam', 'miepy.sources.gaussian_beam', ([], {'width': 'width', 'polarization': 'polarization'}), '(width=width, polarization=polarization)\n', (1970, 2010), False, 'import miepy\n'), ((2025, 2076), 'miepy.sources.plane_wave', 'miepy.sources.plane_wave', ([], {'polarization': 'polarization'}), '(polarization=polarization)\n', (2049, 2076), False, 'import miepy\n')] |
import numpy as np
import pandas as pd
USAGE_DESC = ['AN', 'AW', 'atomic radius', 'electronegativity',
'm. p.', 'b. p.', 'delta_fus H', 'density',
'ionization enegy', 'Surface energy']
def read_desc():
desc = pd.read_csv('data/Descriptors_WGS.csv',
skiprows=[0], index_col="symbol")
desc = desc.loc[:, ['AN', 'AW', 'atomic radius', 'electronegativity',
'm. p.', 'b. p.', 'delta_fus H ', 'density',
'ionization enegy ', 'Surface energy ']]
desc.columns = USAGE_DESC
desc = desc.fillna(desc.mean())
return desc
def data_convert():
data = pd.read_excel(
'data/WGS.xlsx', skiprows=8).drop(['Total # of Data', 'Reference', 'Data'], axis=1)
print('# of Original Datapoints:', len(data))
drop_support = ['ZEO', 'HAP', 'ACC', 'YSZ']
idx = (data.loc[:, drop_support] == 0).all(axis=1)
data = data[idx].drop(drop_support, axis=1)
data.index = np.arange(len(data))
print('# of Data after preprocessing:', len(data))
desc = read_desc()
support = pd.read_excel('data/support.xlsx')
element = list(desc.index)
data = pd.concat([pd.DataFrame(columns=element), data]).fillna(0.0)
support_wt = np.array(100 - data.loc[:, element].sum(axis=1)
).reshape(-1, 1)*np.array(data.loc[:, support.support])
support_wt = support_wt / np.array(support.ave_MW).T
data.loc[:, element] = data.loc[:, element] / desc.AW
data.loc[:, support.key] += support_wt
data.loc[:, element] = data.loc[:, element] / \
np.array(data.loc[:, element].sum(axis=1)).reshape(-1, 1) * 100
data = data.drop(support.support, axis=1)
swed_names = []
for i in range(4):
for s in list(desc.columns):
swed_names.append(f"{s} ({i + 1})")
swed = pd.DataFrame(comp_times_base(
data.loc[:, element], desc.T, sort=True)).iloc[:, :40]
swed.columns = swed_names
data = pd.concat([data, swed], axis=1)
data.to_csv('data/wgs.csv', index=None)
return data, desc
def data_loader(convert=False, desc_names=USAGE_DESC, temp=None):
for s in desc_names:
if s not in USAGE_DESC:
print(f'{s} is not avaiable!!')
print('Please use only in ', USAGE_DESC)
return None
if convert:
data, desc = data_convert()
else:
data = pd.read_csv('data/wgs.csv')
desc = read_desc()
if temp is not None:
idx = data.loc[:, 'Reaction Temperture (℃)'] <= temp
data = data[idx]
data.index = np.arange(len(data))
cols = get_columns(data, desc_names)
return data, desc, cols
def comp_times_base(comp, base, sort=False, times=True, attention=False):
count = 0
for key, rows in comp.iterrows():
stack = np.vstack((rows, base))
if times == True:
time = np.array(base) * np.array(rows)
stack = np.vstack((rows, time))
if sort == True:
stack = pd.DataFrame(stack).sort_values(
[0], ascending=False, axis=1)
stack = pd.DataFrame(stack).iloc[1:, :]
stack = np.array(stack)
if count == 0:
if attention:
res = np.sum(stack, axis=1)
else:
res = np.array(stack.T.flatten())
count += 1
else:
if attention:
res = np.vstack((res, np.sum(stack, axis=1)))
else:
res = np.vstack((res, np.array(stack.T.flatten())))
count += 1
return res
def get_columns(data, use_cols):
element = list(data.loc[:, 'Li':'Th'].columns)
preparation = list(data.loc[:, 'IWI': 'DP'].columns)
condition = list(
data.loc[:, 'Calcination Temperture (℃)':'F/W (mg.min/ml)'].columns)
swed_names = []
for i in range(4):
for s in list(use_cols):
swed_names.append(f"{s} ({i + 1})")
cols = {}
cols['element'] = element
cols['preparation'] = preparation
cols['condition'] = condition
cols['use_cols'] = use_cols
cols['swed'] = swed_names
cols['conv'] = element + preparation + condition
cols['prop1'] = element + preparation + condition + swed_names
cols['prop2'] = preparation + condition + swed_names
cols['target'] = 'CO Conversion'
return cols
| [
"pandas.read_csv",
"numpy.array",
"numpy.sum",
"numpy.vstack",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.concat"
] | [((245, 318), 'pandas.read_csv', 'pd.read_csv', (['"""data/Descriptors_WGS.csv"""'], {'skiprows': '[0]', 'index_col': '"""symbol"""'}), "('data/Descriptors_WGS.csv', skiprows=[0], index_col='symbol')\n", (256, 318), True, 'import pandas as pd\n'), ((1106, 1140), 'pandas.read_excel', 'pd.read_excel', (['"""data/support.xlsx"""'], {}), "('data/support.xlsx')\n", (1119, 1140), True, 'import pandas as pd\n'), ((1996, 2027), 'pandas.concat', 'pd.concat', (['[data, swed]'], {'axis': '(1)'}), '([data, swed], axis=1)\n', (2005, 2027), True, 'import pandas as pd\n'), ((1353, 1391), 'numpy.array', 'np.array', (['data.loc[:, support.support]'], {}), '(data.loc[:, support.support])\n', (1361, 1391), True, 'import numpy as np\n'), ((2418, 2445), 'pandas.read_csv', 'pd.read_csv', (['"""data/wgs.csv"""'], {}), "('data/wgs.csv')\n", (2429, 2445), True, 'import pandas as pd\n'), ((2842, 2865), 'numpy.vstack', 'np.vstack', (['(rows, base)'], {}), '((rows, base))\n', (2851, 2865), True, 'import numpy as np\n'), ((3177, 3192), 'numpy.array', 'np.array', (['stack'], {}), '(stack)\n', (3185, 3192), True, 'import numpy as np\n'), ((666, 708), 'pandas.read_excel', 'pd.read_excel', (['"""data/WGS.xlsx"""'], {'skiprows': '(8)'}), "('data/WGS.xlsx', skiprows=8)\n", (679, 708), True, 'import pandas as pd\n'), ((1422, 1446), 'numpy.array', 'np.array', (['support.ave_MW'], {}), '(support.ave_MW)\n', (1430, 1446), True, 'import numpy as np\n'), ((2963, 2986), 'numpy.vstack', 'np.vstack', (['(rows, time)'], {}), '((rows, time))\n', (2972, 2986), True, 'import numpy as np\n'), ((2911, 2925), 'numpy.array', 'np.array', (['base'], {}), '(base)\n', (2919, 2925), True, 'import numpy as np\n'), ((2928, 2942), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (2936, 2942), True, 'import numpy as np\n'), ((3129, 3148), 'pandas.DataFrame', 'pd.DataFrame', (['stack'], {}), '(stack)\n', (3141, 3148), True, 'import pandas as pd\n'), ((3265, 3286), 'numpy.sum', 'np.sum', (['stack'], {'axis': '(1)'}), 
'(stack, axis=1)\n', (3271, 3286), True, 'import numpy as np\n'), ((1194, 1223), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'element'}), '(columns=element)\n', (1206, 1223), True, 'import pandas as pd\n'), ((3033, 3052), 'pandas.DataFrame', 'pd.DataFrame', (['stack'], {}), '(stack)\n', (3045, 3052), True, 'import pandas as pd\n'), ((3457, 3478), 'numpy.sum', 'np.sum', (['stack'], {'axis': '(1)'}), '(stack, axis=1)\n', (3463, 3478), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import random
import torch
import matplotlib.pyplot as plt
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from transformers import get_linear_schedule_with_warmup
import warnings
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import time
import datetime
SEED = 2021
BATCH_SIZE = 60 #20
LEARNING_RATE = 1e-5 #-5
WEIGHT_DECAY = 5e-3 #-3
EPSILON = 1e-6 #-8
save_path="saved_models/"
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
train_data = pd.read_csv('./train_data_public.csv')
test_data = pd.read_csv('./test_public.csv')
X_train = list(train_data['text'])
y_train = list(train_data['class'])
test_data_sent = list(test_data['text']) # 列表格式
text_all = X_train + test_data_sent
total_targets = torch.tensor(y_train)
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', cache_dir="./transformer_file/")
def convert_text_to_token(tokenizer, sentence, limit_size=126):# 将每一句转成数字(大于126做截断,小于126做PADDING,加上首尾两个标识,长度总共等于128)
tokens = tokenizer.encode(sentence[:limit_size]) # 直接截断
# 补齐(pad的索引号就是0)
if len(tokens) < limit_size + 2:
tokens.extend([0] * (limit_size + 2 - len(tokens)))
return tokens
input_ids = [convert_text_to_token(tokenizer, x) for x in X_train]
input_tokens = torch.tensor(input_ids)
# 建立mask
def attention_masks(input_ids):
atten_masks = []
for seq in input_ids:
# 如果有编码(>0)即为1, pad为0
seq_mask = [float(x > 0) for x in seq]
atten_masks.append(seq_mask)
return atten_masks
# 生成attention_masks
atten_masks = attention_masks(input_ids)
# 将atten_masks放到tensor中
attention_tokens = torch.tensor(atten_masks)
# 使用random_state固定切分方式,切分 train_inputs, train_labels, train_masks,
input_tokens=np.array(input_tokens)
total_targets=np.array(total_targets)
train_inputs, test_inputs, train_labels, test_labels = train_test_split(input_tokens, total_targets, random_state=2021,
test_size=0.2)
attention_tokens=np.array(attention_tokens)
train_masks, test_masks, _, _ = train_test_split(attention_tokens, input_tokens, random_state=2021, test_size=0.2)
# 使用TensorDataset对tensor进行打包
train_inputs = torch.autograd.Variable(torch.from_numpy(train_inputs))
train_masks = torch.autograd.Variable(torch.from_numpy(train_masks))
train_labels = torch.autograd.Variable(torch.from_numpy(train_labels))
train_data = TensorDataset(train_inputs, train_masks, train_labels)
# 无放回地随机采样样本元素
train_sampler = RandomSampler(train_data)
# 对于训练集,random sampler, shuffle
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)
test_inputs = torch.autograd.Variable(torch.from_numpy(test_inputs))
test_masks = torch.autograd.Variable(torch.from_numpy(test_masks))
test_labels = torch.autograd.Variable(torch.from_numpy(test_labels))
test_data = TensorDataset(test_inputs, test_masks, test_labels)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=BATCH_SIZE)
# 查看dataloader内容
for i, (train, mask, label) in enumerate(train_dataloader):
# torch.Size([16, 128]) torch.Size([16, 128]) torch.Size([16, 1])
print(train)
print(mask)
print(label)
print(train.shape, mask.shape, label.shape)
break
model = BertForSequenceClassification.from_pretrained("bert-base-chinese", num_labels=3)# 加载预训练模型, num_labels表示3个分类,好评和差评, 会下载模型 大约412M
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model需要放到GPU中
model.to(device)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
print("save complete")
# 定义优化器 AdamW, eps默认就为1e-8(增加分母的数值,用来提高数值稳定性)
# optimizer = AdamW(model.parameters(), lr = LEARNING_RATE, eps = EPSILON)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': WEIGHT_DECAY},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
# AdamW优化器 性能强于Adam
optimizer = AdamW(optimizer_grouped_parameters, lr=LEARNING_RATE, eps=EPSILON)
epochs = 3
# training steps 的数量: [number of batches] x [number of epochs].
total_steps = len(train_dataloader) * epochs
# 设计 learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
# 分类结果评估
def class_acc(preds, labels): # preds.shape=(16, 2) labels.shape=torch.Size([16, 1])
# eq里面的两个参数的shape=torch.Size([16])
correct = torch.eq(torch.max(preds, dim=1)[1], labels.flatten()).float()
if 0:
print('binary acc ********')
print('preds = ', preds)
print('labels = ', labels)
print('correct = ', correct)
acc = correct.sum().item() / len(correct)
return acc
def format_time(elapsed):
elapsed_rounded = int(round((elapsed)))
return str(datetime.timedelta(seconds=elapsed_rounded)) # 返回 hh:mm:ss 形式的时间
def train(model, optimizer):
# 记录当前时刻
t0 = time.time()
# 统计m每个batch的loss 和 acc
avg_loss, avg_acc = [], []
# 开启训练模式
for step, batch in enumerate(train_dataloader):
# 每隔40个batch 输出一下所用时间.
if step % 40 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# 从batch中取数据,并放到GPU中
b_input_ids, b_input_mask, b_labels = batch[0].long().to(device), batch[1].long().to(device), batch[
2].long().to(device)
# 前向传播,得到output
output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
# 得到loss和预测结果logits = [0.95, 0.5]
loss, logits = output[0], output[1]
# 记录每次的loss和acc
avg_loss.append(loss.item())
# 评估acc
acc = class_acc(logits, b_labels)
avg_acc.append(acc)
# 清空上一轮梯度
optimizer.zero_grad()
# 反向传播
loss.backward()
# 大于1的梯度将其设为1.0, 以防梯度爆炸
clip_grad_norm_(model.parameters(), 1.0)
# 更新模型参数
optimizer.step()
# 更新learning rate
scheduler.step()
# 统计平均loss和acc
avg_loss = np.array(avg_loss).mean()
avg_acc = np.array(avg_acc).mean()
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
print("save complete")
return avg_loss, avg_acc
# 模型评估
def evaluate(model):
avg_acc = []
# 表示进入测试模式
model.eval()
with torch.no_grad():
for batch in test_dataloader:
# 从batch中取数据,并放到GPU中
b_input_ids, b_input_mask, b_labels = batch[0].long().to(device), batch[1].long().to(device), batch[
2].long().to(device)
# 前向传播,得到output
output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
# 统计当前batch的acc
acc = class_acc(output[0], b_labels)
avg_acc.append(acc)
# 统计平均acc
avg_acc = np.array(avg_acc).mean()
return avg_acc
for epoch in range(epochs):
# 模型训练
train_loss, train_acc = train(model, optimizer)
print('epoch={},训练准确率={},损失={}'.format(epoch, train_acc, train_loss))
# 模型评估
test_acc = evaluate(model)
print("epoch={},测试准确率={}".format(epoch, test_acc))
| [
"pandas.read_csv",
"torch.max",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"datetime.timedelta",
"transformers.AdamW",
"numpy.random.seed",
"sklearn.model_selection.train_test_split",
"torch.utils.data.SequentialSampler",
"torch.utils.data.TensorDataset",
"transformers.BertF... | [((648, 665), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (659, 665), False, 'import random\n'), ((666, 686), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (680, 686), True, 'import numpy as np\n'), ((687, 710), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (704, 710), False, 'import torch\n'), ((724, 762), 'pandas.read_csv', 'pd.read_csv', (['"""./train_data_public.csv"""'], {}), "('./train_data_public.csv')\n", (735, 762), True, 'import pandas as pd\n'), ((775, 807), 'pandas.read_csv', 'pd.read_csv', (['"""./test_public.csv"""'], {}), "('./test_public.csv')\n", (786, 807), True, 'import pandas as pd\n'), ((980, 1001), 'torch.tensor', 'torch.tensor', (['y_train'], {}), '(y_train)\n', (992, 1001), False, 'import torch\n'), ((1014, 1102), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-chinese"""'], {'cache_dir': '"""./transformer_file/"""'}), "('bert-base-chinese', cache_dir=\n './transformer_file/')\n", (1043, 1102), False, 'from transformers import BertTokenizer, BertForSequenceClassification, AdamW\n'), ((1496, 1519), 'torch.tensor', 'torch.tensor', (['input_ids'], {}), '(input_ids)\n', (1508, 1519), False, 'import torch\n'), ((1851, 1876), 'torch.tensor', 'torch.tensor', (['atten_masks'], {}), '(atten_masks)\n', (1863, 1876), False, 'import torch\n'), ((1957, 1979), 'numpy.array', 'np.array', (['input_tokens'], {}), '(input_tokens)\n', (1965, 1979), True, 'import numpy as np\n'), ((1994, 2017), 'numpy.array', 'np.array', (['total_targets'], {}), '(total_targets)\n', (2002, 2017), True, 'import numpy as np\n'), ((2073, 2152), 'sklearn.model_selection.train_test_split', 'train_test_split', (['input_tokens', 'total_targets'], {'random_state': '(2021)', 'test_size': '(0.2)'}), '(input_tokens, total_targets, random_state=2021, test_size=0.2)\n', (2089, 2152), False, 'from sklearn.model_selection import train_test_split\n'), ((2242, 
2268), 'numpy.array', 'np.array', (['attention_tokens'], {}), '(attention_tokens)\n', (2250, 2268), True, 'import numpy as np\n'), ((2301, 2387), 'sklearn.model_selection.train_test_split', 'train_test_split', (['attention_tokens', 'input_tokens'], {'random_state': '(2021)', 'test_size': '(0.2)'}), '(attention_tokens, input_tokens, random_state=2021,\n test_size=0.2)\n', (2317, 2387), False, 'from sklearn.model_selection import train_test_split\n'), ((2637, 2691), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_inputs', 'train_masks', 'train_labels'], {}), '(train_inputs, train_masks, train_labels)\n', (2650, 2691), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((2723, 2748), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (2736, 2748), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((2800, 2868), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'BATCH_SIZE'}), '(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n', (2810, 2868), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3088, 3139), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_inputs', 'test_masks', 'test_labels'], {}), '(test_inputs, test_masks, test_labels)\n', (3101, 3139), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3155, 3183), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['test_data'], {}), '(test_data)\n', (3172, 3183), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3202, 3268), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'sampler': 'test_sampler', 'batch_size': 'BATCH_SIZE'}), '(test_data, sampler=test_sampler, batch_size=BATCH_SIZE)\n', (3212, 3268), False, 
'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3534, 3619), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['"""bert-base-chinese"""'], {'num_labels': '(3)'}), "('bert-base-chinese', num_labels=3\n )\n", (3579, 3619), False, 'from transformers import BertTokenizer, BertForSequenceClassification, AdamW\n'), ((4332, 4398), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'LEARNING_RATE', 'eps': 'EPSILON'}), '(optimizer_grouped_parameters, lr=LEARNING_RATE, eps=EPSILON)\n', (4337, 4398), False, 'from transformers import BertTokenizer, BertForSequenceClassification, AdamW\n'), ((4564, 4662), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (4595, 4662), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((2452, 2482), 'torch.from_numpy', 'torch.from_numpy', (['train_inputs'], {}), '(train_inputs)\n', (2468, 2482), False, 'import torch\n'), ((2522, 2551), 'torch.from_numpy', 'torch.from_numpy', (['train_masks'], {}), '(train_masks)\n', (2538, 2551), False, 'import torch\n'), ((2592, 2622), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (2608, 2622), False, 'import torch\n'), ((2909, 2938), 'torch.from_numpy', 'torch.from_numpy', (['test_inputs'], {}), '(test_inputs)\n', (2925, 2938), False, 'import torch\n'), ((2977, 3005), 'torch.from_numpy', 'torch.from_numpy', (['test_masks'], {}), '(test_masks)\n', (2993, 3005), False, 'import torch\n'), ((3045, 3074), 'torch.from_numpy', 'torch.from_numpy', (['test_labels'], {}), '(test_labels)\n', (3061, 3074), False, 'import torch\n'), ((5289, 5300), 'time.time', 'time.time', ([], {}), '()\n', (5298, 5300), False, 'import time\n'), ((3694, 3719), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3717, 3719), False, 'import torch\n'), ((5170, 5213), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'elapsed_rounded'}), '(seconds=elapsed_rounded)\n', (5188, 5213), False, 'import datetime\n'), ((6779, 6794), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6792, 6794), False, 'import torch\n'), ((6492, 6510), 'numpy.array', 'np.array', (['avg_loss'], {}), '(avg_loss)\n', (6500, 6510), True, 'import numpy as np\n'), ((6532, 6549), 'numpy.array', 'np.array', (['avg_acc'], {}), '(avg_acc)\n', (6540, 6549), True, 'import numpy as np\n'), ((7272, 7289), 'numpy.array', 'np.array', (['avg_acc'], {}), '(avg_acc)\n', (7280, 7289), True, 'import numpy as np\n'), ((4817, 4840), 'torch.max', 'torch.max', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (4826, 4840), False, 'import torch\n'), ((5536, 5547), 'time.time', 'time.time', ([], {}), '()\n', (5545, 5547), False, 'import time\n')] |
#coding:utf-8
import sys
sys.path.insert(1, "./crnn")
import torch
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import util
import dataset
import models.crnn as crnn
import keys
from math import *
import cv2
GPU = False
def dumpRotateImage_(img,degree,pt1,pt2,pt3,pt4):
height,width=img.shape[:2]
heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
matRotation=cv2.getRotationMatrix2D((width/2,height/2),degree,1)
matRotation[0, 2] += (widthNew - width) / 2
matRotation[1, 2] += (heightNew - height) / 2
imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
pt1 = list(pt1)
pt3 = list(pt3)
[[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
[[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
imgOut=imgRotation[int(pt1[1]):int(pt3[1]),int(pt1[0]):int(pt3[0])]
height,width=imgOut.shape[:2]
return imgOut
def crnnSource():
alphabet = keys.alphabet
converter = util.strLabelConverter(alphabet)
if torch.cuda.is_available() and GPU:
model = crnn.CRNN(32, 1, len(alphabet)+1, 256, 1).cuda()
else:
model = crnn.CRNN(32, 1, len(alphabet)+1, 256, 1).cpu()
path = './crnn/samples/model_acc97.pth'
model.eval()
model.load_state_dict(torch.load(path))
return model,converter
##加载模型
model,converter = crnnSource()
def crnnOcr(image):
"""
crnn模型,ocr识别
@@model,
@@converter,
@@im
@@text_recs:text box
"""
scale = image.size[1]*1.0 / 32
w = image.size[0] / scale
w = int(w)
#print "im size:{},{}".format(image.size,w)
transformer = dataset.resizeNormalize((w, 32))
if torch.cuda.is_available() and GPU:
image = transformer(image).cuda()
else:
image = transformer(image).cpu()
image = image.view(1, *image.size())
image = Variable(image)
model.eval()
preds = model(image)
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
if len(sim_pred)>0:
if sim_pred[0]==u'-':
sim_pred=sim_pred[1:]
return sim_pred
| [
"sys.path.insert",
"cv2.warpAffine",
"dataset.resizeNormalize",
"torch.load",
"numpy.array",
"torch.cuda.is_available",
"util.strLabelConverter",
"cv2.getRotationMatrix2D",
"torch.autograd.Variable"
] | [((25, 53), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""./crnn"""'], {}), "(1, './crnn')\n", (40, 53), False, 'import sys\n'), ((540, 599), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(width / 2, height / 2)', 'degree', '(1)'], {}), '((width / 2, height / 2), degree, 1)\n', (563, 599), False, 'import cv2\n'), ((709, 798), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'matRotation', '(widthNew, heightNew)'], {'borderValue': '(255, 255, 255)'}), '(img, matRotation, (widthNew, heightNew), borderValue=(255, \n 255, 255))\n', (723, 798), False, 'import cv2\n'), ((1200, 1232), 'util.strLabelConverter', 'util.strLabelConverter', (['alphabet'], {}), '(alphabet)\n', (1222, 1232), False, 'import util\n'), ((1885, 1917), 'dataset.resizeNormalize', 'dataset.resizeNormalize', (['(w, 32)'], {}), '((w, 32))\n', (1908, 1917), False, 'import dataset\n'), ((2137, 2152), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (2145, 2152), False, 'from torch.autograd import Variable\n'), ((891, 926), 'numpy.array', 'np.array', (['[[pt1[0]], [pt1[1]], [1]]'], {}), '([[pt1[0]], [pt1[1]], [1]])\n', (899, 926), True, 'import numpy as np\n'), ((975, 1010), 'numpy.array', 'np.array', (['[[pt3[0]], [pt3[1]], [1]]'], {}), '([[pt3[0]], [pt3[1]], [1]])\n', (983, 1010), True, 'import numpy as np\n'), ((1240, 1265), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1263, 1265), False, 'import torch\n'), ((1500, 1516), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1510, 1516), False, 'import torch\n'), ((1928, 1953), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1951, 1953), False, 'import torch\n')] |
import argparse
import sys
import time
from multiprocessing import Pool
import numpy as np
import pandas as pd
from terminaltables import *
from dataset import VideoDataSet
from ops.utils import temporal_nms
sys.path.append('./anet_toolkit/Evaluation')
import os
import pdb
import pickle
from anet_toolkit.Evaluation.eval_detection import \
compute_average_precision_detection
from ops.utils import get_configs, softmax
# options
parser = argparse.ArgumentParser(
description="Evaluate detection performance metrics")
parser.add_argument('dataset', type=str, choices=['thumos14', 'muses'])
parser.add_argument('detection_pickles', type=str, nargs='+')
parser.add_argument('--nms_threshold', type=float, default=0.4)
parser.add_argument('--no_regression', default=False, action="store_true")
parser.add_argument('-j', '--ap_workers', type=int, default=16)
parser.add_argument('--top_k', type=int, default=None)
parser.add_argument('--cls_scores', type=str, nargs='+')
parser.add_argument('--reg_scores', type=str, default=None)
parser.add_argument('--cls_top_k', type=int, default=1)
parser.add_argument('--cfg', default='data/dataset_cfg.yml')
parser.add_argument('--score_weights', type=float, default=None, nargs='+')
parser.add_argument('--min_length', type=float, default=None, help='minimum duration of proposals in second')
parser.add_argument('--one_iou', action='store_true')
parser.add_argument('--no_comp', action='store_true')
args = parser.parse_args()
configs = get_configs(args.dataset, args.cfg)
dataset_configs = configs['dataset_configs']
model_configs = configs["model_configs"]
num_class = model_configs['num_class']
nms_threshold = args.nms_threshold if args.nms_threshold else configs['evaluation']['nms_threshold']
top_k = args.top_k if args.top_k else configs['evaluation']['top_k']
print('---'*10)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("initiating evaluation of detection results {}".format(args.detection_pickles))
print('top_k={}'.format(top_k))
sys.stdout.flush()
score_pickle_list = []
for pc in args.detection_pickles:
score_pickle_list.append(pickle.load(open(pc, 'rb')))
if args.score_weights:
weights = np.array(args.score_weights) / sum(args.score_weights)
else:
weights = [1.0/len(score_pickle_list) for _ in score_pickle_list]
def merge_scores(vid):
def merge_part(arrs, index, weights):
if arrs[0][index] is not None:
return np.sum([a[index] * w for a, w in zip(arrs, weights)], axis=0)
else:
return None
arrays = [pc[vid] for pc in score_pickle_list]
act_weights = weights
comp_weights = weights
reg_weights = weights
rel_props = score_pickle_list[0][vid][0]
return rel_props, \
merge_part(arrays, 1, act_weights), \
merge_part(arrays, 2, comp_weights), \
merge_part(arrays, 3, reg_weights)
print('Merge detection scores from {} sources...'.format(len(score_pickle_list)))
detection_scores = {k: merge_scores(k) for k in score_pickle_list[0]}
print('Done.')
if 'deploy_prop_file' in dataset_configs:
prop_file = dataset_configs['deploy_prop_file']
else:
prop_file = dataset_configs['test_prop_file']
if 'deploy_online_slice' in dataset_configs:
online_slice = dataset_configs['deploy_online_slice']
else:
online_slice = dataset_configs.get('online_slice', False)
dataset = VideoDataSet(dataset_configs,
prop_file=prop_file,
ft_path=dataset_configs['train_ft_path'],
test_mode=True)
from functools import reduce
gt_lens = np.array(reduce(lambda x,y: x+y, [[(x.end_frame-x.start_frame)/6 for x in v.gt] for v in dataset.video_list]))
# pdb.set_trace()
dataset_detections = [dict() for i in range(num_class)]
def merge_all_vid_scores(pickle_list):
def merge_op(arrs, index, weights):
if arrs[0][index] is not None:
return np.sum([a[index] * w for a, w in zip(arrs, weights)], axis=0)
else:
return None
out_score_dict = {}
for vid in pickle_list[0]:
arrays = [pc[vid] for pc in pickle_list]
act_weights = weights
comp_weights = weights
reg_weights = weights
rel_props = pickle_list[0][vid][0]
out_score_dict[vid] = [rel_props, \
merge_op(arrays, 1, act_weights), \
merge_op(arrays, 2, comp_weights), \
merge_op(arrays, 3, reg_weights)]
return out_score_dict
if args.cls_scores:
print('Using classifier scores from {}'.format(args.cls_scores))
cls_score_pickle_list = []
for pc in args.cls_scores:
cls_score_pickle_list.append(pickle.load(open(pc, 'rb')))
cls_score_dict = merge_all_vid_scores(cls_score_pickle_list)
# cls_score_pc = pickle.load(open(args.cls_scores, 'rb'), encoding='bytes')
# cls_score_dict = cls_score_pc
# cls_score_dict = {os.path.splitext(os.path.basename(k.decode('utf-8')))[0]:v for k, v in cls_score_pc.items()}
else:
cls_score_dict = None
if args.reg_scores:
print('Using regression scores from {}'.format(args.reg_scores))
reg_score_dict = pickle.load(open(args.reg_scores, 'rb'))
else:
reg_score_dict = None
# generate detection results
def gen_detection_results(video_id, score_tp):
if len(score_tp[0].shape) == 3:
rel_prop = np.squeeze(score_tp[0], 0)
else:
rel_prop = score_tp[0]
# standardize regression scores
reg_scores = score_tp[3]
if reg_scores is None:
reg_scores = np.zeros((len(rel_prop), num_class, 2), dtype=np.float32)
reg_scores = reg_scores.reshape((-1, num_class, 2))
if cls_score_dict is None:
combined_scores = softmax(score_tp[1][:, :])
combined_scores = combined_scores[:,1:]
else:
combined_scores = softmax(cls_score_dict[video_id][1])[:, 1:]
if combined_scores.shape[1] < score_tp[2].shape[1]:
combined_scores = np.concatenate(
(combined_scores, np.zeros([len(combined_scores), score_tp[2].shape[1]-combined_scores.shape[1]])), axis=1)
elif combined_scores.shape[1] > score_tp[2].shape[1]:
combined_scores = combined_scores[:, :score_tp[2].shape[1]]
if not args.no_comp:
combined_scores = combined_scores * np.exp(score_tp[2])
keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
# pdb.set_trace()
delete_short = args.min_length is not None
if delete_short:
print('delete short proposals')
duration = dataset.video_dict[video_id].num_frames / 6
prop_duration = duration * (rel_prop[:,1] - rel_prop[:, 0])
non_short_prop_idx = np.where(prop_duration <= args.min_length)[0]
keep_idx = [x for x in keep_idx if x // num_class in non_short_prop_idx]
# keep_prop_num = len({x//num_class for x in keep_idx})
for k in keep_idx:
cls = k % num_class
prop_idx = k // num_class
if video_id not in dataset_detections[cls]:
dataset_detections[cls][video_id] = np.array([
[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]
])
else:
dataset_detections[cls][video_id] = np.vstack(
[dataset_detections[cls][video_id],
[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]])
return len(keep_idx)
print("Preprocessing detections...")
orig_num_list = []
keep_num_list = []
def mean(x):
return sum(x)/len(x)
for k, v in detection_scores.items():
orig_num = len(v[0])
keep_num = gen_detection_results(k, v)
orig_num_list.append(orig_num)
keep_num_list.append(keep_num)
print('Done. {} videos, avg prop num {:.0f} => {:.0f}'.format(len(detection_scores), mean(orig_num_list), mean(keep_num_list)))
# perform NMS
print("Performing nms with thr {} ...".format(nms_threshold))
for cls in range(num_class):
dataset_detections[cls] = {
k: temporal_nms(v, nms_threshold) for k,v in dataset_detections[cls].items()
}
print("NMS Done.")
def perform_regression(detections):
t0 = detections[:, 0]
t1 = detections[:, 1]
center = (t0 + t1) / 2
duration = (t1 - t0)
new_center = center + duration * detections[:, 3]
new_duration = duration * np.exp(detections[:, 4])
new_detections = np.concatenate((
np.clip(new_center - new_duration / 2, 0, 1)[:, None], np.clip(new_center + new_duration / 2, 0, 1)[:, None], detections[:, 2:]
), axis=1)
return new_detections
# perform regression
if not args.no_regression:
print("Performing location regression...")
for cls in range(num_class):
dataset_detections[cls] = {
k: perform_regression(v) for k, v in dataset_detections[cls].items()
}
print("Regression Done.")
else:
print("Skip regresssion as requested by --no_regression")
# ravel test detections
def ravel_detections(detection_db, cls):
detection_list = []
for vid, dets in detection_db[cls].items():
detection_list.extend([[vid, cls] + x[:3] for x in dets.tolist()])
df = pd.DataFrame(detection_list, columns=["video-id", "cls","t-start", "t-end", "score"])
return df
plain_detections = [ravel_detections(dataset_detections, cls) for cls in range(num_class)]
# get gt
gt_list = []
all_gt = dataset.get_all_gt()
all_gt = pd.DataFrame(all_gt, columns=["video-id", "cls","t-start", "t-end"])
gt_by_cls = []
for cls in range(num_class):
gt_by_cls.append(all_gt[all_gt.cls == cls].reset_index(drop=True).drop('cls', 1))
print(cls, len(gt_by_cls[cls]))
# pdb.set_trace()
pickle.dump(gt_by_cls, open('gt_dump.pc', 'wb'), pickle.HIGHEST_PROTOCOL)
pickle.dump(plain_detections, open('pred_dump.pc', 'wb'), pickle.HIGHEST_PROTOCOL)
print("Calling mean AP calculator from toolkit with {} workers...".format(args.ap_workers))
if args.one_iou:
iou_range = [0.5]
else:
if args.dataset == 'thumos14':
iou_range = np.arange(0.1, 1.0, 0.1)
elif args.dataset == 'muses':
iou_range = [0.3, 0.4, 0.5, 0.6, 0.7]
else:
iou_range = np.arange(0.5, 1.0, 0.05)
# raise ValueError("unknown dataset {}".format(args.dataset))
ap_values = np.zeros((num_class, len(iou_range)))
def eval_ap(iou, iou_idx, cls, gt, predition):
ap = compute_average_precision_detection(gt, predition, iou)
sys.stdout.flush()
return cls, iou_idx, ap
def callback(rst):
sys.stdout.flush()
ap_values[rst[0], rst[1]] = rst[2][0]
pool = Pool(args.ap_workers)
jobs = []
for iou_idx, min_overlap in enumerate(iou_range):
for cls in range(num_class):
if len(gt_by_cls[cls]) == 0:
continue
jobs.append(pool.apply_async(eval_ap, args=([min_overlap], iou_idx, cls, gt_by_cls[cls], plain_detections[cls],),callback=callback))
pool.close()
pool.join()
print("Evaluation done.\n\n")
map_iou = ap_values.mean(axis=0)
per_cls_map = ap_values.mean(axis=1)
#
# for
display_title = "Detection Performance on {}".format(args.dataset)
display_data = [["IoU thresh"], ["mAP"]]
for i in range(len(iou_range)):
display_data[0].append("{:.02f}".format(iou_range[i]))
display_data[1].append("{:.04f}".format(map_iou[i]))
display_data[0].append('Average')
display_data[1].append("{:.04f}".format(map_iou.mean()))
table = AsciiTable(display_data, display_title)
table.justify_columns[-1] = 'right'
table.inner_footing_row_border = True
print(table.table)
# first_line = '\t'.join(['iou'], ['{:.02f}'])
print('Per-class average AP over all iou thresholds')
for i,x in enumerate(per_cls_map):
print('%.4f' % x, end='\t')
print(time.strftime('%Y-%m-%d %H:%M:%S') + ' Done')
| [
"numpy.clip",
"numpy.array",
"sys.path.append",
"ops.utils.temporal_nms",
"numpy.arange",
"dataset.VideoDataSet",
"argparse.ArgumentParser",
"numpy.where",
"numpy.exp",
"numpy.vstack",
"pandas.DataFrame",
"sys.stdout.flush",
"functools.reduce",
"ops.utils.get_configs",
"numpy.squeeze",
... | [((211, 255), 'sys.path.append', 'sys.path.append', (['"""./anet_toolkit/Evaluation"""'], {}), "('./anet_toolkit/Evaluation')\n", (226, 255), False, 'import sys\n'), ((450, 527), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate detection performance metrics"""'}), "(description='Evaluate detection performance metrics')\n", (473, 527), False, 'import argparse\n'), ((1493, 1528), 'ops.utils.get_configs', 'get_configs', (['args.dataset', 'args.cfg'], {}), '(args.dataset, args.cfg)\n', (1504, 1528), False, 'from ops.utils import get_configs, softmax\n'), ((2002, 2020), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2018, 2020), False, 'import sys\n'), ((3380, 3493), 'dataset.VideoDataSet', 'VideoDataSet', (['dataset_configs'], {'prop_file': 'prop_file', 'ft_path': "dataset_configs['train_ft_path']", 'test_mode': '(True)'}), "(dataset_configs, prop_file=prop_file, ft_path=dataset_configs[\n 'train_ft_path'], test_mode=True)\n", (3392, 3493), False, 'from dataset import VideoDataSet\n'), ((9524, 9593), 'pandas.DataFrame', 'pd.DataFrame', (['all_gt'], {'columns': "['video-id', 'cls', 't-start', 't-end']"}), "(all_gt, columns=['video-id', 'cls', 't-start', 't-end'])\n", (9536, 9593), True, 'import pandas as pd\n'), ((10674, 10695), 'multiprocessing.Pool', 'Pool', (['args.ap_workers'], {}), '(args.ap_workers)\n', (10678, 10695), False, 'from multiprocessing import Pool\n'), ((1848, 1882), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1861, 1882), False, 'import time\n'), ((3598, 3712), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[[((x.end_frame - x.start_frame) / 6) for x in v.gt] for v in dataset.\n video_list]'], {}), '(lambda x, y: x + y, [[((x.end_frame - x.start_frame) / 6) for x in v\n .gt] for v in dataset.video_list])\n', (3604, 3712), False, 'from functools import reduce\n'), ((9267, 9357), 'pandas.DataFrame', 'pd.DataFrame', (['detection_list'], 
{'columns': "['video-id', 'cls', 't-start', 't-end', 'score']"}), "(detection_list, columns=['video-id', 'cls', 't-start', 't-end',\n 'score'])\n", (9279, 9357), True, 'import pandas as pd\n'), ((10473, 10528), 'anet_toolkit.Evaluation.eval_detection.compute_average_precision_detection', 'compute_average_precision_detection', (['gt', 'predition', 'iou'], {}), '(gt, predition, iou)\n', (10508, 10528), False, 'from anet_toolkit.Evaluation.eval_detection import compute_average_precision_detection\n'), ((10533, 10551), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10549, 10551), False, 'import sys\n'), ((10605, 10623), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10621, 10623), False, 'import sys\n'), ((2175, 2203), 'numpy.array', 'np.array', (['args.score_weights'], {}), '(args.score_weights)\n', (2183, 2203), True, 'import numpy as np\n'), ((5339, 5365), 'numpy.squeeze', 'np.squeeze', (['score_tp[0]', '(0)'], {}), '(score_tp[0], 0)\n', (5349, 5365), True, 'import numpy as np\n'), ((5693, 5719), 'ops.utils.softmax', 'softmax', (['score_tp[1][:, :]'], {}), '(score_tp[1][:, :])\n', (5700, 5719), False, 'from ops.utils import get_configs, softmax\n'), ((8124, 8154), 'ops.utils.temporal_nms', 'temporal_nms', (['v', 'nms_threshold'], {}), '(v, nms_threshold)\n', (8136, 8154), False, 'from ops.utils import temporal_nms\n'), ((8449, 8473), 'numpy.exp', 'np.exp', (['detections[:, 4]'], {}), '(detections[:, 4])\n', (8455, 8473), True, 'import numpy as np\n'), ((10128, 10152), 'numpy.arange', 'np.arange', (['(0.1)', '(1.0)', '(0.1)'], {}), '(0.1, 1.0, 0.1)\n', (10137, 10152), True, 'import numpy as np\n'), ((11794, 11828), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (11807, 11828), False, 'import time\n'), ((5804, 5840), 'ops.utils.softmax', 'softmax', (['cls_score_dict[video_id][1]'], {}), '(cls_score_dict[video_id][1])\n', (5811, 5840), False, 'from ops.utils import get_configs, softmax\n'), 
((6281, 6300), 'numpy.exp', 'np.exp', (['score_tp[2]'], {}), '(score_tp[2])\n', (6287, 6300), True, 'import numpy as np\n'), ((6652, 6694), 'numpy.where', 'np.where', (['(prop_duration <= args.min_length)'], {}), '(prop_duration <= args.min_length)\n', (6660, 6694), True, 'import numpy as np\n'), ((7030, 7189), 'numpy.array', 'np.array', (['[[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx,\n cls], reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]]'], {}), '([[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[\n prop_idx, cls], reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls,\n 1]]])\n', (7038, 7189), True, 'import numpy as np\n'), ((7289, 7484), 'numpy.vstack', 'np.vstack', (['[dataset_detections[cls][video_id], [rel_prop[prop_idx, 0], rel_prop[\n prop_idx, 1], combined_scores[prop_idx, cls], reg_scores[prop_idx, cls,\n 0], reg_scores[prop_idx, cls, 1]]]'], {}), '([dataset_detections[cls][video_id], [rel_prop[prop_idx, 0],\n rel_prop[prop_idx, 1], combined_scores[prop_idx, cls], reg_scores[\n prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]])\n', (7298, 7484), True, 'import numpy as np\n'), ((10268, 10293), 'numpy.arange', 'np.arange', (['(0.5)', '(1.0)', '(0.05)'], {}), '(0.5, 1.0, 0.05)\n', (10277, 10293), True, 'import numpy as np\n'), ((8521, 8565), 'numpy.clip', 'np.clip', (['(new_center - new_duration / 2)', '(0)', '(1)'], {}), '(new_center - new_duration / 2, 0, 1)\n', (8528, 8565), True, 'import numpy as np\n'), ((8576, 8620), 'numpy.clip', 'np.clip', (['(new_center + new_duration / 2)', '(0)', '(1)'], {}), '(new_center + new_duration / 2, 0, 1)\n', (8583, 8620), True, 'import numpy as np\n')] |
from multitask_data_collator import DataLoaderWithTaskname
import nlp
import numpy as np
import torch
import transformers
from datasets import load_metric
import pandas as pd
def multitask_eval_fn(multitask_model, model_name, features_dict, batch_size=8):
preds_dict = {}
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
metric = load_metric("rmse")
for idx, task_name in enumerate(["BERTScore", "CushLEPOR", "COMET", "TransQuest"]):
val_len = len(features_dict[task_name]["validation"])
eval = 0
for index in range(0, val_len, batch_size):
batch = features_dict[task_name]["validation"][index : min(index + batch_size, val_len)]["doc"]
labels = features_dict[task_name]["validation"][index : min(index + batch_size, val_len)]["target"]
inputs = tokenizer(batch, max_length=512)
inputs["input_ids"] = torch.LongTensor(inputs["input_ids"]).cuda()
inputs["attention_mask"] = torch.LongTensor(inputs["attention_mask"]).cuda()
logits = multitask_model(task_name, **inputs).logits
predictions = torch.argmax(torch.FloatTensor(torch.softmax(logits, dim=1).detach().cpu().tolist()),dim=1)
metric.add_batch(predictions=predictions, references=np.array(labels))
print(f"\nRMSE value for current batch: {metric.compute()['rmse']}")
eval += metric.compute()["rmse"]
print("\nCurrent total RMSE value: {eval}")
eval = eval/val_len
preds_dict[task_name] = eval
print(f"\nTask name: {task_name}\tFinal RMSE: {eval}\n\n")
preds = pd.DataFrame.from_dict(preds_dict)
print(preds)
| [
"datasets.load_metric",
"torch.LongTensor",
"pandas.DataFrame.from_dict",
"torch.softmax",
"numpy.array",
"transformers.AutoTokenizer.from_pretrained"
] | [((293, 347), 'transformers.AutoTokenizer.from_pretrained', 'transformers.AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (335, 347), False, 'import transformers\n'), ((362, 381), 'datasets.load_metric', 'load_metric', (['"""rmse"""'], {}), "('rmse')\n", (373, 381), False, 'from datasets import load_metric\n'), ((1767, 1801), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['preds_dict'], {}), '(preds_dict)\n', (1789, 1801), True, 'import pandas as pd\n'), ((941, 978), 'torch.LongTensor', 'torch.LongTensor', (["inputs['input_ids']"], {}), "(inputs['input_ids'])\n", (957, 978), False, 'import torch\n'), ((1025, 1067), 'torch.LongTensor', 'torch.LongTensor', (["inputs['attention_mask']"], {}), "(inputs['attention_mask'])\n", (1041, 1067), False, 'import torch\n'), ((1382, 1398), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1390, 1398), True, 'import numpy as np\n'), ((1223, 1251), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1236, 1251), False, 'import torch\n')] |
"""
"""
import argparse
import numpy as np
def main(fname, limit):
"""
"""
lines = None
with open(fname) as f:
lines = f.readlines()
t = []
a = []
b = []
c = []
s = []
state = 'HEADER'
for line in lines:
# Check state
if state == 'HEADER':
# Look for pattern keyword
if 'PATTERN' in line:
state = 'DATA'
elif state == 'DATA':
# Pattern ends in semicolon
if t and t[-1] >= limit or ';' in line:
state = 'DONE'
break
# Split string
t_, line = line.split('>')
line, s_ = line.split('=')
a_, b_, c_ = line.split()
# Convert to float and append
t.append(float(t_.strip()))
if 'X' not in a_:
a.append(int(a_.strip(), 16))
if 'X' not in b_:
b.append(int(b_.strip(), 16))
if 'X' not in c_:
c.append(int(c_.strip(), 16))
if 'X' not in s_:
s.append(int(s_.strip(), 16))
if state != 'DONE':
raise RuntimeError('Badness')
tint = np.diff(t)
sb = []
for i in range(0, 33):
bits = []
for output in s:
bits.append((output >> i) & 1)
sb.append(bits)
for i, bits in reversed(list(enumerate(sb))):
print('S[%d] &' % i)
for interval, value in zip(tint, bits):
print('\t%g%s' % (interval, 'H' if value else 'L'), end=' ')
print('\n\t\\\\')
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fname', '-f', type=str, default=None)
parser.add_argument('--limit', '-l', type=float, default=200.0)
args = parser.parse_args()
main(args.fname, args.limit)
| [
"numpy.diff",
"argparse.ArgumentParser"
] | [((919, 929), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (926, 929), True, 'import numpy as np\n'), ((1280, 1305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1303, 1305), False, 'import argparse\n')] |
import pandas as pd
import numpy as np
import pickle
x = np.arange(0,15).reshape(5,3)
y = np.array([7,15,15,23,11]).reshape(-1,1)
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(x,y)
pickle.dump(reg,open('model.pkl','wb'))
model = pickle.load(open('model.pkl','rb'))
c=model.predict(np.array([1,5,4]).reshape(-1,3)) | [
"numpy.array",
"sklearn.linear_model.LinearRegression",
"numpy.arange"
] | [((190, 208), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (206, 208), False, 'from sklearn.linear_model import LinearRegression\n'), ((60, 76), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (69, 76), True, 'import numpy as np\n'), ((93, 122), 'numpy.array', 'np.array', (['[7, 15, 15, 23, 11]'], {}), '([7, 15, 15, 23, 11])\n', (101, 122), True, 'import numpy as np\n'), ((325, 344), 'numpy.array', 'np.array', (['[1, 5, 4]'], {}), '([1, 5, 4])\n', (333, 344), True, 'import numpy as np\n')] |
"""Script that calculates the parameters for a log normal distribution given the input
To use: python calculate_parameters file1.csv file2.csv ... fileN.csv [optional output_dir=output]
The details of the calculations in this script are in the appendix of the docs.
"""
import sys, csv
from scipy.optimize import minimize, Bounds, NonlinearConstraint
from scipy.stats import norm, lognorm
import numpy as np
def main(files, output_dir):
to_ignore = 0
for file in files:
company_sizes = read_file(file)
parameters = {}
options = []
for key, size_dist in company_sizes.items():
option_1 = max_likelihood(size_dist)
option_2 = match_expectation(size_dist)
options.append((option_1, option_2))
if option_1 is not None:
var = lognorm.var(option_1[1],scale=np.exp(option_1[0]))
elif option_2 is not None:
option_1 = option_2
var = lognorm.var(option_2[1],scale=np.exp(option_2[0]))
else:
continue
if option_1[0] == 0 and option_2[1] == 1:
for n in size_dist.values():
to_ignore += n
print('ignoring ' + key)
else:
parameters[key] = option_1
#max_likelyhood(size_dist)
with open(output_dir + file[:-4] + '_out.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
for key, params in parameters.items():
writer.writerow([key, params[0], params[1]])
print(to_ignore)
""" size_dist parameter is a dictionary with form {'lower-upper': n, ... 'lower+': n}
like the ONS size distributions
return mean and standard deviation (not variance)
"""
def match_expectation(size_dist):
result = minimize(lambda x: expectation_difference(x, size_dist), (0, 1), bounds=Bounds([-np.inf, 0], [np.inf, np.inf]))
if result.success:
return result.x
else:
return None
def max_likelihood(size_dist, distribution_mean=None):
""" Returns the estimated mean, sd from size dist
Arguments
------------
size_dist: dict of the form {str: float or int} where the string is 'a_i-a_i+1' or 'a_n+' and the float or int is the proportion or number of companies in that bin.
(optional) distribution_mean: if the mean of the distribution is known then this is a constraint that can be used to improve the estimation.
"""
if distribution_mean is None:
result = minimize(lambda x: -likelihood(x, size_dist), (0.5, 1.5), jac=lambda x: -likelihood_jacobian(x, size_dist), bounds=Bounds([-np.inf, 0], [np.inf, np.inf]))
else:
result = minimize(lambda x: -likelihood(x, size_dist), (0.5, 1.5), jac=lambda x: -likelihood_jacobian(x, size_dist), bounds=Bounds([-np.inf, 0], [np.inf, np.inf]), constraints={'type': 'eq', 'fun': lambda x: np.exp(x[0] + x[1] ** 2 / 2) - distribution_mean})
#print(result)
if result.success:
return result.x
else:
return None
def likelihood(params, size_dist):
mean, sd = params
total = 0
for size_band, n in size_dist.items():
if '-' in size_band:
lower = int(size_band.split('-')[0])
upper = int(size_band.split('-')[1]) + 1
else:
lower = int(size_band.split('+')[0])
upper = np.inf
if upper == np.inf:
x = 1 - norm.cdf((np.log(lower) - mean) / sd)
elif lower == 0:
x = norm.cdf((np.log(upper) - mean) / sd)
else:
x = norm.cdf((np.log(upper) - mean) / sd) - norm.cdf((np.log(lower) - mean) / sd)
#print(x)
total += n * np.log(x)
return total
def likelihood_jacobian(params, size_dist):
jacobian = np.zeros(2)
mean, sd = params
for size_band, n in size_dist.items():
if '-' in size_band:
lower = int(size_band.split('-')[0])
upper = int(size_band.split('-')[1]) + 1
else:
lower = int(size_band.split('+')[0])
upper = np.inf
if upper == np.inf:
d_l = n / (1 - norm.cdf((np.log(lower) - mean) / sd))
jacobian[0] += -d_l * (- norm.pdf((np.log(lower) - mean) / sd)) / sd
jacobian[1] += -d_l * (- norm.pdf((np.log(lower) - mean) / sd) * (np.log(lower) - mean)) / sd ** 2
elif lower == 0:
d_l = n / (norm.cdf((np.log(upper) - mean) / sd))
jacobian[0] += -d_l * (norm.pdf((np.log(upper) - mean) / sd)) / sd
jacobian[1] += -d_l * (norm.pdf((np.log(upper) - mean) / sd) * (np.log(upper) - mean)) / sd ** 2
else:
d_l = n / (norm.cdf((np.log(upper) - mean) / sd) - norm.cdf((np.log(lower) - mean) / sd))
jacobian[0] += -d_l * (norm.pdf((np.log(upper) - mean) / sd) - norm.pdf((np.log(lower) - mean) / sd)) / sd
jacobian[1] += -d_l * (norm.pdf((np.log(upper) - mean) / sd) * (np.log(upper) - mean) - norm.pdf((np.log(lower) - mean) / sd) * (np.log(lower) - mean)) / sd ** 2
return jacobian
def expectation_difference(params, size_dist):
mean, sd = params
expectation = []
actual = []
total = 0
for size_band, n in size_dist.items():
total += n
if '-' in size_band:
lower = int(size_band.split('-')[0])
upper = int(size_band.split('-')[1]) + 1
else:
lower = int(size_band.split('+')[0])
upper = np.inf
expectation.append(lognorm.cdf(upper, sd, scale=np.exp(mean)) - lognorm.cdf(lower, sd, scale=np.exp(mean)))
actual.append(n)
return ((total * np.array(expectation) - np.array(actual)) ** 2).mean()
def read_file(file):
base_sizes = ['0-4', '5-9', '10-19', '20-49', '50-99', '100-249']
with open(file, 'r') as csvfile:
reader = csv.reader(csvfile)
first_line = next(reader)
csvfile.seek(0)
read_as_dict = '0-4' in first_line
if read_as_dict:
reader = csv.DictReader(csvfile)
id_key = first_line[0]
if len(first_line) == 8 or len(first_line) == 9:
sizes = base_sizes + ['250+']
elif len(first_line) == 10 or len(first_line) == 11:
sizes = base_sizes + ['250-499', '499-999', '1000+']
else:
raise ValueError('Line length for' + file + ' does not match size distribution table')
file_data = {}
for line in reader:
if read_as_dict:
file_data[line[id_key]] = {size: float(line[size]) for size in sizes}
else:
file_data[line[0]] = {size: float(line[i]) for i, size in sizes}
return file_data
if __name__ == '__main__':
options = [s for s in sys.argv[1:] if '=' in s]
for option in options:
if option.split('=')[0] != 'output_dir':
print('Unrecognised option: ' + option.split('=')[0])
exit()
else:
output_dir = str.join(option.split('=')[1:])
else:
output_dir = './'
files = sys.argv[1:]
main(files, output_dir)
| [
"csv.DictReader",
"numpy.log",
"csv.writer",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"scipy.optimize.Bounds",
"csv.reader"
] | [((3820, 3831), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3828, 3831), True, 'import numpy as np\n'), ((5889, 5908), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (5899, 5908), False, 'import sys, csv\n'), ((1442, 1461), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1452, 1461), False, 'import sys, csv\n'), ((1901, 1939), 'scipy.optimize.Bounds', 'Bounds', (['[-np.inf, 0]', '[np.inf, np.inf]'], {}), '([-np.inf, 0], [np.inf, np.inf])\n', (1907, 1939), False, 'from scipy.optimize import minimize, Bounds, NonlinearConstraint\n'), ((3731, 3740), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (3737, 3740), True, 'import numpy as np\n'), ((6056, 6079), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (6070, 6079), False, 'import sys, csv\n'), ((2665, 2703), 'scipy.optimize.Bounds', 'Bounds', (['[-np.inf, 0]', '[np.inf, np.inf]'], {}), '([-np.inf, 0], [np.inf, np.inf])\n', (2671, 2703), False, 'from scipy.optimize import minimize, Bounds, NonlinearConstraint\n'), ((2847, 2885), 'scipy.optimize.Bounds', 'Bounds', (['[-np.inf, 0]', '[np.inf, np.inf]'], {}), '([-np.inf, 0], [np.inf, np.inf])\n', (2853, 2885), False, 'from scipy.optimize import minimize, Bounds, NonlinearConstraint\n'), ((5708, 5724), 'numpy.array', 'np.array', (['actual'], {}), '(actual)\n', (5716, 5724), True, 'import numpy as np\n'), ((860, 879), 'numpy.exp', 'np.exp', (['option_1[0]'], {}), '(option_1[0])\n', (866, 879), True, 'import numpy as np\n'), ((5574, 5586), 'numpy.exp', 'np.exp', (['mean'], {}), '(mean)\n', (5580, 5586), True, 'import numpy as np\n'), ((5619, 5631), 'numpy.exp', 'np.exp', (['mean'], {}), '(mean)\n', (5625, 5631), True, 'import numpy as np\n'), ((5684, 5705), 'numpy.array', 'np.array', (['expectation'], {}), '(expectation)\n', (5692, 5705), True, 'import numpy as np\n'), ((1008, 1027), 'numpy.exp', 'np.exp', (['option_2[0]'], {}), '(option_2[0])\n', (1014, 1027), True, 'import numpy as np\n'), ((2931, 2959), 
'numpy.exp', 'np.exp', (['(x[0] + x[1] ** 2 / 2)'], {}), '(x[0] + x[1] ** 2 / 2)\n', (2937, 2959), True, 'import numpy as np\n'), ((3475, 3488), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (3481, 3488), True, 'import numpy as np\n'), ((3554, 3567), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (3560, 3567), True, 'import numpy as np\n'), ((4372, 4385), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (4378, 4385), True, 'import numpy as np\n'), ((3622, 3635), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (3628, 3635), True, 'import numpy as np\n'), ((3662, 3675), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (3668, 3675), True, 'import numpy as np\n'), ((4184, 4197), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (4190, 4197), True, 'import numpy as np\n'), ((4463, 4476), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4469, 4476), True, 'import numpy as np\n'), ((4647, 4660), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4653, 4660), True, 'import numpy as np\n'), ((4260, 4273), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (4266, 4273), True, 'import numpy as np\n'), ((4537, 4550), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4543, 4550), True, 'import numpy as np\n'), ((4727, 4740), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4733, 4740), True, 'import numpy as np\n'), ((4767, 4780), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (4773, 4780), True, 'import numpy as np\n'), ((4991, 5004), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4997, 5004), True, 'import numpy as np\n'), ((5056, 5069), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (5062, 5069), True, 'import numpy as np\n'), ((4341, 4354), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (4347, 4354), True, 'import numpy as np\n'), ((4616, 4629), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4622, 4629), True, 'import numpy as np\n'), ((4841, 4854), 'numpy.log', 'np.log', (['upper'], {}), 
'(upper)\n', (4847, 4854), True, 'import numpy as np\n'), ((4881, 4894), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (4887, 4894), True, 'import numpy as np\n'), ((4960, 4973), 'numpy.log', 'np.log', (['upper'], {}), '(upper)\n', (4966, 4973), True, 'import numpy as np\n'), ((5025, 5038), 'numpy.log', 'np.log', (['lower'], {}), '(lower)\n', (5031, 5038), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.