code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""read-data.py: Module is used to fetch the images from the SDO data store"""
__author__ = "<NAME>."
__copyright__ = "Copyright 2021, Shibaji"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import os
import datetime as dt
import argparse
from dateutil import parser as prs
from lxml import html
import requests
import glob
import numpy as np
class SDOFile(object):
""" Class that holds SDO file objects """
def __init__(self, _dict_):
for p in _dict_.keys():
setattr(self, p, _dict_[p])
self.uri = "https://sdo.gsfc.nasa.gov/assets/img/browse/"
self._fetch_file_list_()
self.folder = "data/SDO-Database/{:4d}.{:02d}.{:02d}/{:d}/{:04d}/".format(self.date.year, self.date.month,
self.date.day, self.resolution,
self.wavelength)
if not os.path.exists("data/SDO-Database/"): os.system("mkdir -p data/SDO-Database/")
if not os.path.exists(self.folder): os.system("mkdir -p " + self.folder)
return
def _fetch_file_list_(self):
uri = self.uri + "index.php?b={:4d}%2F{:02d}%2F{:02d}".format(self.date.year,
self.date.month,
self.date.day)
print(" URI:", uri)
page = requests.get(uri)
tree = html.fromstring(page.content)
self.filenames = tree.xpath("//a[@class=\"name file\"]/text()")
self.hrefs = []
for a in tree.xpath("//a[@class=\"name file\"]"):
items = a.items()
for item in items:
if item[0] == "href": self.hrefs.append(self.uri + item[1])
return
def fetch(self):
tag = "{:d}_{:04d}.jpg".format(self.resolution, self.wavelength)
for href, fname in zip(self.hrefs, self.filenames):
if tag in href: self._download_sdo_data_(href, fname)
return self
def _download_sdo_data_(self, h, fname):
print(" Downloading from:", h)
r = requests.get(h)
with open(self.folder + fname,"wb") as f: f.write(r.content)
return
def fetch_sdo(_dict_):
""" Parse SDO files from remote """
sdo = SDOFile(_dict_)
sdo.fetch()
return
def get_files(dates=[dt.datetime(2018,5,30)], resolution=1024, wavelength=193):
""" Get data file names and location """
files = []
for date in dates:
folder = "data/SDO-Database/{:4d}.{:02d}.{:02d}/{:d}/{:04d}/".format(date.year, date.month, date.day, resolution, wavelength)
_fs = [f.split("/")[-1] for f in glob.glob(folder + "*_{:d}_{:04d}.jpg".format(resolution, wavelength))]
_fdates = [np.abs(dt.datetime.strptime(f.split("_")[0]+f.split("_")[1], "%Y%m%d%H%M%S")-date).total_seconds() for f in _fs]
_ix = np.argmin(_fdates)
files.append(folder+_fs[_ix])
return files
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-dn", "--date", default=dt.datetime(2018,5,30), help="Date [2015-3-11]", type=prs.parse)
parser.add_argument("-r", "--resolution", default=1024, help="Resolution of the files [512]", type=int)
parser.add_argument("-w", "--wavelength", default=193, help="Wavelength of the files [193]", type=int)
args = parser.parse_args()
_dict_ = {}
print("\n Parameter list ")
for k in vars(args).keys():
print(" " + k + "->" + str(vars(args)[k]))
_dict_[k] = vars(args)[k]
fetch_sdo(_dict_)
pass | [
"argparse.ArgumentParser",
"os.path.exists",
"os.system",
"numpy.argmin",
"datetime.datetime",
"lxml.html.fromstring",
"requests.get"
] | [((3225, 3250), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3248, 3250), False, 'import argparse\n'), ((1621, 1638), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (1633, 1638), False, 'import requests\n'), ((1654, 1683), 'lxml.html.fromstring', 'html.fromstring', (['page.content'], {}), '(page.content)\n', (1669, 1683), False, 'from lxml import html\n'), ((2336, 2351), 'requests.get', 'requests.get', (['h'], {}), '(h)\n', (2348, 2351), False, 'import requests\n'), ((2575, 2599), 'datetime.datetime', 'dt.datetime', (['(2018)', '(5)', '(30)'], {}), '(2018, 5, 30)\n', (2586, 2599), True, 'import datetime as dt\n'), ((3110, 3128), 'numpy.argmin', 'np.argmin', (['_fdates'], {}), '(_fdates)\n', (3119, 3128), True, 'import numpy as np\n'), ((1052, 1088), 'os.path.exists', 'os.path.exists', (['"""data/SDO-Database/"""'], {}), "('data/SDO-Database/')\n", (1066, 1088), False, 'import os\n'), ((1090, 1130), 'os.system', 'os.system', (['"""mkdir -p data/SDO-Database/"""'], {}), "('mkdir -p data/SDO-Database/')\n", (1099, 1130), False, 'import os\n'), ((1146, 1173), 'os.path.exists', 'os.path.exists', (['self.folder'], {}), '(self.folder)\n', (1160, 1173), False, 'import os\n'), ((1175, 1211), 'os.system', 'os.system', (["('mkdir -p ' + self.folder)"], {}), "('mkdir -p ' + self.folder)\n", (1184, 1211), False, 'import os\n'), ((3300, 3324), 'datetime.datetime', 'dt.datetime', (['(2018)', '(5)', '(30)'], {}), '(2018, 5, 30)\n', (3311, 3324), True, 'import datetime as dt\n')] |
"""
Tests module grey
More tests needed
# Author: <NAME>
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
__version__ = "$Revision$"
import importlib
import numpy
import scipy
import unittest
import numpy.testing as np_test
import pyto
from pyto.segmentation.grey import Grey
from pyto.segmentation.segment import Segment
from pyto.segmentation.morphology import Morphology
from pyto.segmentation.contact import Contact
from pyto.segmentation.test import common
class TestGrey(np_test.TestCase):
"""
"""
def setUp(self):
importlib.reload(common) # to avoid problems when running multiple tests
self.shapes = common.make_shapes()
self.grey = common.make_grey()
def testSegmentStats(self):
actual = self.grey.getSegmentStats(segment=self.shapes)
np_test.assert_almost_equal(actual.mean[self.shapes.ids],
[ 22., 27., 84.92307692, 79.57142857])
np_test.assert_almost_equal(actual.std[self.shapes.ids],
[8.206, 12.79, 8.22, 11.22], decimal=2)
np_test.assert_almost_equal(actual.min[self.shapes.ids],
[ 11., 6., 72., 65.])
np_test.assert_almost_equal(actual.max[self.shapes.ids],
[ 33., 48., 96., 98.])
def testSegmentDensitySimple(self):
actual = self.grey.getSegmentDensitySimple(segments=self.shapes)
np_test.assert_almost_equal(actual.mean[self.shapes.ids],
[ 22., 27., 84.92307692, 79.57142857])
np_test.assert_almost_equal(actual.std[self.shapes.ids],
[8.206, 12.79, 8.22, 11.22], decimal=2)
np_test.assert_almost_equal(actual.min[self.shapes.ids],
[ 11., 6., 72., 65.])
np_test.assert_almost_equal(actual.max[self.shapes.ids],
[ 33., 48., 96., 98.])
np_test.assert_equal(actual.volume[self.shapes.ids],
[ 9, 21, 13, 7])
def testLabelByBins(self):
"""
Tests labelByBins()
"""
# 1-parameter segmentation
values = numpy.array([[-1, 1, 2, 2, 3, 3],
[1, 1, 2, -1, 3, -1],
[1, 1, 2, 2, 3, -1],
[-1, 1, 2, 2, 3, 3]])
# simple
labels, bins = Grey.labelByBins(values=values,
bins=[1,2,3,4])
desired = numpy.array([[0, 1, 2, 2, 3, 3],
[1, 1, 2, 0, 3, 0],
[1, 1, 2, 2, 3, 0],
[0, 1, 2, 2, 3, 3]])
np_test.assert_equal(labels, desired)
desired_bins = {1:[1,2], 2:[2,3], 3:[3,4]}
np_test.assert_equal(bins, desired_bins)
# masked array
values = numpy.ma.array(values, mask=(values==2))
labels, bins = Grey.labelByBins(values=values,
bins=[1,2,3,4])
desired = numpy.array([[0, 1, 0, 0, 3, 3],
[1, 1, 0, 0, 3, 0],
[1, 1, 0, 0, 3, 0],
[0, 1, 0, 0, 3, 3]])
np_test.assert_equal(labels, desired)
desired_bins = {1:[1,2], 2:[2,3], 3:[3,4]}
np_test.assert_equal(bins, desired_bins)
# multi-parameter segmentation
values = numpy.zeros(shape=(2, 4, 6), dtype=int)
values[0] = numpy.array([[-1, 1, 2, 2, 3, 3],
[1, 1, 2, -1, 3, -1],
[1, 1, 2, 2, 3, -1],
[-1, 1, 2, 2, 3, 3]])
values[1] = numpy.array([[-1, 1, 1, 1, 1, 1],
[2, 2, 2, -1, 2, -1],
[2, 3, 3, 3, 3, -1],
[-1, 4, 4, 4, 3, 4]])
# simple
labels, bins = Grey.labelByBins(values=values,
bins=[[1,2,3,4], [1,2,3,4,5]])
desired = numpy.array([[0, 1, 5, 5, 9, 9],
[2, 2, 6, 0, 10, 0],
[2, 3, 7, 7, 11, 0],
[0, 4, 8, 8, 11, 12]])
np_test.assert_equal(labels, desired)
desired = {
1:[[1,2],[1,2]], 2:[[1,2],[2,3]], 3:[[1,2],[3,4]], 4:[[1,2],[4,5]],
5:[[2,3],[1,2]], 6:[[2,3],[2,3]], 7:[[2,3],[3,4]], 8:[[2,3],[4,5]],
9:[[3,4],[1,2]], 10:[[3,4],[2,3]], 11:[[3,4],[3,4]],
12:[[3,4],[4,5]]}
np_test.assert_equal(bins, desired)
# check upper limit
labels, bins = Grey.labelByBins(values=values,
bins=[[1,2,3], [1,2,3]])
desired = numpy.array([[0, 1, 3, 3, 3, 3],
[2, 2, 4, 0, 4, 0],
[2, 2, 4, 4, 4, 0],
[0, 0, 0, 0, 4, 0]])
np_test.assert_equal(labels, desired)
desired_bins = {
1:[[1,2],[1,2]], 2:[[1,2],[2,3]],
3:[[2,3],[1,2]], 4:[[2,3],[2,3]]}
np_test.assert_equal(bins, desired_bins)
# check masked arrays, use the above desired
mask = numpy.array(2 * [values[0] == 2])
ma_values = numpy.ma.array(values, mask=mask)
labels, bins = Grey.labelByBins(values=ma_values,
bins=[[1,2,3], [1,2,3]])
desired = numpy.array([[0, 1, 0, 0, 3, 3],
[2, 2, 0, 0, 4, 0],
[2, 2, 0, 0, 4, 0],
[0, 0, 0, 0, 4, 0]])
np_test.assert_equal(labels, desired)
np_test.assert_equal(bins, desired_bins)
# check implicit bins
labels, bins = Grey.labelByBins(values=values,
bins=[[1,2,3]])
desired = numpy.array([[0, 1, 2, 2, 2, 2],
[1, 1, 2, 0, 2, 0],
[1, 1, 2, 2, 2, 0],
[0, 1, 2, 2, 2, 2]])
np_test.assert_equal(labels, desired)
desired_bins = {
1:[[1,2]], 2:[[2,3]]}
np_test.assert_equal(bins, desired_bins)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGrey)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"unittest.TextTestRunner",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.ma.array",
"importlib.reload",
"pyto.segmentation.test.common.make_grey",
"numpy.array",
"pyto.segmentation.grey.Grey.labelByBins",
"numpy.testing.assert_equal",
"unittest.TestLoader",
"pyto.segmentation.test.c... | [((592, 616), 'importlib.reload', 'importlib.reload', (['common'], {}), '(common)\n', (608, 616), False, 'import importlib\n'), ((687, 707), 'pyto.segmentation.test.common.make_shapes', 'common.make_shapes', ([], {}), '()\n', (705, 707), False, 'from pyto.segmentation.test import common\n'), ((728, 746), 'pyto.segmentation.test.common.make_grey', 'common.make_grey', ([], {}), '()\n', (744, 746), False, 'from pyto.segmentation.test import common\n'), ((862, 964), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.mean[self.shapes.ids]', '[22.0, 27.0, 84.92307692, 79.57142857]'], {}), '(actual.mean[self.shapes.ids], [22.0, 27.0, \n 84.92307692, 79.57142857])\n', (889, 964), True, 'import numpy.testing as np_test\n'), ((1006, 1107), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.std[self.shapes.ids]', '[8.206, 12.79, 8.22, 11.22]'], {'decimal': '(2)'}), '(actual.std[self.shapes.ids], [8.206, 12.79, \n 8.22, 11.22], decimal=2)\n', (1033, 1107), True, 'import numpy.testing as np_test\n'), ((1147, 1233), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.min[self.shapes.ids]', '[11.0, 6.0, 72.0, 65.0]'], {}), '(actual.min[self.shapes.ids], [11.0, 6.0, 72.0, \n 65.0])\n', (1174, 1233), True, 'import numpy.testing as np_test\n'), ((1274, 1360), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.max[self.shapes.ids]', '[33.0, 48.0, 96.0, 98.0]'], {}), '(actual.max[self.shapes.ids], [33.0, 48.0, 96.0,\n 98.0])\n', (1301, 1360), True, 'import numpy.testing as np_test\n'), ((1525, 1627), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.mean[self.shapes.ids]', '[22.0, 27.0, 84.92307692, 79.57142857]'], {}), '(actual.mean[self.shapes.ids], [22.0, 27.0, \n 84.92307692, 79.57142857])\n', (1552, 1627), True, 'import numpy.testing as np_test\n'), ((1669, 1770), 'numpy.testing.assert_almost_equal', 
'np_test.assert_almost_equal', (['actual.std[self.shapes.ids]', '[8.206, 12.79, 8.22, 11.22]'], {'decimal': '(2)'}), '(actual.std[self.shapes.ids], [8.206, 12.79, \n 8.22, 11.22], decimal=2)\n', (1696, 1770), True, 'import numpy.testing as np_test\n'), ((1810, 1896), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.min[self.shapes.ids]', '[11.0, 6.0, 72.0, 65.0]'], {}), '(actual.min[self.shapes.ids], [11.0, 6.0, 72.0, \n 65.0])\n', (1837, 1896), True, 'import numpy.testing as np_test\n'), ((1937, 2023), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual.max[self.shapes.ids]', '[33.0, 48.0, 96.0, 98.0]'], {}), '(actual.max[self.shapes.ids], [33.0, 48.0, 96.0,\n 98.0])\n', (1964, 2023), True, 'import numpy.testing as np_test\n'), ((2064, 2132), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['actual.volume[self.shapes.ids]', '[9, 21, 13, 7]'], {}), '(actual.volume[self.shapes.ids], [9, 21, 13, 7])\n', (2084, 2132), True, 'import numpy.testing as np_test\n'), ((2317, 2419), 'numpy.array', 'numpy.array', (['[[-1, 1, 2, 2, 3, 3], [1, 1, 2, -1, 3, -1], [1, 1, 2, 2, 3, -1], [-1, 1, 2,\n 2, 3, 3]]'], {}), '([[-1, 1, 2, 2, 3, 3], [1, 1, 2, -1, 3, -1], [1, 1, 2, 2, 3, -1],\n [-1, 1, 2, 2, 3, 3]])\n', (2328, 2419), False, 'import numpy\n'), ((2547, 2597), 'pyto.segmentation.grey.Grey.labelByBins', 'Grey.labelByBins', ([], {'values': 'values', 'bins': '[1, 2, 3, 4]'}), '(values=values, bins=[1, 2, 3, 4])\n', (2563, 2597), False, 'from pyto.segmentation.grey import Grey\n'), ((2654, 2751), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 2, 3, 3], [1, 1, 2, 0, 3, 0], [1, 1, 2, 2, 3, 0], [0, 1, 2, 2, 3, 3]\n ]'], {}), '([[0, 1, 2, 2, 3, 3], [1, 1, 2, 0, 3, 0], [1, 1, 2, 2, 3, 0], [0,\n 1, 2, 2, 3, 3]])\n', (2665, 2751), False, 'import numpy\n'), ((2852, 2889), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['labels', 'desired'], {}), '(labels, desired)\n', (2872, 2889), True, 'import numpy.testing as np_test\n'), 
((2949, 2989), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['bins', 'desired_bins'], {}), '(bins, desired_bins)\n', (2969, 2989), True, 'import numpy.testing as np_test\n'), ((3038, 3078), 'numpy.ma.array', 'numpy.ma.array', (['values'], {'mask': '(values == 2)'}), '(values, mask=values == 2)\n', (3052, 3078), False, 'import numpy\n'), ((3102, 3152), 'pyto.segmentation.grey.Grey.labelByBins', 'Grey.labelByBins', ([], {'values': 'values', 'bins': '[1, 2, 3, 4]'}), '(values=values, bins=[1, 2, 3, 4])\n', (3118, 3152), False, 'from pyto.segmentation.grey import Grey\n'), ((3209, 3306), 'numpy.array', 'numpy.array', (['[[0, 1, 0, 0, 3, 3], [1, 1, 0, 0, 3, 0], [1, 1, 0, 0, 3, 0], [0, 1, 0, 0, 3, 3]\n ]'], {}), '([[0, 1, 0, 0, 3, 3], [1, 1, 0, 0, 3, 0], [1, 1, 0, 0, 3, 0], [0,\n 1, 0, 0, 3, 3]])\n', (3220, 3306), False, 'import numpy\n'), ((3407, 3444), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['labels', 'desired'], {}), '(labels, desired)\n', (3427, 3444), True, 'import numpy.testing as np_test\n'), ((3504, 3544), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['bins', 'desired_bins'], {}), '(bins, desired_bins)\n', (3524, 3544), True, 'import numpy.testing as np_test\n'), ((3609, 3648), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(2, 4, 6)', 'dtype': 'int'}), '(shape=(2, 4, 6), dtype=int)\n', (3620, 3648), False, 'import numpy\n'), ((3669, 3771), 'numpy.array', 'numpy.array', (['[[-1, 1, 2, 2, 3, 3], [1, 1, 2, -1, 3, -1], [1, 1, 2, 2, 3, -1], [-1, 1, 2,\n 2, 3, 3]]'], {}), '([[-1, 1, 2, 2, 3, 3], [1, 1, 2, -1, 3, -1], [1, 1, 2, 2, 3, -1],\n [-1, 1, 2, 2, 3, 3]])\n', (3680, 3771), False, 'import numpy\n'), ((3884, 3986), 'numpy.array', 'numpy.array', (['[[-1, 1, 1, 1, 1, 1], [2, 2, 2, -1, 2, -1], [2, 3, 3, 3, 3, -1], [-1, 4, 4,\n 4, 3, 4]]'], {}), '([[-1, 1, 1, 1, 1, 1], [2, 2, 2, -1, 2, -1], [2, 3, 3, 3, 3, -1],\n [-1, 4, 4, 4, 3, 4]])\n', (3895, 3986), False, 'import numpy\n'), ((4120, 4189), 
'pyto.segmentation.grey.Grey.labelByBins', 'Grey.labelByBins', ([], {'values': 'values', 'bins': '[[1, 2, 3, 4], [1, 2, 3, 4, 5]]'}), '(values=values, bins=[[1, 2, 3, 4], [1, 2, 3, 4, 5]])\n', (4136, 4189), False, 'from pyto.segmentation.grey import Grey\n'), ((4242, 4343), 'numpy.array', 'numpy.array', (['[[0, 1, 5, 5, 9, 9], [2, 2, 6, 0, 10, 0], [2, 3, 7, 7, 11, 0], [0, 4, 8, 8,\n 11, 12]]'], {}), '([[0, 1, 5, 5, 9, 9], [2, 2, 6, 0, 10, 0], [2, 3, 7, 7, 11, 0],\n [0, 4, 8, 8, 11, 12]])\n', (4253, 4343), False, 'import numpy\n'), ((4444, 4481), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['labels', 'desired'], {}), '(labels, desired)\n', (4464, 4481), True, 'import numpy.testing as np_test\n'), ((4766, 4801), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['bins', 'desired'], {}), '(bins, desired)\n', (4786, 4801), True, 'import numpy.testing as np_test\n'), ((4855, 4915), 'pyto.segmentation.grey.Grey.labelByBins', 'Grey.labelByBins', ([], {'values': 'values', 'bins': '[[1, 2, 3], [1, 2, 3]]'}), '(values=values, bins=[[1, 2, 3], [1, 2, 3]])\n', (4871, 4915), False, 'from pyto.segmentation.grey import Grey\n'), ((4971, 5068), 'numpy.array', 'numpy.array', (['[[0, 1, 3, 3, 3, 3], [2, 2, 4, 0, 4, 0], [2, 2, 4, 4, 4, 0], [0, 0, 0, 0, 4, 0]\n ]'], {}), '([[0, 1, 3, 3, 3, 3], [2, 2, 4, 0, 4, 0], [2, 2, 4, 4, 4, 0], [0,\n 0, 0, 0, 4, 0]])\n', (4982, 5068), False, 'import numpy\n'), ((5169, 5206), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['labels', 'desired'], {}), '(labels, desired)\n', (5189, 5206), True, 'import numpy.testing as np_test\n'), ((5333, 5373), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['bins', 'desired_bins'], {}), '(bins, desired_bins)\n', (5353, 5373), True, 'import numpy.testing as np_test\n'), ((5444, 5477), 'numpy.array', 'numpy.array', (['(2 * [values[0] == 2])'], {}), '(2 * [values[0] == 2])\n', (5455, 5477), False, 'import numpy\n'), ((5498, 5531), 'numpy.ma.array', 'numpy.ma.array', (['values'], 
{'mask': 'mask'}), '(values, mask=mask)\n', (5512, 5531), False, 'import numpy\n'), ((5555, 5618), 'pyto.segmentation.grey.Grey.labelByBins', 'Grey.labelByBins', ([], {'values': 'ma_values', 'bins': '[[1, 2, 3], [1, 2, 3]]'}), '(values=ma_values, bins=[[1, 2, 3], [1, 2, 3]])\n', (5571, 5618), False, 'from pyto.segmentation.grey import Grey\n'), ((5674, 5771), 'numpy.array', 'numpy.array', (['[[0, 1, 0, 0, 3, 3], [2, 2, 0, 0, 4, 0], [2, 2, 0, 0, 4, 0], [0, 0, 0, 0, 4, 0]\n ]'], {}), '([[0, 1, 0, 0, 3, 3], [2, 2, 0, 0, 4, 0], [2, 2, 0, 0, 4, 0], [0,\n 0, 0, 0, 4, 0]])\n', (5685, 5771), False, 'import numpy\n'), ((5872, 5909), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['labels', 'desired'], {}), '(labels, desired)\n', (5892, 5909), True, 'import numpy.testing as np_test\n'), ((5918, 5958), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['bins', 'desired_bins'], {}), '(bins, desired_bins)\n', (5938, 5958), True, 'import numpy.testing as np_test\n'), ((6014, 6063), 'pyto.segmentation.grey.Grey.labelByBins', 'Grey.labelByBins', ([], {'values': 'values', 'bins': '[[1, 2, 3]]'}), '(values=values, bins=[[1, 2, 3]])\n', (6030, 6063), False, 'from pyto.segmentation.grey import Grey\n'), ((6121, 6218), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 2, 2, 2], [1, 1, 2, 0, 2, 0], [1, 1, 2, 2, 2, 0], [0, 1, 2, 2, 2, 2]\n ]'], {}), '([[0, 1, 2, 2, 2, 2], [1, 1, 2, 0, 2, 0], [1, 1, 2, 2, 2, 0], [0,\n 1, 2, 2, 2, 2]])\n', (6132, 6218), False, 'import numpy\n'), ((6319, 6356), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['labels', 'desired'], {}), '(labels, desired)\n', (6339, 6356), True, 'import numpy.testing as np_test\n'), ((6424, 6464), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['bins', 'desired_bins'], {}), '(bins, desired_bins)\n', (6444, 6464), True, 'import numpy.testing as np_test\n'), ((6507, 6528), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (6526, 6528), False, 'import unittest\n'), ((6565, 6601), 
'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (6588, 6601), False, 'import unittest\n')] |
"""
Author: <NAME>.
This code is written for the 3D-Human-Action-Recognition Project, started March 14 2014.
"""
import numpy as np
from SOM import SOM
from SNN import SNN
class somagent_phase_I:
def __init__(self, learning, l_x, l_y, input_size, sigma, softmax_exponent, max_epoch, dyn_as_input):
self.net_1 = SOM(learning=learning,
outputsize_x=l_x,
outputsize_y=l_y,
inputsize=input_size,
sigma=sigma,
softmax_exponent=softmax_exponent,
max_epoch=max_epoch)
self.dyn_as_input = dyn_as_input
def run(self, data, data_d1, data_d2, data_index, learning=None):
self.net_1.learning = learning
all_activity_pattern = []
iteration = 0
epoch = 0
run = True
while run:
epoch += 1
# Random selection
rseq = np.random.permutation(len(data_index))
all_activity_pattern = []
for nseq in range(len(data_index)): # Sequences
if learning is False:
ind_seq = int(data_index[nseq])
else:
ind_seq = int(data_index[rseq[nseq]])
data_seq_d0 = data[ind_seq]
data_seq_d1 = data_d1[ind_seq]
data_seq_d2 = data_d2[ind_seq]
if self.dyn_as_input == 1:
data_seq_d0 = np.concatenate((data_seq_d0, data_seq_d1), axis=1)
elif self.dyn_as_input == 2:
data_seq_d0 = np.concatenate((data_seq_d0, data_seq_d1), axis=1)
data_seq_d0 = np.concatenate((data_seq_d0, data_seq_d2), axis=1)
activity_pattern = np.zeros((np.size(data_seq_d0, 0), 2))
for nfr in range(np.size(data_seq_d0, 0)): # Frames per sequence
iteration += 1
# running first-layer SOM
# print('input dim phase_I:', len(data_seq_d0[nfr, :]))
activity, winner = self.net_1.run_SOM(data_seq_d0[nfr, :])
# print("\nInput:\n", data_seq_d0[nfr, :])
# print("\nWinner:", winner)
# print("\nmin_actinity:", np.min(activity), "\t max_actinity:", np.max(activity))
activity_pattern[nfr, 0] = winner[0]
activity_pattern[nfr, 1] = winner[1]
all_activity_pattern.append(activity_pattern)
if learning:
print("", end='\r')
print("Phase:{} \t Epoch:{} \t Row:{} \t Column:{}".format(2, epoch, np.size(self.net_1.weights, 0),
np.size(self.net_1.weights, 1)), end="", flush=True)
if epoch == self.net_1.max_epoch or learning is False:
run = False
return all_activity_pattern
class somagent_phase_II:
def __init__(self, learning, l_x, l_y, input_size, sigma, softmax_exponent, max_epoch, class_number):
self.net_2 = SOM(learning=learning,
outputsize_x=l_x,
outputsize_y=l_y,
inputsize=input_size,
sigma=sigma,
softmax_exponent=softmax_exponent,
max_epoch=max_epoch)
self.net_3 = SNN(learning=learning,
outputsize_x=class_number,
outputsize_y=1,
inputsize=l_x*l_y)
def run(self, data, data_index, data_class_info, learning=None):
self.net_2.learning = learning
self.net_3.learning = learning
# Performance results
result_per_class = np.zeros((1, self.net_3.outputsize_x))
snn_activity_map = []
epoch = 0
run = True
while run:
epoch += 1
rseq = np.random.permutation(len(data_index))
t_res = 0
for nseq in range(len(data_index)):
if learning is False:
ind_seq = int(data_index[nseq])
else:
ind_seq = int(data_index[rseq[nseq]])
# class label
class_seq = data_class_info[ind_seq]
# print('input dim phase_II:', len(data[ind_seq]))
activity, winner = self.net_2.run_SOM(data[ind_seq])
snn_activity, snn_result = self.net_3.run_SNN(activity.flatten(), int(class_seq[2]))
result_per_class[0, int(class_seq[2])] += snn_result
t_res += snn_result
# get third-layer activation maps for Test sequences
if learning is False:
snn_activity_map.append(snn_activity.T)
if learning:
print("", end='\r')
print("Phase:{} \t Epoch:{} \t Row:{} \t Column:{} \t Result:{}".format(2, epoch,
np.size(self.net_2.weights, 0),
np.size(self.net_2.weights, 1),
100*t_res/len(data_index)), end="", flush=True)
if epoch == self.net_2.max_epoch or learning is False:
run = False
return result_per_class, snn_activity_map
| [
"numpy.size",
"SNN.SNN",
"numpy.zeros",
"SOM.SOM",
"numpy.concatenate"
] | [((341, 500), 'SOM.SOM', 'SOM', ([], {'learning': 'learning', 'outputsize_x': 'l_x', 'outputsize_y': 'l_y', 'inputsize': 'input_size', 'sigma': 'sigma', 'softmax_exponent': 'softmax_exponent', 'max_epoch': 'max_epoch'}), '(learning=learning, outputsize_x=l_x, outputsize_y=l_y, inputsize=\n input_size, sigma=sigma, softmax_exponent=softmax_exponent, max_epoch=\n max_epoch)\n', (344, 500), False, 'from SOM import SOM\n'), ((3157, 3316), 'SOM.SOM', 'SOM', ([], {'learning': 'learning', 'outputsize_x': 'l_x', 'outputsize_y': 'l_y', 'inputsize': 'input_size', 'sigma': 'sigma', 'softmax_exponent': 'softmax_exponent', 'max_epoch': 'max_epoch'}), '(learning=learning, outputsize_x=l_x, outputsize_y=l_y, inputsize=\n input_size, sigma=sigma, softmax_exponent=softmax_exponent, max_epoch=\n max_epoch)\n', (3160, 3316), False, 'from SOM import SOM\n'), ((3479, 3570), 'SNN.SNN', 'SNN', ([], {'learning': 'learning', 'outputsize_x': 'class_number', 'outputsize_y': '(1)', 'inputsize': '(l_x * l_y)'}), '(learning=learning, outputsize_x=class_number, outputsize_y=1, inputsize\n =l_x * l_y)\n', (3482, 3570), False, 'from SNN import SNN\n'), ((3845, 3883), 'numpy.zeros', 'np.zeros', (['(1, self.net_3.outputsize_x)'], {}), '((1, self.net_3.outputsize_x))\n', (3853, 3883), True, 'import numpy as np\n'), ((1507, 1557), 'numpy.concatenate', 'np.concatenate', (['(data_seq_d0, data_seq_d1)'], {'axis': '(1)'}), '((data_seq_d0, data_seq_d1), axis=1)\n', (1521, 1557), True, 'import numpy as np\n'), ((1882, 1905), 'numpy.size', 'np.size', (['data_seq_d0', '(0)'], {}), '(data_seq_d0, 0)\n', (1889, 1905), True, 'import numpy as np\n'), ((1638, 1688), 'numpy.concatenate', 'np.concatenate', (['(data_seq_d0, data_seq_d1)'], {'axis': '(1)'}), '((data_seq_d0, data_seq_d1), axis=1)\n', (1652, 1688), True, 'import numpy as np\n'), ((1723, 1773), 'numpy.concatenate', 'np.concatenate', (['(data_seq_d0, data_seq_d2)'], {'axis': '(1)'}), '((data_seq_d0, data_seq_d2), axis=1)\n', (1737, 1773), True, 
'import numpy as np\n'), ((1820, 1843), 'numpy.size', 'np.size', (['data_seq_d0', '(0)'], {}), '(data_seq_d0, 0)\n', (1827, 1843), True, 'import numpy as np\n'), ((2708, 2738), 'numpy.size', 'np.size', (['self.net_1.weights', '(0)'], {}), '(self.net_1.weights, 0)\n', (2715, 2738), True, 'import numpy as np\n'), ((2816, 2846), 'numpy.size', 'np.size', (['self.net_1.weights', '(1)'], {}), '(self.net_1.weights, 1)\n', (2823, 2846), True, 'import numpy as np\n'), ((5138, 5168), 'numpy.size', 'np.size', (['self.net_2.weights', '(0)'], {}), '(self.net_2.weights, 0)\n', (5145, 5168), True, 'import numpy as np\n'), ((5259, 5289), 'numpy.size', 'np.size', (['self.net_2.weights', '(1)'], {}), '(self.net_2.weights, 1)\n', (5266, 5289), True, 'import numpy as np\n')] |
import cv2, os
import numpy as np
from PIL import Image
recognizer1 = cv2.face.createLBPHFaceRecognizer(1,1,7,7)
recognizer2=cv2.face.createEigenFaceRecognizer(15)
path='dataset'
def img_id(path):
# Get all file path
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
# Initialize empty face sample
ImgList=[]
# Initialize empty id
ids = []
# Loop all the file path
for imagePath in imagePaths:
# Get the image and convert it to grayscale
img = Image.open(imagePath).convert('L')
img=img.resize((110,110))
# PIL image to numpy array
img_np = np.array(img,'uint8')
# Get the image id
Id = int(os.path.split(imagePath)[-1].split(".")[1])
ImgList.append(img_np)
ids.append(Id)
cv2.imshow("Data Training ",img_np)
cv2.waitKey(1)
print("Img List=",ImgList)
return np.array(ids),ImgList
ids,ImgList=img_id(path)
print("Data is being Trained....." )
recognizer1.train(ImgList,ids)
recognizer2.train(ImgList,ids)
print('Data Training Complete')
recognizer1.save('trainer/trainedData1.xml')
recognizer2.save('trainer/trainedData2.xml')
print('Xml File saved.')
cv2.destroyAllWindows()
| [
"cv2.waitKey",
"cv2.imshow",
"PIL.Image.open",
"cv2.face.createLBPHFaceRecognizer",
"numpy.array",
"cv2.face.createEigenFaceRecognizer",
"cv2.destroyAllWindows",
"os.path.join",
"os.listdir",
"os.path.split"
] | [((70, 115), 'cv2.face.createLBPHFaceRecognizer', 'cv2.face.createLBPHFaceRecognizer', (['(1)', '(1)', '(7)', '(7)'], {}), '(1, 1, 7, 7)\n', (103, 115), False, 'import cv2, os\n'), ((125, 163), 'cv2.face.createEigenFaceRecognizer', 'cv2.face.createEigenFaceRecognizer', (['(15)'], {}), '(15)\n', (159, 163), False, 'import cv2, os\n'), ((1206, 1229), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1227, 1229), False, 'import cv2, os\n'), ((240, 261), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (252, 261), False, 'import cv2, os\n'), ((639, 661), 'numpy.array', 'np.array', (['img', '"""uint8"""'], {}), "(img, 'uint8')\n", (647, 661), True, 'import numpy as np\n'), ((812, 848), 'cv2.imshow', 'cv2.imshow', (['"""Data Training """', 'img_np'], {}), "('Data Training ', img_np)\n", (822, 848), False, 'import cv2, os\n'), ((856, 870), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (867, 870), False, 'import cv2, os\n'), ((913, 926), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (921, 926), True, 'import numpy as np\n'), ((270, 286), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (280, 286), False, 'import cv2, os\n'), ((518, 539), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (528, 539), False, 'from PIL import Image\n'), ((706, 730), 'os.path.split', 'os.path.split', (['imagePath'], {}), '(imagePath)\n', (719, 730), False, 'import cv2, os\n')] |
from collections import Counter
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from vk_text_likeness.logs import log_method_begin, log_method_end
class PredictActionModel:
def __init__(self, action_data):
self.action_data = action_data
self.like_model = RandomForestClassifier(n_jobs=-1)
self.repost_model = RandomForestClassifier(n_jobs=-1)
self.is_fitted = False
def fit(self, post_subset=None):
df = self.action_data.get_all()
if post_subset is not None:
df = df[df['post_id'].isin(post_subset)]
log_method_begin()
x_df = df.drop(['user_id', 'post_id', 'is_member', 'is_liked', 'is_reposted'], axis=1)
self.like_model.fit(x_df, df['is_liked'])
self.repost_model.fit(x_df, df['is_reposted'])
self.is_fitted = True
log_method_end()
def predict(self, post_subset=None):
df = self.action_data.get_all()
if post_subset is not None:
df = df[df['post_id'].isin(post_subset)]
log_method_begin()
x_df = df.drop(['user_id', 'post_id', 'is_member', 'is_liked', 'is_reposted'], axis=1)
pred = [df['user_id'], df['post_id'], df['is_member'], self.like_model.predict(x_df), self.repost_model.predict(x_df)]
result = pd.DataFrame(np.array(pred).T, columns=['user_id', 'post_id', 'is_member', 'is_liked', 'is_reposted'])
log_method_end()
return result
class PredictStatsModel:
def __init__(self, predict_action_model, raw_users_data, action_data):
self.predict_action_model = predict_action_model
self.raw_users_data = raw_users_data
self.action_data = action_data
def predict(self, post_subset=None):
log_method_begin()
direct_likes_count = Counter()
direct_reposts_count = Counter()
non_direct_likes_count = Counter()
non_direct_reposts_count = Counter()
pred_df = self.predict_action_model.predict(post_subset)
for i, row in pred_df.iterrows():
if row['is_liked']:
if row['is_member']:
direct_likes_count[row['post_id']] += 1
else:
non_direct_likes_count[row['post_id']] += 1
if row['is_reposted']:
if row['is_member']:
direct_reposts_count[row['post_id']] += 1
else:
non_direct_reposts_count[row['post_id']] += 1
post_ids = list(direct_likes_count.keys() | direct_reposts_count.keys() | non_direct_likes_count.keys() | non_direct_reposts_count.keys())
rows = []
for post_id in post_ids:
rows.append([direct_likes_count[post_id], direct_reposts_count[post_id], non_direct_likes_count[post_id], non_direct_reposts_count[post_id]])
result = pd.DataFrame(rows, index=post_ids, columns=['direct_likes_count', 'direct_reposts_count', 'non_direct_likes_count', 'non_direct_reposts_count'])
log_method_end()
return result
| [
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"vk_text_likeness.logs.log_method_end",
"vk_text_likeness.logs.log_method_begin",
"numpy.array",
"collections.Counter"
] | [((322, 355), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (344, 355), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((384, 417), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (406, 417), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((624, 642), 'vk_text_likeness.logs.log_method_begin', 'log_method_begin', ([], {}), '()\n', (640, 642), False, 'from vk_text_likeness.logs import log_method_begin, log_method_end\n'), ((881, 897), 'vk_text_likeness.logs.log_method_end', 'log_method_end', ([], {}), '()\n', (895, 897), False, 'from vk_text_likeness.logs import log_method_begin, log_method_end\n'), ((1077, 1095), 'vk_text_likeness.logs.log_method_begin', 'log_method_begin', ([], {}), '()\n', (1093, 1095), False, 'from vk_text_likeness.logs import log_method_begin, log_method_end\n'), ((1446, 1462), 'vk_text_likeness.logs.log_method_end', 'log_method_end', ([], {}), '()\n', (1460, 1462), False, 'from vk_text_likeness.logs import log_method_begin, log_method_end\n'), ((1778, 1796), 'vk_text_likeness.logs.log_method_begin', 'log_method_begin', ([], {}), '()\n', (1794, 1796), False, 'from vk_text_likeness.logs import log_method_begin, log_method_end\n'), ((1827, 1836), 'collections.Counter', 'Counter', ([], {}), '()\n', (1834, 1836), False, 'from collections import Counter\n'), ((1868, 1877), 'collections.Counter', 'Counter', ([], {}), '()\n', (1875, 1877), False, 'from collections import Counter\n'), ((1911, 1920), 'collections.Counter', 'Counter', ([], {}), '()\n', (1918, 1920), False, 'from collections import Counter\n'), ((1956, 1965), 'collections.Counter', 'Counter', ([], {}), '()\n', (1963, 1965), False, 'from collections import Counter\n'), ((2881, 3033), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'index': 'post_ids', 'columns': "['direct_likes_count', 'direct_reposts_count', 
'non_direct_likes_count',\n 'non_direct_reposts_count']"}), "(rows, index=post_ids, columns=['direct_likes_count',\n 'direct_reposts_count', 'non_direct_likes_count',\n 'non_direct_reposts_count'])\n", (2893, 3033), True, 'import pandas as pd\n'), ((3034, 3050), 'vk_text_likeness.logs.log_method_end', 'log_method_end', ([], {}), '()\n', (3048, 3050), False, 'from vk_text_likeness.logs import log_method_begin, log_method_end\n'), ((1348, 1362), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (1356, 1362), True, 'import numpy as np\n')] |
import numpy as np
import warnings
from collections import namedtuple
from numdifftools import Gradient, Hessian, Jacobian
from scipy.optimize import minimize
from scipy.linalg import sqrtm
from .sobol import multivariate_normal
from .misc import make_positive
__all__ = ['Laplace']
LaplaceResult = namedtuple("LaplaceResult",
"x_max, f_max, samples, cov, beta, opt_result")
class Laplace:
    """
    Evaluating and sampling the Laplace approximation for the target density.

    Parameters
    ----------
    optimize_method : str or callable, optional
        The ``method`` parameter for ``scipy.optimize.minimize``. Set to
        ``'Newton-CG'`` by default.
    optimize_tol : float, optional
        The ``tol`` parameter for ``scipy.optimize.minimize``. Set to ``1e-5``
        by default.
    optimize_options : dict, optional
        The ``options`` parameter for ``scipy.optimize.minimize``. Set to
        ``None`` (treated as ``{}``) by default.
    max_cond : positive float, optional
        The maximum conditional number allowed for the Hessian matrix. All the
        eigenvalues that are smaller than ``max_eigen_value / max_cond`` will
        be truncated at this value. Set to ``1e5`` by default.
    n_sample : positive int or None, optional
        The number of samples to draw from the approximated Gaussian
        distribution. If None, will be determined by
        ``min(1000, x_0.shape[-1] * 10)`` during runtime. Set to ``2000`` by
        default.
    beta : positive float, optional
        Scaling the approximate distribution ``logq``, i.e. the final samples
        will come from ``beta * logq``. Set to ``1.`` by default.
    mvn_generator : None or callable, optional
        Random number generator for the multivariate normal distribution.
        Should have signature ``(mean, cov, size) -> samples``. If None, will
        use ``bayesfast.utils.sobol.multivariate_normal``. Set to ``None`` by
        default.
    grad_options : dict, optional
        Additional keyword arguments for ``numdifftools`` to compute the
        gradient. Will be ignored if direct expression for the gradient is
        provided in ``run``. Set to ``{}`` by default.
    hess_options : dict, optional
        Additional keyword arguments for ``numdifftools`` to compute the
        Hessian. Will be ignored if direct expression for the Hessian is
        provided in ``run``. Set to ``{}`` by default.
    """
    def __init__(self, optimize_method='Newton-CG', optimize_tol=1e-5,
                 optimize_options=None, max_cond=1e5, n_sample=2000, beta=1.,
                 mvn_generator=None, grad_options=None, hess_options=None):
        # Every parameter is validated eagerly so that a bad configuration
        # fails at construction time rather than in the middle of run().
        if callable(optimize_method):
            self._optimize_method = optimize_method
        else:
            try:
                self._optimize_method = str(optimize_method)
            except Exception:
                raise ValueError('invalid value for optimize_method.')
        if optimize_tol is None:
            # None means "use scipy's default tolerance".
            pass
        else:
            try:
                optimize_tol = float(optimize_tol)
                assert optimize_tol > 0
            except Exception:
                raise ValueError('invalid value for optimize_tol.')
        self._optimize_tol = optimize_tol
        try:
            if optimize_options is None:
                optimize_options = {}
            self._optimize_options = dict(optimize_options)
        except Exception:
            raise ValueError('invalid value for optimize_options.')
        try:
            max_cond = float(max_cond)
            assert max_cond > 0
            self._max_cond = max_cond
        except Exception:
            raise ValueError('max_cond should be a positive float.')
        if n_sample is None:
            # None defers the sample count to run(), where the dimensionality
            # of x_0 is known.
            pass
        else:
            try:
                n_sample = int(n_sample)
                assert n_sample > 0
            except Exception:
                raise ValueError('invalid value for n_sample.')
        self._n_sample = n_sample
        try:
            beta = float(beta)
            assert beta > 0
            self._beta = beta
        except Exception:
            raise ValueError('beta should be a positive float.')
        if mvn_generator is None:
            mvn_generator = multivariate_normal
        if callable(mvn_generator):
            self._mvn_generator = mvn_generator
        else:
            raise ValueError('invalid value for mvn_generator.')
        try:
            if grad_options is None:
                grad_options = {}
            self._grad_options = dict(grad_options)
        except Exception:
            raise ValueError('invalid value for grad_options.')
        try:
            if hess_options is None:
                hess_options = {}
            self._hess_options = dict(hess_options)
        except Exception:
            raise ValueError('invalid value for hess_options.')
    def run(self, logp, x_0, grad=None, hess=None):
        """
        Running optimization and Laplace approximate sampling.

        Parameters
        ----------
        logp : callable
            The logarithmic probability density to sample.
        x_0 : 1-d array_like of float
            The starting point for optimization.
        grad : callable or None, optional
            The gradient of the target density. If not callable, will use finite
            difference methods in ``numdifftools``. Set to ``None`` by default.
        hess : callable or None, optional
            The Hessian of the target density. If not callable, will use finite
            difference methods in ``numdifftools`` (by computing the Jacobian of
            gradient). Set to ``None`` by default.

        Returns
        -------
        LaplaceResult
            Named tuple ``(x_max, f_max, samples, cov, beta, opt_result)``.
        """
        if not callable(logp):
            raise ValueError('logp should be callable.')
        try:
            x_0 = np.atleast_1d(x_0)
            assert x_0.ndim == 1
        except Exception:
            raise ValueError('invalid value for x_0.')
        if self._n_sample is None:
            n_sample = min(1000, x_0.shape[-1] * 10)
        else:
            n_sample = self._n_sample
        if not callable(hess):
            if callable(grad):
                # Build the Hessian as the Jacobian of the supplied gradient,
                # symmetrized to remove numerical asymmetry.
                def _hess(*args, **kwargs):
                    foo = Jacobian(grad, **self._hess_options)(*args, **kwargs)
                    return (foo + foo.T) / 2
                hess = _hess
            else:
                hess = Hessian(logp, **self._hess_options)
        if not callable(grad):
            grad = Gradient(logp, **self._grad_options)
        # scipy minimizes, so negate logp (and its derivatives) to maximize.
        opt = minimize(fun=lambda x: -logp(x), x0=x_0,
                       method=self._optimize_method, jac=lambda x: -grad(x),
                       hess=lambda x: -hess(x), tol=self._optimize_tol,
                       options=self._optimize_options)
        if not opt.success:
            warnings.warn(
                'the optimization stopped at {}, but maybe it has not '
                'converged yet.'.format(opt.x), RuntimeWarning)
        x_max = opt.x
        f_max = -opt.fun
        # Gaussian covariance at the mode: inverse of the negative Hessian,
        # conditioned by make_positive (eigenvalues truncated per max_cond).
        cov = np.linalg.inv(make_positive(-hess(x_max), self._max_cond))
        # Tempering: sampling from beta * logq widens the covariance by 1/beta.
        samples = self._mvn_generator(x_max, cov / self._beta, n_sample)
        return LaplaceResult(x_max, f_max, samples, cov, self._beta, opt)
    @staticmethod
    def untemper_laplace_samples(laplace_result):
        """
        Retrieve untempered (beta=1) Laplace results.

        Parameters
        ----------
        laplace_result : LaplaceResult
            The results returned by a previous run.

        Returns
        -------
        x : 2-d numpy.ndarray of float
            The untempered Laplace samples.
        """
        if isinstance(laplace_result, LaplaceResult):
            # Shrinking the deviations by sqrt(beta) converts samples drawn
            # from beta * logq back to samples from logq itself.
            delta = laplace_result.samples - laplace_result.x_max
            delta *= laplace_result.beta**0.5
            return laplace_result.x_max + delta
        else:
            raise ValueError('laplace_result should be a LaplaceResult.')
| [
"numdifftools.Gradient",
"collections.namedtuple",
"numpy.atleast_1d",
"numdifftools.Jacobian",
"numdifftools.Hessian"
] | [((302, 377), 'collections.namedtuple', 'namedtuple', (['"""LaplaceResult"""', '"""x_max, f_max, samples, cov, beta, opt_result"""'], {}), "('LaplaceResult', 'x_max, f_max, samples, cov, beta, opt_result')\n", (312, 377), False, 'from collections import namedtuple\n'), ((5845, 5863), 'numpy.atleast_1d', 'np.atleast_1d', (['x_0'], {}), '(x_0)\n', (5858, 5863), True, 'import numpy as np\n'), ((6505, 6541), 'numdifftools.Gradient', 'Gradient', (['logp'], {}), '(logp, **self._grad_options)\n', (6513, 6541), False, 'from numdifftools import Gradient, Hessian, Jacobian\n'), ((6419, 6454), 'numdifftools.Hessian', 'Hessian', (['logp'], {}), '(logp, **self._hess_options)\n', (6426, 6454), False, 'from numdifftools import Gradient, Hessian, Jacobian\n'), ((6250, 6286), 'numdifftools.Jacobian', 'Jacobian', (['grad'], {}), '(grad, **self._hess_options)\n', (6258, 6286), False, 'from numdifftools import Gradient, Hessian, Jacobian\n')] |
from phone_sensor import PhoneSensor
from matplotlib import pyplot as plt
import numpy as np  # type: ignore

# PhoneSensor hosts a web server on a background thread and shows a
# QR-code link that opens the companion app on the phone.
with PhoneSensor(qrcode=True) as phone:
    # Block until the user presses the button, then capture one photo.
    frame_bgr, time = phone.grab(button=True)
    # Read the device orientation (a quaternion) from the IMU.
    imu_data = phone.imu()

# Left panel: the captured photo.
plt.subplot(1, 2, 1)
# The frame arrives BGR (OpenCV convention); matplotlib expects RGB,
# so reverse the channel axis.
frame_rgb = np.flip(frame_bgr, axis=2)  # type: ignore
plt.imshow(frame_rgb)  # type: ignore
plt.title(f"t = {time}")  # type: ignore
# Right panel: the four quaternion components as a bar chart.
plt.subplot(1, 2, 2)
plt.bar(['x', 'y', 'z', 'w'], imu_data.quaternion)  # type: ignore
plt.title(f"t = {imu_data.unix_timestamp}")  # type: ignore
plt.show()
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.flip",
"matplotlib.pyplot.show",
"phone_sensor.PhoneSensor",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.bar"
] | [((198, 222), 'phone_sensor.PhoneSensor', 'PhoneSensor', ([], {'qrcode': '(True)'}), '(qrcode=True)\n', (209, 222), False, 'from phone_sensor import PhoneSensor\n'), ((394, 414), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (405, 414), True, 'from matplotlib import pyplot as plt\n'), ((488, 508), 'numpy.flip', 'np.flip', (['bgr'], {'axis': '(2)'}), '(bgr, axis=2)\n', (495, 508), True, 'import numpy as np\n'), ((528, 543), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {}), '(rgb)\n', (538, 543), True, 'from matplotlib import pyplot as plt\n'), ((564, 588), 'matplotlib.pyplot.title', 'plt.title', (['f"""t = {time}"""'], {}), "(f't = {time}')\n", (573, 588), True, 'from matplotlib import pyplot as plt\n'), ((613, 633), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (624, 633), True, 'from matplotlib import pyplot as plt\n'), ((638, 688), 'matplotlib.pyplot.bar', 'plt.bar', (["['x', 'y', 'z', 'w']", 'imu_data.quaternion'], {}), "(['x', 'y', 'z', 'w'], imu_data.quaternion)\n", (645, 688), True, 'from matplotlib import pyplot as plt\n'), ((709, 752), 'matplotlib.pyplot.title', 'plt.title', (['f"""t = {imu_data.unix_timestamp}"""'], {}), "(f't = {imu_data.unix_timestamp}')\n", (718, 752), True, 'from matplotlib import pyplot as plt\n'), ((772, 782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (780, 782), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
import os
import tempfile
import argparse
# Command-line configuration for the benchmark.
parser = argparse.ArgumentParser(description='Run model parallelism on MNIST.')
parser.add_argument('--splits', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--img_size', type=int, default=400)
args = parser.parse_args()
print(args)
if args.splits > 1:
    # NOTE(review): runai.mp.init is deliberately called BEFORE keras is
    # imported below — presumably Run:AI needs to hook layer construction;
    # confirm against the Run:AI model-parallelism docs before reordering.
    import runai
    runai.mp.init(splits=args.splits, method=runai.mp.Method.Cout)
    # import runai.profiler
    # runai.profiler.profile(20, './')
import keras
from keras import backend as K
from keras import layers
from keras.datasets import mnist
import time
import tensorflow as tf
#from scipy.misc import imresize
# This script relies on tf.data (Dataset API), which only exists with the
# TensorFlow backend.
if K.backend() != 'tensorflow':
    raise RuntimeError('This example can only run with the TensorFlow backend,'
                       ' because it requires the Datset API, which is not'
                       ' supported on other platforms.')
class StepTimeReporter1(keras.callbacks.Callback):
    """Keras callback that prints the wall-clock duration of every batch."""

    def __init__(self):
        # Wall-clock time at the start of the current batch.
        self.start = 0

    def on_batch_begin(self, batch, logs=None):
        """Record the batch start time (`logs` is unused).

        The original signature used the mutable default ``logs={}``; ``None``
        avoids the shared-mutable-default pitfall without changing behavior.
        """
        self.start = time.time()

    def on_batch_end(self, batch, logs=None):
        """Print how long the batch took."""
        print(' >> Step %d took %g sec' % (batch, time.time() - self.start))
class StepTimeReporter(keras.callbacks.Callback):
    """Keras callback that times batches and dumps the TF graph once.

    On construction any stale dump file is removed; at batch 20 the default
    TensorFlow graph is serialized (as a text GraphDef) to ``mnist.txt``,
    exactly once per training run.
    """

    def __init__(self):
        self.fname = 'mnist.txt'
        # Remove a stale dump from a previous run so the file always
        # reflects the current training graph.
        if os.path.isfile(self.fname):
            os.remove(self.fname)
        self.start = 0
        self.saved = False

    def on_batch_begin(self, batch, logs=None):
        """Record the batch start time; dump the graph once at batch 20.

        ``logs`` was a mutable default (``{}``) in the original; ``None`` is
        the safe equivalent and the argument is unused anyway.
        """
        # Batch 20 is used so the model graph is fully built before dumping.
        if batch == 20 and not self.saved:
            with open(self.fname, 'w') as f:
                f.write(str(tf.get_default_graph().as_graph_def()))
            self.saved = True
        self.start = time.time()

    def on_batch_end(self, batch, logs=None):
        """Print how long the batch took."""
        print(' >> Step %d took %g sec' % (batch, time.time() - self.start))
def cnn_layers(inputs):
    """Build the classifier head on top of `inputs`.

    Two conv/max-pool stages, a 512-unit dense layer with dropout, and a
    softmax output over ``num_classes`` (module-level constant).
    """
    net = layers.Conv2D(32, (3, 3), activation='relu', padding='valid')(inputs)
    net = layers.MaxPooling2D(pool_size=(2, 2))(net)
    net = layers.Conv2D(64, (3, 3), activation='relu')(net)
    net = layers.MaxPooling2D(pool_size=(2, 2))(net)
    net = layers.Flatten()(net)
    net = layers.Dense(512, activation='relu')(net)
    net = layers.Dropout(0.5)(net)
    # The output layer is named so it can be matched to dataset tensors.
    return layers.Dense(num_classes, activation='softmax',
                        name='x_train_out')(net)
batch_size = args.batch_size
buffer_size = 1000
steps_per_epoch = int(np.ceil(60000 / float(batch_size))) # 60000 MNIST train images -> 469 steps at batch 64
epochs = 5
num_classes = 10
# Scale the base learning rate (2e-3) linearly with batch size,
# relative to a reference batch size of 128.
lr = 2e-3 * batch_size / 128.
img_size = args.img_size
def modify(x, y):
    """Resize one example to (img_size, img_size), keeping the label as-is.

    ``resize_bilinear`` expects a 4-D batch, hence the ``expand_dims`` /
    ``squeeze`` pair around it. Applied via ``dataset.map`` before batching,
    so ``x`` is a single image tensor (assumed HxWx1 — TODO confirm against
    the pipeline above). Dead commented-out experiments were removed.
    """
    return tf.squeeze(tf.image.resize_bilinear(tf.expand_dims(x, 0), [img_size, img_size]), 0), y
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixels to [0, 1] and add a trailing channel axis.
x_train = x_train.astype(np.float32) / 255
x_train = np.expand_dims(x_train, -1)
y_train = tf.one_hot(y_train, num_classes)
# Create the dataset and its associated one-shot iterator.
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat()
#dataset = dataset.shuffle(buffer_size)
# def tf_random_rotate_image(image, label):
#
#     im_shape = image.shape
#     [image,] = tf.py_function(random_rotate_image, [image], [tf.float32])
#     image.set_shape(im_shape)
#     return image, label
# dataset = dataset.map(tf_random_rotate_image)
#dataset = dataset.map(lambda x, y: (imresize(x, (200,200), 'bilinear', 'L'), y))
# dataset = dataset.map(lambda x: 2.*x)
# Resize each example to img_size x img_size, then batch.
dataset = dataset.map(modify)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
# Model creation using tensors from the get_next() graph node.
inputs, targets = iterator.get_next()
model_input = layers.Input(tensor=inputs)
model_output = cnn_layers(model_input)
train_model = keras.models.Model(inputs=model_input, outputs=model_output)
# target_tensors wires the dataset labels straight into the loss, so fit()
# below needs no x/y arguments.
train_model.compile(optimizer=keras.optimizers.RMSprop(lr=lr, decay=1e-5),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'],
                    target_tensors=[targets])
train_model.summary()
time_reporter = StepTimeReporter()
callbacks = [time_reporter]
train_model.fit(epochs=epochs,
                steps_per_epoch=steps_per_epoch,callbacks=callbacks)
# Save the model weights.
weight_path = os.path.join(tempfile.gettempdir(), 'saved_wt.h5')
train_model.save_weights(weight_path)
# Clean up the TF session.
K.clear_session()
# Second session to test loading trained model without tensors.
x_test = x_test.astype(np.float32)
x_test = np.expand_dims(x_test, -1)
x_test_inp = layers.Input(shape=x_test.shape[1:])
test_out = cnn_layers(x_test_inp)
test_model = keras.models.Model(inputs=x_test_inp, outputs=test_out)
test_model.load_weights(weight_path)
# sparse loss here because y_test stays as integer labels (no one-hot).
test_model.compile(optimizer='rmsprop',
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])
test_model.summary()
# NOTE(review): the third positional argument of Model.evaluate is
# batch_size in Keras, so this evaluates with batch_size=num_classes (10)
# — confirm that is intended and not a leftover.
loss, acc = test_model.evaluate(x_test, y_test, num_classes)
print('\nTest accuracy: {0}'.format(acc))
"os.remove",
"argparse.ArgumentParser",
"runai.mp.init",
"keras.models.Model",
"os.path.isfile",
"keras.layers.Input",
"tensorflow.get_default_graph",
"tensorflow.one_hot",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.backend.clear_session",
"keras.layers.Dropout",
"keras.back... | [((74, 144), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run model parallelism on MNIST."""'}), "(description='Run model parallelism on MNIST.')\n", (97, 144), False, 'import argparse\n'), ((3001, 3018), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (3016, 3018), False, 'from keras.datasets import mnist\n'), ((3072, 3099), 'numpy.expand_dims', 'np.expand_dims', (['x_train', '(-1)'], {}), '(x_train, -1)\n', (3086, 3099), True, 'import numpy as np\n'), ((3110, 3142), 'tensorflow.one_hot', 'tf.one_hot', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (3120, 3142), True, 'import tensorflow as tf\n'), ((3213, 3267), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), '((x_train, y_train))\n', (3247, 3267), True, 'import tensorflow as tf\n'), ((3939, 3966), 'keras.layers.Input', 'layers.Input', ([], {'tensor': 'inputs'}), '(tensor=inputs)\n', (3951, 3966), False, 'from keras import layers\n'), ((4020, 4080), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': 'model_input', 'outputs': 'model_output'}), '(inputs=model_input, outputs=model_output)\n', (4038, 4080), False, 'import keras\n'), ((4642, 4659), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (4657, 4659), True, 'from keras import backend as K\n'), ((4769, 4795), 'numpy.expand_dims', 'np.expand_dims', (['x_test', '(-1)'], {}), '(x_test, -1)\n', (4783, 4795), True, 'import numpy as np\n'), ((4810, 4846), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'x_test.shape[1:]'}), '(shape=x_test.shape[1:])\n', (4822, 4846), False, 'from keras import layers\n'), ((4894, 4949), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': 'x_test_inp', 'outputs': 'test_out'}), '(inputs=x_test_inp, outputs=test_out)\n', (4912, 4949), False, 'import keras\n'), ((396, 458), 'runai.mp.init', 'runai.mp.init', ([], {'splits': 'args.splits', 
'method': 'runai.mp.Method.Cout'}), '(splits=args.splits, method=runai.mp.Method.Cout)\n', (409, 458), False, 'import runai\n'), ((697, 708), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (706, 708), True, 'from keras import backend as K\n'), ((4538, 4559), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (4557, 4559), False, 'import tempfile\n'), ((1107, 1118), 'time.time', 'time.time', ([], {}), '()\n', (1116, 1118), False, 'import time\n'), ((1359, 1385), 'os.path.isfile', 'os.path.isfile', (['self.fname'], {}), '(self.fname)\n', (1373, 1385), False, 'import os\n'), ((1767, 1778), 'time.time', 'time.time', ([], {}), '()\n', (1776, 1778), False, 'import time\n'), ((1934, 1995), 'keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(32, (3, 3), activation='relu', padding='valid')\n", (1947, 1995), False, 'from keras import layers\n'), ((2034, 2071), 'keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2053, 2071), False, 'from keras import layers\n'), ((2083, 2127), 'keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2096, 2127), False, 'from keras import layers\n'), ((2139, 2176), 'keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2158, 2176), False, 'from keras import layers\n'), ((2188, 2204), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2202, 2204), False, 'from keras import layers\n'), ((2216, 2252), 'keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (2228, 2252), False, 'from keras import layers\n'), ((2264, 2283), 'keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (2278, 2283), False, 'from keras import layers\n'), ((2305, 2372), 'keras.layers.Dense', 'layers.Dense', (['num_classes'], 
{'activation': '"""softmax"""', 'name': '"""x_train_out"""'}), "(num_classes, activation='softmax', name='x_train_out')\n", (2317, 2372), False, 'from keras import layers\n'), ((4112, 4156), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': 'lr', 'decay': '(1e-05)'}), '(lr=lr, decay=1e-05)\n', (4136, 4156), False, 'import keras\n'), ((1400, 1421), 'os.remove', 'os.remove', (['self.fname'], {}), '(self.fname)\n', (1409, 1421), False, 'import os\n'), ((2911, 2931), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (2925, 2931), True, 'import tensorflow as tf\n'), ((1214, 1225), 'time.time', 'time.time', ([], {}), '()\n', (1223, 1225), False, 'import time\n'), ((1874, 1885), 'time.time', 'time.time', ([], {}), '()\n', (1883, 1885), False, 'import time\n'), ((1674, 1696), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1694, 1696), True, 'import tensorflow as tf\n')] |
from flask import Flask
from flask import request
from flask import jsonify
import numpy as np
import pandas as pd
import scorecardpy as sc
import joblib
import os
import logging
import logging.handlers
import time
# Suppress warnings emitted by the libraries below.
import warnings
warnings.filterwarnings('ignore')
from scorecardpy.woebin import woepoints_ply1
# Logging setup: write to a midnight-rotated file AND echo to the console.
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler = logging.handlers.TimedRotatingFileHandler('model.log', when='midnight',encoding='utf-8')# rotates the log file automatically at midnight
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()# prints log records to the console
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
# Allow non-ASCII (Chinese) text in JSON responses.
app.config['JSON_AS_ASCII'] = False
# Shared paths: models live in <repo_root>/model/.
modelDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+ '/model/'
# Error payloads (errorMsg text is user-facing and stays in Chinese).
code10001 = {'code':'10001','errorType':'KeyError','errorMsg':'输入特征错误:'}
code10002 = {'code':'10002','errorType':'ValueError','errorMsg':'输入值错误:'}
code10003 = {'code':'10003','errorType':'UncheckException','errorMsg':'未知错误:'}
code10009 = {'code':'10009','errorType':'IOException','errorMsg':'模型文件不可达:'}
# Module-level cache: the last loaded model path / bins / model object,
# reused across requests to avoid reloading the pickle on every call.
modelPath,binsPath='',''
bins,model=[],[]
# Check whether a key is present in the request payload.
def keyIsExist(data, key):
    """Return True if `key` is one of `data`'s keys, else False.

    The original scanned ``data.keys()`` with a manual loop; a dict
    membership test is the O(1) idiomatic equivalent.
    """
    return key in data
# Resolve which model file a request refers to.
def modelFilePathCheck(request):
    """Return the model path named in the payload, or the default LR model."""
    default_path = modelDir + 'default/lr.pkl'
    if not keyIsExist(request, 'modelFilePath'):
        return default_path
    return request['modelFilePath']
# Encode raw feature values with their WOE from the optimal-binning map.
def applyBinMap(X_data, bin_map, var):
    """
    Encode one raw feature column with the WOE values of its optimal bins.
    ------------------------------------------------
    Params
    X_data: pandas DataFrame holding the raw feature columns
    bin_map: pandas DataFrame map table with columns
             'var_name', 'min', 'max', 'WOE'
    var: str, name of the column of X_data to encode
    ------------------------------------------------
    Return
    bin_res: pandas Series, WOE-encoded values (same index/name as column)
    """
    x = X_data[var]
    bin_map = bin_map[bin_map['var_name'] == var]
    bin_res = np.array([0] * x.shape[-1], dtype=float)
    for i in bin_map.index:
        upper = bin_map['max'][i]
        lower = bin_map['min'][i]
        # Build the positional mask directly instead of the original
        # np.where + np.in1d round-trip (np.in1d is deprecated as of
        # NumPy 1.25/2.0). Membership-by-value over values that were
        # themselves selected by a value predicate is exactly that
        # predicate, so the result is unchanged.
        if lower == upper:
            # Degenerate bin: the original assigned this WOE to EVERY value
            # >= lower (not only values equal to it) — preserved here.
            mask = np.asarray(x >= lower)
        else:
            mask = np.asarray((x >= lower) & (x < upper))
        bin_res[mask] = bin_map['WOE'][i]
    bin_res = pd.Series(bin_res, index=x.index)
    bin_res.name = x.name
    return bin_res
# Test whether a string represents a number.
def is_number(s):
    """Return True if `s` parses as a float or is a unicode numeric char."""
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True
    # Fall back to unicode numerals such as '五'; numeric() raises TypeError
    # for multi-character strings and ValueError for non-numeric ones.
    try:
        import unicodedata
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
def calculateFeatures(dt, card):
    """Compute per-feature score points for a single-row frame `dt`.

    `card` may be a dict of per-variable scorecard frames or one combined
    DataFrame. Returns a dict mapping each feature to its points (as str).
    """
    data = dt.copy(deep=True)
    # Normalise the card into a single DataFrame.
    if isinstance(card, dict):
        card_table = pd.concat(card, ignore_index=True)
    elif isinstance(card, pd.DataFrame):
        card_table = card.copy(deep=True)
    # Every scored variable except the intercept ('basepoints') row.
    feature_names = card_table.loc[card_table.variable != 'basepoints',
                                   'variable'].unique()
    points_by_feature = {}
    for feature in feature_names:
        card_rows = card_table.loc[card_table['variable'] == feature]
        # Map the raw value through the scorecard to its points.
        feature_points = woepoints_ply1(data[[feature]], card_rows, feature,
                                       woe_points="points")
        feature_points.columns = [feature]
        points_by_feature[feature] = str(feature_points.values[0][0])
    return points_by_feature
@app.route('/model/execute',methods=['POST'])
def execute_data():
    """Score one applicant: load/cache the model, WOE-encode the payload,
    predict a label and return the total score plus per-feature scores."""
    start = time.time()
    try:
        # Module-level cache of the last loaded model (see setup section).
        global modelPath,binsPath,bins,model
        logger.info('入参:%s',str(request.get_data()))
        request_data = request.get_json() # parse the JSON request body
        paramsJson = request_data['paramsData']
        modelFilePath = modelFilePathCheck(request_data)
        if (type(paramsJson) == type({})):
            # Coerce numeric strings in the payload to int.
            # NOTE(review): is_number accepts float strings like '3.5', but
            # int('3.5') raises ValueError and returns code10002 — confirm
            # whether float payload values should be supported.
            items = paramsJson.items();
            for key,value in items:
                if (is_number(value)):
                    paramsJson[key] = int(value)
            logger.info("paramsJson:%s",paramsJson)
            # Build a one-row DataFrame from the flat payload.
            df = pd.json_normalize(paramsJson)
            if (modelPath != modelFilePath):
                # Model path changed: reload the pickle and refresh the cache.
                logger.info('调用模型路径发生变化,重新加载模型!')
                logger.info('global modelFilePath:%s',modelPath)
                logger.info('param modelFilePath:%s',modelFilePath)
                modelPath = modelFilePath
                # Load the pickled model.
                model = joblib.load(modelFilePath)
            else:
                # Same path as last call: reuse the cached model object.
                logger.info('调用模型路径未发生变化,使用缓存中模型。')
                logger.info('global modelFilePath:%s',modelPath)
            # WOE-encode the raw values using the bins stored on the model.
            bins = model.bins
            df_woe = sc.woebin_ply(df, bins)
            # Predict the class label.
            label = model.predict(df_woe)[0]
            # Build the scorecard from bins + model coefficients.
            card = sc.scorecard(bins, model, df_woe.keys())
            # Score the applicant (per-feature points included).
            score = sc.scorecard_ply(df, card,only_total_score=False, print_step=0)
            # Collect each feature's contribution to the score.
            featureScore = {}
            # featureScore = calculateFeatures(df, card)
            if isinstance(card, dict):
                card_df = pd.concat(card, ignore_index=True)
            elif isinstance(card, pd.DataFrame):
                card_df = card.copy(deep=True)
            # x variables
            xs = card_df.loc[card_df.variable != 'basepoints', 'variable'].unique()
            for i in xs:
                featureScore[i] = score[i + '_points'][0]
            result = {}
            result['code'] = '00000'
            result['score'] = str(score['score'][0])
            result['label'] = str(label)
            result['featureScore'] = featureScore
            end = time.time()
            logger.info("运行结果:%s,模型执行耗时:%s",result,end-start)
            return jsonify(result)
        # Payload was not a JSON object: report an input-value error.
        code10002['errorMsg']='输入值错误:请传入json格式参数'
        return jsonify(code10002)
    except KeyError as e:
        logger.info(e)
        code10001['errorMsg']='输入特征错误:' + str(e)
        return jsonify(code10001)
    except ValueError as e:
        logger.info(e)
        code10002['errorMsg']='输入值错误:' + str(e)
        return jsonify(code10002)
    except Exception as e:
        logger.info(e)
        code10003['errorMsg']='未知错误:' + str(e)
        return jsonify(code10003)
@app.route('/model/features',methods=['POST'])
def features_data():
    """Return the list of feature names the requested model was trained on."""
    start = time.time()
    try:
        logger.info(str(request.get_data()))
        request_data = request.get_json() # parse the JSON request body
        modelFilePath = modelFilePathCheck(request_data)
        if os.path.isfile(modelFilePath):
            # Load the pickled model and read its binning table.
            model = joblib.load(modelFilePath)
            bins = model.bins
            result = {}
            result['code'] = '00000'
            result['features'] = list(bins.keys())
            end = time.time()
            # NOTE(review): uses logging.info here while the rest of the file
            # uses logger.info — same root logger, but inconsistent style.
            logging.info('特征查询耗时:%s',end-start)
            return jsonify(result)
        # Model file not found on disk.
        return jsonify(code10009)
    except KeyError as e:
        logger.info(e)
        code10001['errorMsg']='输入参数错误:' + str(e)
        return jsonify(code10001)
if __name__ == '__main__':
    # Bug fix: app.run() blocks until the server shuts down, so the original
    # second call `app.run(threaded=True)` never executed and the threaded
    # option was silently ignored. Fold it into the single call.
    app.run(host='0.0.0.0', port=5003, threaded=True)  # bind on all interfaces
    print('运行结束')
"unicodedata.numeric",
"logging.Formatter",
"flask.jsonify",
"os.path.isfile",
"flask.request.get_json",
"scorecardpy.scorecard_ply",
"os.path.abspath",
"scorecardpy.woebin.woepoints_ply1",
"logging.handlers.TimedRotatingFileHandler",
"pandas.concat",
"logging.StreamHandler",
"flask.request.ge... | [((248, 281), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (271, 281), False, 'import warnings\n'), ((346, 365), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (363, 365), False, 'import logging\n'), ((378, 424), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""'], {}), "('%(asctime)s - %(message)s')\n", (395, 424), False, 'import logging\n'), ((440, 533), 'logging.handlers.TimedRotatingFileHandler', 'logging.handlers.TimedRotatingFileHandler', (['"""model.log"""'], {'when': '"""midnight"""', 'encoding': '"""utf-8"""'}), "('model.log', when='midnight',\n encoding='utf-8')\n", (481, 533), False, 'import logging\n'), ((607, 630), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (628, 630), False, 'import logging\n'), ((780, 795), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (785, 795), False, 'from flask import Flask\n'), ((2089, 2129), 'numpy.array', 'np.array', (['([0] * x.shape[-1])'], {'dtype': 'float'}), '([0] * x.shape[-1], dtype=float)\n', (2097, 2129), True, 'import numpy as np\n'), ((2527, 2560), 'pandas.Series', 'pd.Series', (['bin_res'], {'index': 'x.index'}), '(bin_res, index=x.index)\n', (2536, 2560), True, 'import pandas as pd\n'), ((3684, 3695), 'time.time', 'time.time', ([], {}), '()\n', (3693, 3695), False, 'import time\n'), ((6542, 6553), 'time.time', 'time.time', ([], {}), '()\n', (6551, 6553), False, 'import time\n'), ((2404, 2418), 'numpy.in1d', 'np.in1d', (['x', 'x1'], {}), '(x, x1)\n', (2411, 2418), True, 'import numpy as np\n'), ((2764, 2786), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (2783, 2786), False, 'import unicodedata\n'), ((3032, 3066), 'pandas.concat', 'pd.concat', (['card'], {'ignore_index': '(True)'}), '(card, ignore_index=True)\n', (3041, 3066), True, 'import pandas as pd\n'), ((3442, 3498), 'scorecardpy.woebin.woepoints_ply1', 'woepoints_ply1', (['dtx', 'cardx', 
'feature'], {'woe_points': '"""points"""'}), "(dtx, cardx, feature, woe_points='points')\n", (3456, 3498), False, 'from scorecardpy.woebin import woepoints_ply1\n'), ((3826, 3844), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (3842, 3844), False, 'from flask import request\n'), ((6043, 6061), 'flask.jsonify', 'jsonify', (['code10002'], {}), '(code10002)\n', (6050, 6061), False, 'from flask import jsonify\n'), ((6631, 6649), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6647, 6649), False, 'from flask import request\n'), ((6726, 6755), 'os.path.isfile', 'os.path.isfile', (['modelFilePath'], {}), '(modelFilePath)\n', (6740, 6755), False, 'import os\n'), ((7098, 7116), 'flask.jsonify', 'jsonify', (['code10009'], {}), '(code10009)\n', (7105, 7116), False, 'from flask import jsonify\n'), ((932, 957), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (947, 957), False, 'import os\n'), ((4293, 4322), 'pandas.json_normalize', 'pd.json_normalize', (['paramsJson'], {}), '(paramsJson)\n', (4310, 4322), True, 'import pandas as pd\n'), ((4879, 4902), 'scorecardpy.woebin_ply', 'sc.woebin_ply', (['df', 'bins'], {}), '(df, bins)\n', (4892, 4902), True, 'import scorecardpy as sc\n'), ((5081, 5145), 'scorecardpy.scorecard_ply', 'sc.scorecard_ply', (['df', 'card'], {'only_total_score': '(False)', 'print_step': '(0)'}), '(df, card, only_total_score=False, print_step=0)\n', (5097, 5145), True, 'import scorecardpy as sc\n'), ((5868, 5879), 'time.time', 'time.time', ([], {}), '()\n', (5877, 5879), False, 'import time\n'), ((5961, 5976), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (5968, 5976), False, 'from flask import jsonify\n'), ((6176, 6194), 'flask.jsonify', 'jsonify', (['code10001'], {}), '(code10001)\n', (6183, 6194), False, 'from flask import jsonify\n'), ((6310, 6328), 'flask.jsonify', 'jsonify', (['code10002'], {}), '(code10002)\n', (6317, 6328), False, 'from flask import jsonify\n'), ((6442, 6460), 
'flask.jsonify', 'jsonify', (['code10003'], {}), '(code10003)\n', (6449, 6460), False, 'from flask import jsonify\n'), ((6800, 6826), 'joblib.load', 'joblib.load', (['modelFilePath'], {}), '(modelFilePath)\n', (6811, 6826), False, 'import joblib\n'), ((6987, 6998), 'time.time', 'time.time', ([], {}), '()\n', (6996, 6998), False, 'import time\n'), ((7011, 7049), 'logging.info', 'logging.info', (['"""特征查询耗时:%s"""', '(end - start)'], {}), "('特征查询耗时:%s', end - start)\n", (7023, 7049), False, 'import logging\n'), ((7066, 7081), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (7073, 7081), False, 'from flask import jsonify\n'), ((7231, 7249), 'flask.jsonify', 'jsonify', (['code10001'], {}), '(code10001)\n', (7238, 7249), False, 'from flask import jsonify\n'), ((3788, 3806), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (3804, 3806), False, 'from flask import request\n'), ((4640, 4666), 'joblib.load', 'joblib.load', (['modelFilePath'], {}), '(modelFilePath)\n', (4651, 4666), False, 'import joblib\n'), ((5320, 5354), 'pandas.concat', 'pd.concat', (['card'], {'ignore_index': '(True)'}), '(card, ignore_index=True)\n', (5329, 5354), True, 'import pandas as pd\n'), ((6587, 6605), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (6603, 6605), False, 'from flask import request\n'), ((2273, 2293), 'numpy.where', 'np.where', (['(x >= lower)'], {}), '(x >= lower)\n', (2281, 2293), True, 'import numpy as np\n'), ((2331, 2367), 'numpy.where', 'np.where', (['((x >= lower) & (x < upper))'], {}), '((x >= lower) & (x < upper))\n', (2339, 2367), True, 'import numpy as np\n')] |
import numpy as np
from pathlib import Path
from kenning.core.dataset import Dataset
from kenning.core.measurements import Measurements
class RandomizedClassificationDataset(Dataset):
    """
    A sample classification dataset with randomized contents.

    Inputs and outputs are generated pseudo-randomly (seeded per sample
    index), so it is only useful for speed and utilization benchmarks —
    quality metrics are meaningless here.
    """
    def __init__(
            self,
            root: Path,
            batch_size: int = 1,
            samplescount: int = 1000,
            inputdims: list = (224, 224, 3),
            outputdims: list = (1000,)):
        """
        Creates randomized dataset.

        Parameters
        ----------
        root : Path
            Deprecated argument, not used in this dataset
        batch_size : int
            The size of batches of data delivered during inference
        samplescount : int
            The number of samples in the dataset
        inputdims : list
            The dimensionality of the inputs
        outputdims : list
            The dimensionality of the outputs
        """
        self.samplescount = samplescount
        self.inputdims = inputdims
        self.outputdims = outputdims
        super().__init__(root, batch_size)
    @classmethod
    def form_argparse(cls):
        # Extend the base parser with this dataset's knobs.
        parser, group = super().form_argparse()
        group.add_argument(
            '--num-samples',
            help='Number of samples to process',
            type=int,
            default=1000
        )
        group.add_argument(
            '--input-dims',
            help='Dimensionality of the inputs',
            type=int,
            nargs='+',
            default=[224, 224, 3]
        )
        group.add_argument(
            '--output-dims',
            help='Dimensionality of the outputs',
            type=int,
            nargs='+',
            default=[1000]
        )
        return parser, group
    @classmethod
    def from_argparse(cls, args):
        # Alternate constructor from parsed command-line arguments.
        return cls(
            args.dataset_root,
            args.inference_batch_size,
            args.num_samples,
            args.input_dims,
            args.output_dims
        )
    def prepare(self):
        # Sample "identifiers" are simply their indices; each index doubles
        # as the RNG seed used when materializing the sample.
        self.dataX = list(range(self.samplescount))
        self.dataY = list(range(self.samplescount))
    def download_dataset(self):
        # Nothing to download — data is generated on the fly.
        pass
    def prepare_input_samples(self, samples):
        # Each input is a deterministic pseudo-random uint image, seeded by
        # the sample index so repeated runs see identical data.
        def draw(seed):
            np.random.seed(seed)
            return np.random.randint(0, 255, size=self.inputdims)
        return [draw(sample) for sample in samples]
    def prepare_output_samples(self, samples):
        # Outputs are deterministic pseudo-random float vectors, seeded the
        # same way as the inputs.
        def draw(seed):
            np.random.seed(seed)
            return np.random.rand(*self.outputdims)
        return [draw(sample) for sample in samples]
    def evaluate(self, predictions, truth):
        # No quality metrics make sense on random data; return empty ones.
        return Measurements()
    def calibration_dataset_generator(
            self,
            percentage: float = 0.25,
            seed: int = 12345):
        # Yield a fraction of the dataset as single-sample batches.
        count = int(self.samplescount * percentage)
        for _ in range(count):
            yield [np.random.randint(0, 255, size=self.inputdims)]
| [
"numpy.random.rand",
"numpy.random.randint",
"numpy.random.seed",
"kenning.core.measurements.Measurements"
] | [((2864, 2878), 'kenning.core.measurements.Measurements', 'Measurements', ([], {}), '()\n', (2876, 2878), False, 'from kenning.core.measurements import Measurements\n'), ((2469, 2491), 'numpy.random.seed', 'np.random.seed', (['sample'], {}), '(sample)\n', (2483, 2491), True, 'import numpy as np\n'), ((2699, 2721), 'numpy.random.seed', 'np.random.seed', (['sample'], {}), '(sample)\n', (2713, 2721), True, 'import numpy as np\n'), ((2518, 2564), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': 'self.inputdims'}), '(0, 255, size=self.inputdims)\n', (2535, 2564), True, 'import numpy as np\n'), ((2748, 2780), 'numpy.random.rand', 'np.random.rand', (['*self.outputdims'], {}), '(*self.outputdims)\n', (2762, 2780), True, 'import numpy as np\n'), ((3087, 3133), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': 'self.inputdims'}), '(0, 255, size=self.inputdims)\n', (3104, 3133), True, 'import numpy as np\n')] |
#import csv
import cv2
import time
import random
import imutils
import numpy as np
from PIL import ImageGrab
import pydirectinput as pdi
from PIL import Image, ImageOps
# Frame counter ("say" is Turkish for "count"); steering only starts after
# the first 100 frames (see the main loop below).
say = 0
# Right/left scan columns on the 128x128 edge image ("sag" = right,
# "sol" = left, "tara" = scan).  NOTE: `global` at module level is a no-op;
# these statements only document intent.
global soltara
soltara = 63
global sagtara
sagtara = 64
def tara():
    # "tara" = "scan" (Turkish).  Resets both scan columns to the image
    # centre (64), optionally bridges small gaps between detected edge
    # segments with white lines, then walks row 50 outwards in both
    # directions until a non-zero (edge) pixel is hit.  Results are left in
    # the module-level globals `soltara` / `sagtara`.
    global sagtara
    sagtara = 64
    global soltara
    soltara = 64
    pixel = img[30, sagtara]  # NOTE(review): unused -- overwritten below; likely leftover
    minLineLength = 5
    maxLineGap = 1
    # Detect line segments in the binary edge image.
    lines = cv2.HoughLinesP(img,1,np.pi/180,100,minLineLength,maxLineGap)
    if lines is not None:
        # Collect the start and end points of all detected segments.
        list1 = []
        list2 = []
        for x in range(0, len(lines)):
            for x1,y1,x2,y2 in lines[x]:
                list1.append((x1,y1))
                list2.append((x2,y2))
        # Randomly pair up start/end points and, when they are horizontally
        # close (< 20 px), draw a white line to bridge the gap so the scan
        # below does not slip through holes in the edge map.
        for i in range(0, int(len(list1) / 2)):
            sequence = [i for i in range(0, len(list2))]
            q = random.choice(sequence)
            mesafe = list1[i][0] - list2[q][0]  # "mesafe" = distance (Turkish)
            if mesafe < 0:
                mesafe = mesafe * -1
            if mesafe < 20:
                cv2.line(img,list1[i],list2[q],(255,255,255),2)
    # Scan right from the centre until an edge pixel (non-zero) is found.
    while sagtara < 127:
        pixel = img[50, sagtara]
        if pixel == 0:
            sagtara = sagtara + 1
        else:
            break
    # Scan left from the centre until an edge pixel is found.
    while soltara > 1:
        pixel = img[50, soltara]
        if pixel == 0:
            soltara = soltara - 1
        else:
            break
# ---------------------------------------------------------------------------
# Main loop: grab the screen, build a binary edge map of the road region,
# find the free corridor with tara(), and steer with A/D key presses.
# ---------------------------------------------------------------------------
while True:
    # Capture frame-by-frame (translated comment: "full screen")
    img = ImageGrab.grab(bbox=(0, 0, 1280, 720))
    img = np.array(img)
    height, width, channels = img.shape
    # Grayscale -> denoise -> edge map.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)
    img = cv2.Canny(img, 100, 200)
    # Crop to the road region: middle third vertically, middle half
    # horizontally, then the lower quarter of that crop.
    img = img[int(height / 3):int((height / 3) * 2), int((width / 8) * 2):int((width / 8) * 6)]
    height, width = img.shape
    img = img[int((height / 8) * 6):height, 0:width]
    img = cv2.resize(img, (128, 128))
    # Thicken the edges so the row scan in tara() does not slip through gaps.
    kernel = np.ones((5, 2), np.uint8)
    img = cv2.dilate(img, kernel, iterations=3)
    if say > 100:  # warm-up: skip the first 100 frames before steering
        tara()
    if say == 200:
        print('\a')  # audible bell once the bot has been running for a while
    say = say + 1
    # Draw the detected free corridor on row 50 for visualization.
    cikti = cv2.line(img, (soltara, 50), (sagtara, 50), (255, 255, 255), 1)
    orta = int((soltara + sagtara) / 2)  # corridor midpoint ("orta" = middle)
    if say > 100:
        if soltara < 5 or sagtara > 123:
            # Corridor touches the image border: detection unreliable, skip.
            print("x")
        else:
            # Steer towards the corridor midpoint (dead zone 60..68).
            if orta < 60:
                pdi.press("a")
            elif orta > 68:
                pdi.press("d")
    # Display the resulting frame
    cikti = cv2.resize(cikti, (512, 512))
    cv2.imshow('wow', cikti)
    # Re-centre the scan columns when they ran off the image.
    if sagtara == 128:
        sagtara = 64
    if soltara == 0:
        soltara = 64
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Cleanup.  BUG FIX: the original called `cap.release()`, but no
# cv2.VideoCapture (`cap`) is ever created -- frames come from ImageGrab --
# so that line raised a NameError on exit.  Only the windows need closing.
cv2.destroyAllWindows()
| [
"cv2.line",
"cv2.Canny",
"PIL.ImageGrab.grab",
"cv2.cvtColor",
"cv2.medianBlur",
"cv2.dilate",
"cv2.waitKey",
"cv2.imshow",
"numpy.ones",
"random.choice",
"numpy.array",
"cv2.HoughLinesP",
"pydirectinput.press",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((2941, 2964), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2962, 2964), False, 'import cv2\n'), ((429, 497), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['img', '(1)', '(np.pi / 180)', '(100)', 'minLineLength', 'maxLineGap'], {}), '(img, 1, np.pi / 180, 100, minLineLength, maxLineGap)\n', (444, 497), False, 'import cv2\n'), ((1508, 1546), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {'bbox': '(0, 0, 1280, 720)'}), '(bbox=(0, 0, 1280, 720))\n', (1522, 1546), False, 'from PIL import ImageGrab\n'), ((1570, 1583), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1578, 1583), True, 'import numpy as np\n'), ((1640, 1677), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1652, 1677), False, 'import cv2\n'), ((1689, 1711), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(5)'], {}), '(img, 5)\n', (1703, 1711), False, 'import cv2\n'), ((1723, 1747), 'cv2.Canny', 'cv2.Canny', (['img', '(100)', '(200)'], {}), '(img, 100, 200)\n', (1732, 1747), False, 'import cv2\n'), ((1945, 1972), 'cv2.resize', 'cv2.resize', (['img', '(128, 128)'], {}), '(img, (128, 128))\n', (1955, 1972), False, 'import cv2\n'), ((1987, 2012), 'numpy.ones', 'np.ones', (['(5, 2)', 'np.uint8'], {}), '((5, 2), np.uint8)\n', (1994, 2012), True, 'import numpy as np\n'), ((2022, 2059), 'cv2.dilate', 'cv2.dilate', (['img', 'kernel'], {'iterations': '(3)'}), '(img, kernel, iterations=3)\n', (2032, 2059), False, 'import cv2\n'), ((2176, 2239), 'cv2.line', 'cv2.line', (['img', '(soltara, 50)', '(sagtara, 50)', '(255, 255, 255)', '(1)'], {}), '(img, (soltara, 50), (sagtara, 50), (255, 255, 255), 1)\n', (2184, 2239), False, 'import cv2\n'), ((2634, 2663), 'cv2.resize', 'cv2.resize', (['cikti', '(512, 512)'], {}), '(cikti, (512, 512))\n', (2644, 2663), False, 'import cv2\n'), ((2669, 2693), 'cv2.imshow', 'cv2.imshow', (['"""wow"""', 'cikti'], {}), "('wow', cikti)\n", (2679, 2693), False, 'import cv2\n'), ((2828, 2842), 'cv2.waitKey', 
'cv2.waitKey', (['(1)'], {}), '(1)\n', (2839, 2842), False, 'import cv2\n'), ((858, 881), 'random.choice', 'random.choice', (['sequence'], {}), '(sequence)\n', (871, 881), False, 'import random\n'), ((2508, 2522), 'pydirectinput.press', 'pdi.press', (['"""a"""'], {}), "('a')\n", (2517, 2522), True, 'import pydirectinput as pdi\n'), ((1062, 1115), 'cv2.line', 'cv2.line', (['img', 'list1[i]', 'list2[q]', '(255, 255, 255)', '(2)'], {}), '(img, list1[i], list2[q], (255, 255, 255), 2)\n', (1070, 1115), False, 'import cv2\n'), ((2569, 2583), 'pydirectinput.press', 'pdi.press', (['"""d"""'], {}), "('d')\n", (2578, 2583), True, 'import pydirectinput as pdi\n')] |
__author__ = 'Chad'

import numpy as np

# A 1-D array built from a plain Python list.
data = [1, 2, 3, 4, 5]
arr = np.array(data)

# 2-D: 40 consecutive integers laid out as 5 rows x 8 columns.
arr2d = np.reshape(np.arange(40), (5, 8))
# 3-D: 30 consecutive integers laid out as 2 x 3 x 5.
arr3d = np.reshape(np.arange(30), (2, 3, 5))

# Demonstrate basic 2-D indexing and slicing.
print(arr2d)
print(arr2d[2])
print(arr2d[2, 3])
print(arr2d[2, 3:])
print(arr2d[0::, 3])

# Boolean mask: selects rows 2 and 4 of arr2d.
bool_index = np.array([False, False, True, False, True])
print(arr2d[bool_index, 0]) | [
"numpy.array",
"numpy.arange"
] | [((70, 84), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (78, 84), True, 'import numpy as np\n'), ((266, 309), 'numpy.array', 'np.array', (['[False, False, True, False, True]'], {}), '([False, False, True, False, True])\n', (274, 309), True, 'import numpy as np\n'), ((94, 107), 'numpy.arange', 'np.arange', (['(40)'], {}), '(40)\n', (103, 107), True, 'import numpy as np\n'), ((131, 144), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (140, 144), True, 'import numpy as np\n')] |
try:
import numpy as np
has_numpy = True
except ImportError:
import math
has_numpy = False
try:
import scipy.constants
has_scipy = True
except ImportError:
has_scipy = False
import operator as op
from .similar import sim, nsim, gsim, lsim
def equation_extend(core):
    """Register the standard operators, functions and constants on *core*.

    Binary/unary operators are added with a plain-text template, a LaTeX
    template, an associativity flag, a precedence level and the callable
    implementing them.  Math functions and constants come from numpy/scipy
    when available, falling back to the ``math`` module otherwise.
    """
    def product(*args):
        # Called either as product(iterable) or product(a, b, c, ...).
        if len(args) == 1:
            # Single argument: treat it as an iterable of factors
            # (mirrors the behaviour of sumargs below).
            if has_numpy:
                return np.prod(args[0])
            values = args[0]
        else:
            values = args
        # BUG FIX: the original used the Python 2 builtin ``reduce``, which
        # raises NameError on Python 3; fold manually instead.
        result = 1
        for value in values:
            result = op.mul(result, value)
        return result
    def sumargs(*args):
        # sum(iterable) or sum(a, b, c, ...).
        if len(args) == 1:
            return sum(args[0])
        else:
            return sum(args)
    core.addOp('+',"({0:s} + {1:s})","\\left({0:s} + {1:s}\\right)",False,3,op.add)
    core.addOp('-',"({0:s} - {1:s})","\\left({0:s} - {1:s}\\right)",False,3,op.sub)
    core.addOp('*',"({0:s} * {1:s})","\\left({0:s} \\times {1:s}\\right)",False,2,op.mul)
    core.addOp('/',"({0:s} / {1:s})","\\frac{{{0:s}}}{{{1:s}}}",False,2,op.truediv)
    core.addOp('%',"({0:s} % {1:s})","\\left({0:s} \\bmod {1:s}\\right)",False,2,op.mod)
    core.addOp('^',"({0:s} ^ {1:s})","{0:s}^{{{1:s}}}",False,1,op.pow)
    core.addOp('**',"({0:s} ^ {1:s})","{0:s}^{{{1:s}}}",False,1,op.pow)
    core.addOp('&',"({0:s} & {1:s})","\\left({0:s} \\land {1:s}\\right)",False,4,op.and_)
    core.addOp('|',"({0:s} | {1:s})","\\left({0:s} \\lor {1:s}\\right)",False,4,op.or_)
    core.addOp('</>',"({0:s} </> {1:s})","\\left({0:s} \\oplus {1:s}\\right)",False,4,op.xor)
    core.addOp('&|',"({0:s} </> {1:s})","\\left({0:s} \\oplus {1:s}\\right)",False,4,op.xor)
    core.addOp('|&',"({0:s} </> {1:s})","\\left({0:s} \\oplus {1:s}\\right)",False,4,op.xor)
    core.addOp('==',"({0:s} == {1:s})","\\left({0:s} = {1:s}\\right)",False,5,op.eq)
    core.addOp('=',"({0:s} == {1:s})","\\left({0:s} = {1:s}\\right)",False,5,op.eq)
    core.addOp('~',"({0:s} ~ {1:s})","\\left({0:s} \\approx {1:s}\\right)",False,5,sim)
    core.addOp('!~',"({0:s} !~ {1:s})","\\left({0:s} \\not\\approx {1:s}\\right)",False,5,nsim)
    # BUG FIX: the LaTeX for inequality used "\neg" (logical not); "\neq" is
    # the inequality relation.
    core.addOp('!=',"({0:s} != {1:s})","\\left({0:s} \\neq {1:s}\\right)",False,5,op.ne)
    core.addOp('<>',"({0:s} != {1:s})","\\left({0:s} \\neq {1:s}\\right)",False,5,op.ne)
    core.addOp('><',"({0:s} != {1:s})","\\left({0:s} \\neq {1:s}\\right)",False,5,op.ne)
    core.addOp('<',"({0:s} < {1:s})","\\left({0:s} < {1:s}\\right)",False,5,op.lt)
    core.addOp('>',"({0:s} > {1:s})","\\left({0:s} > {1:s}\\right)",False,5,op.gt)
    core.addOp('<=',"({0:s} <= {1:s})","\\left({0:s} \\leq {1:s}\\right)",False,5,op.le)
    core.addOp('>=',"({0:s} >= {1:s})","\\left({0:s} \\geq {1:s}\\right)",False,5,op.ge)
    core.addOp('=<',"({0:s} <= {1:s})","\\left({0:s} \\leq {1:s}\\right)",False,5,op.le)
    core.addOp('=>',"({0:s} >= {1:s})","\\left({0:s} \\geq {1:s}\\right)",False,5,op.ge)
    # FIX: "\l" was an invalid escape sequence; "\\lessapprox" produces the
    # same runtime bytes without the SyntaxWarning.
    core.addOp('<~',"({0:s} <~ {1:s})","\\left({0:s} \\lessapprox {1:s}\\right)",False,5,lsim)
    core.addOp('>~',"({0:s} >~ {1:s})","\\left({0:s} \\gtrapprox {1:s}\\right)",False,5,gsim)
    core.addOp('~<',"({0:s} <~ {1:s})","\\left({0:s} \\lessapprox {1:s}\\right)",False,5,lsim)
    core.addOp('~>',"({0:s} >~ {1:s})","\\left({0:s} \\gtrapprox {1:s}\\right)",False,5,gsim)
    core.addUnaryOp('!',"(!{0:s})","\\neg{0:s}",op.not_)
    core.addUnaryOp('-',"-{0:s}","-{0:s}",op.neg)
    core.addFn('abs',"abs({0:s})","\\left|{0:s}\\right|",1,op.abs)
    core.addFn('sum',"sum({0:s})","\\sum\\left({0:s}\\right)",'+',sumargs)
    core.addFn('prod',"prod({0:s})","\\prod\\left({0:s}\\right)",'+',product)
    if has_numpy:
        core.addFn('floor',"floor({0:s})","\\lfloor {0:s} \\rfloor",1,np.floor)
        core.addFn('ceil',"ceil({0:s})","\\lceil {0:s} \\rceil",1,np.ceil)
        core.addFn('round',"round({0:s})","\\lfloor {0:s} \\rceil",1,np.round)
        core.addFn('sin',"sin({0:s})","\\sin\\left({0:s}\\right)",1,np.sin)
        core.addFn('cos',"cos({0:s})","\\cos\\left({0:s}\\right)",1,np.cos)
        core.addFn('tan',"tan({0:s})","\\tan\\left({0:s}\\right)",1,np.tan)
        core.addFn('re',"re({0:s})","\\Re\\left({0:s}\\right)",1,np.real)
        core.addFn('im',"re({0:s})","\\Im\\left({0:s}\\right)",1,np.imag)
        core.addFn('sqrt',"sqrt({0:s})","\\sqrt{{{0:s}}}",1,np.sqrt)
        core.addConst("pi",np.pi)
        core.addConst("e",np.e)
        # BUG FIX: np.Inf / np.NaN were removed in NumPy 2.0; the lowercase
        # aliases are the canonical (and value-identical) spellings.
        core.addConst("Inf",np.inf)
        core.addConst("NaN",np.nan)
    else:
        core.addFn('floor',"floor({0:s})","\\lfloor {0:s} \\rfloor",1,math.floor)
        core.addFn('ceil',"ceil({0:s})","\\lceil {0:s} \\rceil",1,math.ceil)
        core.addFn('round',"round({0:s})","\\lfloor {0:s} \\rceil",1,round)
        core.addFn('sin',"sin({0:s})","\\sin\\left({0:s}\\right)",1,math.sin)
        core.addFn('cos',"cos({0:s})","\\cos\\left({0:s}\\right)",1,math.cos)
        core.addFn('tan',"tan({0:s})","\\tan\\left({0:s}\\right)",1,math.tan)
        # BUG FIX: complex.real / complex.imag are descriptors, not callables
        # (complex.real(z) raises TypeError); use attrgetter so re()/im()
        # work on both complex and real numbers.
        core.addFn('re',"re({0:s})","\\Re\\left({0:s}\\right)",1,op.attrgetter('real'))
        core.addFn('im',"re({0:s})","\\Im\\left({0:s}\\right)",1,op.attrgetter('imag'))
        core.addFn('sqrt',"sqrt({0:s})","\\sqrt{{{0:s}}}",1,math.sqrt)
        core.addConst("pi",math.pi)
        core.addConst("e",math.e)
        core.addConst("Inf",float("Inf"))
        core.addConst("NaN",float("NaN"))
    if has_scipy:
        core.addConst("h",scipy.constants.h)
        core.addConst("hbar",scipy.constants.hbar)
        core.addConst("m_e",scipy.constants.m_e)
        core.addConst("m_p",scipy.constants.m_p)
        core.addConst("m_n",scipy.constants.m_n)
        core.addConst("c",scipy.constants.c)
        core.addConst("N_A",scipy.constants.N_A)
        core.addConst("mu_0",scipy.constants.mu_0)
        core.addConst("eps_0",scipy.constants.epsilon_0)
        core.addConst("k",scipy.constants.k)
        core.addConst("G",scipy.constants.G)
        core.addConst("g",scipy.constants.g)
        core.addConst("q",scipy.constants.e)
        core.addConst("R",scipy.constants.R)
        # BUG FIX: "sigma" (Stefan-Boltzmann constant) was bound to
        # scipy.constants.e (the elementary charge, already exposed as "q");
        # scipy.constants.sigma is the intended value.
        core.addConst("sigma",scipy.constants.sigma)
core.addConst("Rb",scipy.constants.Rydberg) | [
"numpy.prod"
] | [((340, 356), 'numpy.prod', 'np.prod', (['args[0]'], {}), '(args[0])\n', (347, 356), True, 'import numpy as np\n')] |
import numpy as np
import pybullet as p
import time
from datetime import datetime
import logging
from lgp.logic.parser import PDDLParser
from lgp.core.planner import HumoroLGP
from lgp.geometry.geometry import get_angle, get_point_on_circle
from lgp.geometry.workspace import Circle
import matplotlib
# Use TrueType (Type 42) fonts when saving figures to PDF/PS.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# Temporary imports/path setup until installation complications are resolved.
import os
import sys
_path_file = os.path.dirname(os.path.realpath(__file__))  # directory of this file
VIDEO_DIR = os.path.join(_path_file, '../../data/videos')  # frame-capture output root
sys.path.append(os.path.join(_path_file, "../../../humoro"))  # make humoro importable
from examples.prediction.hmp_interface import HumanRollout
from humoro.utility import storeimg
class DynamicLGP(object):
    '''
    Base class for dynamic LGP planners: parses the PDDL domain (and,
    optionally, the problem) given via the ``domain_file`` and
    ``problem_file`` keyword arguments.
    '''
    def __init__(self, **kwargs):
        self.domain = PDDLParser.parse_domain(kwargs.get('domain_file'))
        problem_file = kwargs.get('problem_file', None)
        self.problem = (PDDLParser.parse_problem(problem_file)
                        if problem_file is not None else None)

    def run(self):
        '''Execute the planner; subclasses must override.'''
        raise NotImplementedError()
class HumoroDynamicLGP(DynamicLGP):
    '''
    Humoro environment interfacing the LGP planner with the humoro
    human-motion rollout (PyBullet-based) simulator.
    '''
    logger = logging.getLogger(__name__)
    def __init__(self, **kwargs):
        '''
        Build the human rollout interface and the HumoroLGP planner.

        Recognized kwargs: path_to_mogaze (default 'datasets/mogaze'),
        sim_fps (default 120), prediction (default False); remaining kwargs
        are forwarded to DynamicLGP and HumoroLGP.
        '''
        super(HumoroDynamicLGP, self).__init__(**kwargs)
        path_to_mogaze = kwargs.get('path_to_mogaze', 'datasets/mogaze')
        self.sim_fps = kwargs.get('sim_fps', 120)
        self.prediction = kwargs.get('prediction', False)
        self.hr = HumanRollout(path_to_mogaze=path_to_mogaze, fps=self.sim_fps, predicting=self.prediction, load_predictions=True)
        self.humoro_lgp = HumoroLGP(self.domain, self.hr, **kwargs)
        # useful variables
        self.robot_frame = self.humoro_lgp.workspace.robot_frame
        # circle around the robot on which carried objects are placed
        self.handling_circle = Circle(np.zeros(2), radius=0.3)
        self.reset_experiment()
        # per-run directory for saved simulation frames
        self.image_dir = os.path.join(VIDEO_DIR, str(datetime.now()))
        os.makedirs(self.image_dir, exist_ok=True)
    def init_planner(self, **kwargs):
        '''Initialize the underlying HumoroLGP planner and reset run state.'''
        if 'problem' not in kwargs:
            kwargs['problem'] = self.problem
        kwargs['sim_fps'] = self.sim_fps
        kwargs['prediction'] = self.prediction
        self.humoro_lgp.init_planner(**kwargs)
        # robot pose tracking for orientation updates in update_visualization
        self.prev_robot_pos = self.humoro_lgp.workspace.get_robot_geometric_state()
        self.q = [0, 0, 0, 1]
        self.z_angle = 0.
        self.actual_robot_path = []
        self.actual_human_path = []
    def reset_experiment(self):
        '''Zero out all bookkeeping collected by run() for both modes.'''
        # single plan
        self.single_symbolic_plan_time = 0
        self.single_plans = []
        self.single_chosen_plan_id = None
        self.single_perceive_human_objects = []
        self.single_geometric_plan_time = 0
        self.single_plan_costs = []
        self.single_num_failed_plan = 0
        self.single_actual_path = None
        self.single_complete_time = 0
        self.single_reduction_ratio = 0.
        # dynamic plan (dicts keyed by the replanning timestep lgp_t)
        self.dynamic_symbolic_plan_time = {}
        self.dynamic_plans = {}
        self.dynamic_chosen_plan_id = {}
        self.dynamic_perceive_human_objects = {}
        self.dynamic_geometric_plan_time = {}
        self.dynamic_plan_costs = {}
        self.dynamic_num_failed_plans = {}
        self.dynamic_num_change_plan = 0
        self.dynamic_actual_path = None
        self.dynamic_complete_time = 0
        self.dynamic_reduction_ratio = 0.
    def get_experiment_data(self):
        '''Return all collected experiment statistics as one dict.'''
        data = {
            'single_symbolic_plan_time': self.single_symbolic_plan_time,
            'single_plans': self.single_plans,
            'single_chosen_plan_id': self.single_chosen_plan_id,
            'single_perceive_human_objects': self.single_perceive_human_objects,
            'single_geometric_plan_time': self.single_geometric_plan_time,
            'single_plan_costs': self.single_plan_costs,
            'single_num_failed_plan': self.single_num_failed_plan,
            'single_actual_path': self.single_actual_path,
            'single_complete_time': self.single_complete_time,
            'single_reduction_ratio': self.single_reduction_ratio,
            'dynamic_symbolic_plan_time': self.dynamic_symbolic_plan_time,
            'dynamic_plans': self.dynamic_plans,
            'dynamic_chosen_plan_id': self.dynamic_chosen_plan_id,
            'dynamic_perceive_human_objects': self.dynamic_perceive_human_objects,
            'dynamic_geometric_plan_time': self.dynamic_geometric_plan_time,
            'dynamic_plan_costs': self.dynamic_plan_costs,
            'dynamic_num_failed_plans': self.dynamic_num_failed_plans,
            'dynamic_num_change_plan': self.dynamic_num_change_plan,
            'dynamic_actual_path': self.dynamic_actual_path,
            'dynamic_complete_time': self.dynamic_complete_time,
            'dynamic_reduction_ratio': self.dynamic_reduction_ratio,
            'human_path': self.actual_human_path
        }
        return data
    def check_goal_reached(self):
        '''True if the logic planner's current state is a goal state.'''
        return self.humoro_lgp.logic_planner.current_state in self.humoro_lgp.logic_planner.goal_states
    def update_visualization(self):
        '''
        Mirror the planner's workspace state into PyBullet.
        This update currently has no playback (backward in time).
        '''
        # update robot
        robot = self.humoro_lgp.workspace.get_robot_link_obj()
        current_robot_pos = self.humoro_lgp.workspace.get_robot_geometric_state()
        grad = current_robot_pos - self.prev_robot_pos
        if np.linalg.norm(grad) > 0:  # prevent numerical error
            z_angle = get_angle(grad, np.array([1, 0]))  # angle of current path gradient with the x-axis unit vector [1, 0]
            self.z_angle = z_angle if grad[1] > 0 else -z_angle
            self.q = p.getQuaternionFromEuler([0, 0, self.z_angle])  # + pi/2 due to default orientation of pepper is x-axis
        self.prev_robot_pos = current_robot_pos
        p.resetBasePositionAndOrientation(self.humoro_lgp.player._robots[self.robot_frame], [*current_robot_pos, 0], self.q)
        # update object
        if self.humoro_lgp.plan is not None:
            current_action = self.humoro_lgp.get_current_action()
            if current_action is not None and current_action.name == 'place':
                obj, location = current_action.parameters
                box = self.humoro_lgp.workspace.kin_tree.nodes[location]['link_obj']
                x = np.random.uniform(box.origin[0] - box.dim[0] / 2, box.origin[0] + box.dim[0] / 2)  # TODO: should be desired place_pos on location, or add an animation of placing here
                y = np.random.uniform(box.origin[1] - box.dim[1] / 2, box.origin[1] + box.dim[1] / 2)
                p.resetBasePositionAndOrientation(self.humoro_lgp.player._objects[obj], [x, y, 0.735], [0, 0, 0, 1])  # currently ignore object orientation
            elif robot.couplings:
                # objects currently carried by the robot ride on the handling circle
                for obj in robot.couplings:
                    self.handling_circle.origin = current_robot_pos
                    handling_pos = get_point_on_circle(self.z_angle, self.handling_circle)
                    p.resetBasePositionAndOrientation(self.humoro_lgp.player._objects[obj], [*handling_pos, 1], [0, 0, 0, 1])  # TODO: for now attach object at robot origin
    def run(self, replan=False, sleep=False, save_frame=False):
        '''
        Execute the task until the goal is reached, the plan runs out, or
        the timeout expires.

        :param replan: plan once up front when False; replan every trigger
            period (dynamic LGP) when True
        :param sleep: sleep 1/sim_fps per step for (roughly) real-time viewing
        :param save_frame: store a PyBullet screenshot each simulation step
        :return: True if a goal state was reached, False otherwise
        '''
        if not replan:
            # single-shot planning; record timing/plan statistics
            self.humoro_lgp.update_current_symbolic_state()
            start_symbolic_plan = time.time()
            success = self.humoro_lgp.symbolic_plan()
            start_geometric_plan = time.time()
            self.single_symbolic_plan_time = start_geometric_plan - start_symbolic_plan
            success = self.humoro_lgp.geometric_plan()
            self.single_geometric_plan_time = time.time() - start_geometric_plan
            self.single_plans = self.humoro_lgp.get_list_plan_as_string()
            self.single_chosen_plan_id = self.humoro_lgp.chosen_plan_id
            self.single_perceive_human_objects = self.humoro_lgp.perceive_human_objects
            self.single_plan_costs = self.humoro_lgp.ranking
            # count how many better-ranked plans failed before the chosen one
            for r in self.humoro_lgp.ranking:
                if r[1] == self.humoro_lgp.chosen_plan_id:
                    break
                self.single_num_failed_plan += 1
            if not success:
                HumoroDynamicLGP.logger.info('Task failed!')
                return False
        max_t = self.humoro_lgp.timeout * self.humoro_lgp.ratio
        # main simulation loop; lgp_t counts simulation timesteps
        while self.humoro_lgp.lgp_t < max_t:
            if replan and (self.humoro_lgp.lgp_t % (self.humoro_lgp.trigger_period * self.humoro_lgp.ratio) == 0):
                self.humoro_lgp.update_current_symbolic_state()
                if self.humoro_lgp.plan is None:
                    # no valid plan -> run a fresh symbolic plan and time it
                    self.dynamic_num_change_plan += 1
                    start_symbolic_plan = time.time()
                    success = self.humoro_lgp.symbolic_plan()
                    self.dynamic_symbolic_plan_time[self.humoro_lgp.lgp_t] = time.time() - start_symbolic_plan
                    self.dynamic_perceive_human_objects[self.humoro_lgp.lgp_t] = self.humoro_lgp.perceive_human_objects
                start_geometric_plan = time.time()
                success = self.humoro_lgp.geometric_replan()
                self.dynamic_geometric_plan_time[self.humoro_lgp.lgp_t] = time.time() - start_geometric_plan
                if self.humoro_lgp.lgp_t in self.dynamic_symbolic_plan_time:
                    self.dynamic_chosen_plan_id[self.humoro_lgp.lgp_t] = self.humoro_lgp.chosen_plan_id
                    self.dynamic_plans[self.humoro_lgp.lgp_t] = self.humoro_lgp.get_list_plan_as_string()
                self.dynamic_plan_costs[self.humoro_lgp.lgp_t] = self.humoro_lgp.ranking
                if success:
                    # rank position of the chosen plan = number of failed plans
                    n = 0
                    for r in self.humoro_lgp.ranking:
                        if r[1] == self.humoro_lgp.chosen_plan_id:
                            break
                        n += 1
                    self.dynamic_num_failed_plans[self.humoro_lgp.lgp_t] = n
                else:
                    self.dynamic_num_failed_plans[self.humoro_lgp.lgp_t] = len(self.humoro_lgp.ranking)
            if self.humoro_lgp.lgp_t % self.humoro_lgp.ratio == 0:
                # executing current action in the plan
                if replan:
                    if success:
                        self.humoro_lgp.act(sanity_check=False)
                else:
                    self.humoro_lgp.act(sanity_check=False)
                # reflecting changes in PyBullet
                self.update_visualization()
                # recording paths
                self.actual_robot_path.append(self.humoro_lgp.workspace.get_robot_geometric_state())
                self.actual_human_path.append(self.humoro_lgp.workspace.get_human_geometric_state())
            self.humoro_lgp.update_workspace()
            self.humoro_lgp.visualize()
            if save_frame:
                storeimg(p, os.path.join(self.image_dir, str(self.humoro_lgp.lgp_t) + '.png'))
            self.humoro_lgp.increase_timestep()
            # stop when the recorded human segment is over and the plan finished
            if self.humoro_lgp.lgp_t > self.humoro_lgp.workspace.duration and self.humoro_lgp.symbolic_elapsed_t > self.humoro_lgp.get_current_plan_time():
                break
            if sleep:
                time.sleep(1 / self.humoro_lgp.sim_fps)
        self.humoro_lgp.update_workspace()
        self.humoro_lgp.update_current_symbolic_state()
        # store summary statistics for the mode that was run
        if not replan:
            self.single_actual_path = self.actual_robot_path
            self.single_complete_time = self.humoro_lgp.lgp_t / self.humoro_lgp.sim_fps
            self.single_reduction_ratio = self.humoro_lgp.lgp_t / self.hr.get_segment_timesteps(self.humoro_lgp.workspace.segment, predicting=False)
        else:
            self.dynamic_actual_path = self.actual_robot_path
            self.dynamic_complete_time = self.humoro_lgp.lgp_t / self.humoro_lgp.sim_fps
            self.dynamic_reduction_ratio = self.humoro_lgp.lgp_t / self.hr.get_segment_timesteps(self.humoro_lgp.workspace.segment, predicting=False)
        if self.check_goal_reached():
            HumoroDynamicLGP.logger.info('Task complete successfully!')
            return True
        else:
            HumoroDynamicLGP.logger.info('Task failed!')
            return False
| [
"pybullet.getQuaternionFromEuler",
"numpy.random.uniform",
"os.makedirs",
"lgp.geometry.geometry.get_point_on_circle",
"os.path.realpath",
"lgp.core.planner.HumoroLGP",
"numpy.zeros",
"datetime.datetime.now",
"time.time",
"time.sleep",
"lgp.logic.parser.PDDLParser.parse_problem",
"examples.pre... | [((536, 581), 'os.path.join', 'os.path.join', (['_path_file', '"""../../data/videos"""'], {}), "(_path_file, '../../data/videos')\n", (548, 581), False, 'import os\n'), ((496, 522), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (512, 522), False, 'import os\n'), ((598, 641), 'os.path.join', 'os.path.join', (['_path_file', '"""../../../humoro"""'], {}), "(_path_file, '../../../humoro')\n", (610, 641), False, 'import os\n'), ((1317, 1344), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1334, 1344), False, 'import logging\n'), ((972, 1008), 'lgp.logic.parser.PDDLParser.parse_domain', 'PDDLParser.parse_domain', (['domain_file'], {}), '(domain_file)\n', (995, 1008), False, 'from lgp.logic.parser import PDDLParser\n'), ((1636, 1753), 'examples.prediction.hmp_interface.HumanRollout', 'HumanRollout', ([], {'path_to_mogaze': 'path_to_mogaze', 'fps': 'self.sim_fps', 'predicting': 'self.prediction', 'load_predictions': '(True)'}), '(path_to_mogaze=path_to_mogaze, fps=self.sim_fps, predicting=\n self.prediction, load_predictions=True)\n', (1648, 1753), False, 'from examples.prediction.hmp_interface import HumanRollout\n'), ((1775, 1816), 'lgp.core.planner.HumoroLGP', 'HumoroLGP', (['self.domain', 'self.hr'], {}), '(self.domain, self.hr, **kwargs)\n', (1784, 1816), False, 'from lgp.core.planner import HumoroLGP\n'), ((2082, 2124), 'os.makedirs', 'os.makedirs', (['self.image_dir'], {'exist_ok': '(True)'}), '(self.image_dir, exist_ok=True)\n', (2093, 2124), False, 'import os\n'), ((5965, 6086), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.humoro_lgp.player._robots[self.robot_frame]', '[*current_robot_pos, 0]', 'self.q'], {}), '(self.humoro_lgp.player._robots[self.\n robot_frame], [*current_robot_pos, 0], self.q)\n', (5998, 6086), True, 'import pybullet as p\n'), ((1101, 1139), 'lgp.logic.parser.PDDLParser.parse_problem', 'PDDLParser.parse_problem', 
(['problem_file'], {}), '(problem_file)\n', (1125, 1139), False, 'from lgp.logic.parser import PDDLParser\n'), ((1947, 1958), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1955, 1958), True, 'import numpy as np\n'), ((5565, 5585), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (5579, 5585), True, 'import numpy as np\n'), ((5805, 5851), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, self.z_angle]'], {}), '([0, 0, self.z_angle])\n', (5829, 5851), True, 'import pybullet as p\n'), ((7476, 7487), 'time.time', 'time.time', ([], {}), '()\n', (7485, 7487), False, 'import time\n'), ((7577, 7588), 'time.time', 'time.time', ([], {}), '()\n', (7586, 7588), False, 'import time\n'), ((2057, 2071), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2069, 2071), False, 'from datetime import datetime\n'), ((5656, 5672), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (5664, 5672), True, 'import numpy as np\n'), ((6458, 6544), 'numpy.random.uniform', 'np.random.uniform', (['(box.origin[0] - box.dim[0] / 2)', '(box.origin[0] + box.dim[0] / 2)'], {}), '(box.origin[0] - box.dim[0] / 2, box.origin[0] + box.dim[0\n ] / 2)\n', (6475, 6544), True, 'import numpy as np\n'), ((6646, 6732), 'numpy.random.uniform', 'np.random.uniform', (['(box.origin[1] - box.dim[1] / 2)', '(box.origin[1] + box.dim[1] / 2)'], {}), '(box.origin[1] - box.dim[1] / 2, box.origin[1] + box.dim[1\n ] / 2)\n', (6663, 6732), True, 'import numpy as np\n'), ((6744, 6848), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.humoro_lgp.player._objects[obj]', '[x, y, 0.735]', '[0, 0, 0, 1]'], {}), '(self.humoro_lgp.player._objects[obj], [x,\n y, 0.735], [0, 0, 0, 1])\n', (6777, 6848), True, 'import pybullet as p\n'), ((7778, 7789), 'time.time', 'time.time', ([], {}), '()\n', (7787, 7789), False, 'import time\n'), ((9183, 9194), 'time.time', 'time.time', ([], {}), '()\n', (9192, 9194), False, 'import time\n'), ((11353, 
11392), 'time.sleep', 'time.sleep', (['(1 / self.humoro_lgp.sim_fps)'], {}), '(1 / self.humoro_lgp.sim_fps)\n', (11363, 11392), False, 'import time\n'), ((8839, 8850), 'time.time', 'time.time', ([], {}), '()\n', (8848, 8850), False, 'import time\n'), ((9330, 9341), 'time.time', 'time.time', ([], {}), '()\n', (9339, 9341), False, 'import time\n'), ((7065, 7120), 'lgp.geometry.geometry.get_point_on_circle', 'get_point_on_circle', (['self.z_angle', 'self.handling_circle'], {}), '(self.z_angle, self.handling_circle)\n', (7084, 7120), False, 'from lgp.geometry.geometry import get_angle, get_point_on_circle\n'), ((7141, 7251), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.humoro_lgp.player._objects[obj]', '[*handling_pos, 1]', '[0, 0, 0, 1]'], {}), '(self.humoro_lgp.player._objects[obj], [*\n handling_pos, 1], [0, 0, 0, 1])\n', (7174, 7251), True, 'import pybullet as p\n'), ((8990, 9001), 'time.time', 'time.time', ([], {}), '()\n', (8999, 9001), False, 'import time\n')] |
# Copyright (c) 2010, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from geometry_msgs.msg import Pose, Point, Quaternion
from tf import transformations
import tf
import rospy
import numpy
from PyKDL import *
def fromTf(tf):
    """Convert a ``(translation, quaternion)`` tuple, as returned by
    :meth:`tf.Transformer.lookupTransform`, into a :class:`PyKDL.Frame`.

    :param tf: transform as ``((x, y, z), (Qx, Qy, Qz, Qw))``
    :return: new :class:`PyKDL.Frame` object
    """
    (x, y, z), (Qx, Qy, Qz, Qw) = tf
    rotation = Rotation.Quaternion(Qx, Qy, Qz, Qw)
    translation = Vector(x, y, z)
    return Frame(rotation, translation)
def toTf(f):
    """Return a ``(position, quaternion)`` tuple for the pose.

    :param f: input pose
    :type f: :class:`PyKDL.Frame`
    """
    position = (f.p[0], f.p[1], f.p[2])
    quaternion = f.M.GetQuaternion()
    return (position, quaternion)
# to and from pose message
def fromMsg(p):
    """Convert a ROS :class:`geometry_msgs.msg.Pose` message into a
    :class:`PyKDL.Frame`.

    :param p: input pose message
    :return: new :class:`PyKDL.Frame` object
    """
    orientation = Rotation.Quaternion(p.orientation.x,
                                    p.orientation.y,
                                    p.orientation.z,
                                    p.orientation.w)
    translation = Vector(p.position.x, p.position.y, p.position.z)
    return Frame(orientation, translation)
def toMsg(f):
    """Return a ROS :class:`geometry_msgs.msg.Pose` message for Frame *f*.

    :param f: input pose
    :type f: :class:`PyKDL.Frame`
    """
    msg = Pose()
    (msg.orientation.x, msg.orientation.y,
     msg.orientation.z, msg.orientation.w) = f.M.GetQuaternion()
    msg.position.x = f.p[0]
    msg.position.y = f.p[1]
    msg.position.z = f.p[2]
    return msg
# to and from matrix
def fromMatrix(m):
    """Convert a 4x4 homogeneous-transform :func:`numpy.array` into a
    :class:`PyKDL.Frame`.

    :param m: input 4x4 matrix
    :return: new :class:`PyKDL.Frame` object
    """
    rotation = Rotation(m[0, 0], m[0, 1], m[0, 2],
                        m[1, 0], m[1, 1], m[1, 2],
                        m[2, 0], m[2, 1], m[2, 2])
    translation = Vector(m[0, 3], m[1, 3], m[2, 3])
    return Frame(rotation, translation)
def toMatrix(f):
    """Return the 4x4 homogeneous-transform numpy array for Frame *f*.

    :param f: input pose
    :type f: :class:`PyKDL.Frame`
    """
    rows = [[f.M[i, 0], f.M[i, 1], f.M[i, 2], f.p[i]] for i in range(3)]
    rows.append([0, 0, 0, 1])
    return numpy.array(rows)
# from camera parameters
def fromCameraParams(cv, rvec, tvec):
    """Build a :class:`PyKDL.Frame` from OpenCV extrinsic camera parameters.

    :param cv: the OpenCV module
    :param rvec: 3x1 Rodrigues rotation vector - see :func:`Rodrigues2`
    :param tvec: 3x1 translation vector
    :return: new :class:`PyKDL.Frame` object

    For use with :func:`FindExtrinsicCameraParams2`::
        import cv
        import tf_conversions.posemath as pm
        ...
        rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
        tvec = cv.CreateMat(3, 1, cv.CV_32FC1)
        cv.FindExtrinsicCameraParams2(model, corners, intrinsic_matrix, kc, rvec, tvec)
        pose = pm.fromCameraParams(cv, rvec, tvec)
    """
    m = numpy.zeros((4, 4), dtype=numpy.float32)
    m[0, 3] = tvec[0, 0]
    m[1, 3] = tvec[1, 0]
    m[2, 3] = tvec[2, 0]
    m[3, 3] = 1.0
    # Rodrigues2 writes the 3x3 rotation into the top-left view in place
    cv.Rodrigues2(rvec, m[:3, :3])
    return fromMatrix(m)
| [
"geometry_msgs.msg.Pose",
"numpy.array"
] | [((3912, 3918), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (3916, 3918), False, 'from geometry_msgs.msg import Pose, Point, Quaternion\n'), ((4710, 4876), 'numpy.array', 'numpy.array', (['[[f.M[0, 0], f.M[0, 1], f.M[0, 2], f.p[0]], [f.M[1, 0], f.M[1, 1], f.M[1, 2\n ], f.p[1]], [f.M[2, 0], f.M[2, 1], f.M[2, 2], f.p[2]], [0, 0, 0, 1]]'], {}), '([[f.M[0, 0], f.M[0, 1], f.M[0, 2], f.p[0]], [f.M[1, 0], f.M[1, \n 1], f.M[1, 2], f.p[1]], [f.M[2, 0], f.M[2, 1], f.M[2, 2], f.p[2]], [0, \n 0, 0, 1]])\n', (4721, 4876), False, 'import numpy\n'), ((5638, 5761), 'numpy.array', 'numpy.array', (['[[0, 0, 0, tvec[0, 0]], [0, 0, 0, tvec[1, 0]], [0, 0, 0, tvec[2, 0]], [0, 0,\n 0, 1.0]]'], {'dtype': 'numpy.float32'}), '([[0, 0, 0, tvec[0, 0]], [0, 0, 0, tvec[1, 0]], [0, 0, 0, tvec[2,\n 0]], [0, 0, 0, 1.0]], dtype=numpy.float32)\n', (5649, 5761), False, 'import numpy\n')] |
import os
import cv2 as cv
import numpy as np
class SuspiciousImage:
    """Holds one image under forensic analysis.

    Loading derives a grayscale copy, an absolute-Laplacian edge map and
    keypoints/descriptors (ORB/AKAZE/SIFT/SURF), plus the bookkeeping
    fields (noise, clipping, copy-move, cut-paste, ...) that the
    tamper-detection pipeline fills in later.

    Fix: the shape-rank tests used ``is 3`` / ``is 2`` (identity comparison
    of ints, a CPython small-int accident); replaced with ``==``.
    """

    def __init__(self,
                 path=None, hist_eq=True,
                 algorithm='orb', nfeatures=5000,
                 dsize=256, gap=32, h=200, w=400):
        self.path = path
        self.hist_eq = hist_eq          # equalize histogram before keypoint detection
        self.algorithm = algorithm      # 'orb' | 'akaze' | 'sift' | 'surf'
        self.nfeatures = nfeatures      # ORB feature budget
        self.dsize = dsize
        self.gap = gap                  # white border (pixels) padded around keypoint image
        self.h = h                      # max preview height
        self.w = w                      # max preview width
        self.mat = None                 # BGR image as loaded by OpenCV
        self.gray = None                # grayscale version
        if path is not None:
            self.name = os.path.basename(path)
            self.size = os.path.getsize(path)
            self.read()

    def read(self):
        """Load the image from ``self.path`` and derive all analysis artifacts."""
        # Read image (first frame only for GIFs)
        if self.path.split('.')[-1] == 'gif':
            gif = cv.VideoCapture(self.path)
            _, self.mat = gif.read()
        else:
            self.mat = cv.imread(self.path)
        if self.mat is None:
            print("ERROR: No such image file.")
            return self
        # Convert to gray image (== instead of the buggy `is` comparison)
        if len(self.mat.shape) == 3:
            self.gray = cv.cvtColor(self.mat, cv.COLOR_BGR2GRAY)
        elif len(self.mat.shape) == 2:
            self.gray = self.mat
        self.keypoint()
        self.laplacian()
        # preview size: scale so the image fits within (h, w)
        height, width = self.gray.shape
        scale = self.h / height
        if width * scale > self.w:
            scale = self.w / width
        self.h = int(height * scale)
        self.w = int(width * scale)
        self.noise = 0
        # preview of the inverted, clipped-at-4 Laplacian response
        self.no_img = cv.resize(
            255 -
            np.where(
                self.lap > 4,
                4,
                self.lap) /
            4 *
            255,
            dsize=(
                self.w,
                self.h))
        self.dist = 0
        self.clipping = 0
        # self.cl_img = self.mat
        self.area_ratio = 0
        self.copymove = -1
        # self.cm_img = self.mat
        self.mask_ratio = 0
        self.cutpaste = -1
        self.prob = 0
        return self

    def make_flip(self, imgarr, name):
        """Replace the held image with a horizontal flip of *imgarr*."""
        self.name = name
        self.mat = cv.flip(imgarr, 1)
        self.gray = cv.cvtColor(self.mat, cv.COLOR_BGR2GRAY)
        self.laplacian()
        self.keypoint()
        return self

    def laplacian(self):
        """Compute ``self.lap``, the absolute Laplacian response of the gray image."""
        # delta=100 shifts the filter output so negative responses survive
        # the uint8 clamp; it is subtracted back after the cast to int
        self.lap = abs(cv.filter2D(
            self.gray, -1,
            np.array([[1, 1, 1],
                      [1, -8, 1],
                      [1, 1, 1]], np.float32), delta=100).astype('int') - 100)
        return self

    def keypoint(self):
        """Detect keypoints/descriptors on a padded (and optionally
        histogram-equalized) copy of the gray image."""
        if self.mat is None:
            self.mat = cv.cvtColor(self.gray, cv.COLOR_GRAY2BGR)
        elif self.gray is None:
            self.gray = cv.cvtColor(self.mat, cv.COLOR_BGR2GRAY)
        keyimg = self.gray
        if self.hist_eq:
            keyimg = cv.equalizeHist(keyimg)
        H, W = keyimg.shape
        gap = self.gap
        # pad with a white border of `gap` pixels on every side
        imgzeros = np.full((H + gap * 2, W + gap * 2), 255)
        for i in range(H):
            for j in range(W):
                imgzeros[i + gap, j + gap] = keyimg[i, j]
        self.keyimg = imgzeros.astype('uint8')
        if self.algorithm == 'orb':
            self.detector = cv.ORB_create(nfeatures=self.nfeatures)
            self.bf = cv.BFMatcher(cv.NORM_HAMMING)
        elif self.algorithm == 'akaze':
            self.detector = cv.AKAZE_create()
            self.bf = cv.BFMatcher(cv.NORM_HAMMING)
        elif self.algorithm == 'sift':
            self.detector = cv.xfeatures2d.SIFT_create()
            self.bf = cv.BFMatcher(cv.NORM_L2)
        elif self.algorithm == 'surf':
            self.detector = cv.xfeatures2d.SURF_create()
            self.bf = cv.BFMatcher(cv.NORM_L2)
        self.kp, self.des = self.detector.detectAndCompute(self.keyimg, None)
        return self
| [
"numpy.full",
"cv2.equalizeHist",
"os.path.basename",
"cv2.cvtColor",
"os.path.getsize",
"cv2.BFMatcher",
"cv2.AKAZE_create",
"cv2.VideoCapture",
"cv2.imread",
"cv2.xfeatures2d.SURF_create",
"numpy.where",
"cv2.ORB_create",
"numpy.array",
"cv2.xfeatures2d.SIFT_create",
"cv2.flip"
] | [((2099, 2117), 'cv2.flip', 'cv.flip', (['imgarr', '(1)'], {}), '(imgarr, 1)\n', (2106, 2117), True, 'import cv2 as cv\n'), ((2138, 2178), 'cv2.cvtColor', 'cv.cvtColor', (['self.mat', 'cv.COLOR_BGR2GRAY'], {}), '(self.mat, cv.COLOR_BGR2GRAY)\n', (2149, 2178), True, 'import cv2 as cv\n'), ((2889, 2929), 'numpy.full', 'np.full', (['(H + gap * 2, W + gap * 2)', '(255)'], {}), '((H + gap * 2, W + gap * 2), 255)\n', (2896, 2929), True, 'import numpy as np\n'), ((556, 578), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (572, 578), False, 'import os\n'), ((603, 624), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (618, 624), False, 'import os\n'), ((755, 781), 'cv2.VideoCapture', 'cv.VideoCapture', (['self.path'], {}), '(self.path)\n', (770, 781), True, 'import cv2 as cv\n'), ((856, 876), 'cv2.imread', 'cv.imread', (['self.path'], {}), '(self.path)\n', (865, 876), True, 'import cv2 as cv\n'), ((1072, 1112), 'cv2.cvtColor', 'cv.cvtColor', (['self.mat', 'cv.COLOR_BGR2GRAY'], {}), '(self.mat, cv.COLOR_BGR2GRAY)\n', (1083, 1112), True, 'import cv2 as cv\n'), ((2580, 2621), 'cv2.cvtColor', 'cv.cvtColor', (['self.gray', 'cv.COLOR_GRAY2BGR'], {}), '(self.gray, cv.COLOR_GRAY2BGR)\n', (2591, 2621), True, 'import cv2 as cv\n'), ((2794, 2817), 'cv2.equalizeHist', 'cv.equalizeHist', (['keyimg'], {}), '(keyimg)\n', (2809, 2817), True, 'import cv2 as cv\n'), ((3158, 3197), 'cv2.ORB_create', 'cv.ORB_create', ([], {'nfeatures': 'self.nfeatures'}), '(nfeatures=self.nfeatures)\n', (3171, 3197), True, 'import cv2 as cv\n'), ((3220, 3249), 'cv2.BFMatcher', 'cv.BFMatcher', (['cv.NORM_HAMMING'], {}), '(cv.NORM_HAMMING)\n', (3232, 3249), True, 'import cv2 as cv\n'), ((2678, 2718), 'cv2.cvtColor', 'cv.cvtColor', (['self.mat', 'cv.COLOR_BGR2GRAY'], {}), '(self.mat, cv.COLOR_BGR2GRAY)\n', (2689, 2718), True, 'import cv2 as cv\n'), ((3318, 3335), 'cv2.AKAZE_create', 'cv.AKAZE_create', ([], {}), '()\n', (3333, 3335), True, 'import cv2 as cv\n'), ((3358, 
3387), 'cv2.BFMatcher', 'cv.BFMatcher', (['cv.NORM_HAMMING'], {}), '(cv.NORM_HAMMING)\n', (3370, 3387), True, 'import cv2 as cv\n'), ((3455, 3483), 'cv2.xfeatures2d.SIFT_create', 'cv.xfeatures2d.SIFT_create', ([], {}), '()\n', (3481, 3483), True, 'import cv2 as cv\n'), ((3506, 3530), 'cv2.BFMatcher', 'cv.BFMatcher', (['cv.NORM_L2'], {}), '(cv.NORM_L2)\n', (3518, 3530), True, 'import cv2 as cv\n'), ((1556, 1591), 'numpy.where', 'np.where', (['(self.lap > 4)', '(4)', 'self.lap'], {}), '(self.lap > 4, 4, self.lap)\n', (1564, 1591), True, 'import numpy as np\n'), ((3598, 3626), 'cv2.xfeatures2d.SURF_create', 'cv.xfeatures2d.SURF_create', ([], {}), '()\n', (3624, 3626), True, 'import cv2 as cv\n'), ((3649, 3673), 'cv2.BFMatcher', 'cv.BFMatcher', (['cv.NORM_L2'], {}), '(cv.NORM_L2)\n', (3661, 3673), True, 'import cv2 as cv\n'), ((2349, 2405), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, -8, 1], [1, 1, 1]]', 'np.float32'], {}), '([[1, 1, 1], [1, -8, 1], [1, 1, 1]], np.float32)\n', (2357, 2405), True, 'import numpy as np\n')] |
'''
Created on Nov 8, 2018
@author: Zwieback
'''
import numpy as np
import copy, os, datetime, string
import itertools
import collections
from paths import pathcalibration
from model import (setup_slump, slump_parameters, integration_parameters, T_initial,
integrate_slump)
from slump import SlumpResults
# Default misfit specification: score modelled temperature ('T') at 0.05 m
# depth against observations by least squares; the observation arrays
# ('obs_values', 'obs_dates') are supplied by the caller.
misfit_parameters_null = [{'variable': 'T', 'depth': 0.05, 'type': 'least_squares',
                          'obs_values': None, 'obs_dates': None}]
class CalibrationModule(object):
    """Grid-search calibration of the slump model against observations.

    The Cartesian product of the ``calibration_parameters`` value tuples
    defines the search grid; each grid point is integrated with
    :func:`integrate_slump`, the result is pickled under ``output_path``
    and scored against observations according to ``misfit_parameters``.

    Fixes over the original: a trailing comma turned ``self.save_figures``
    into a 1-tuple (always truthy); Python-2-only ``dict.has_key`` and
    ``string.join`` replaced with portable equivalents; the bare ``except``
    in misfit scoring narrowed to ``except Exception``.
    """

    def __init__(
            self, baseline_slump_parameters=slump_parameters,
            integration_parameters=integration_parameters, T_initial=T_initial,
            misfit_parameters=misfit_parameters_null,
            date_initial=datetime.datetime(2018, 6, 1, 12, 10),
            date_final=datetime.datetime(2018, 8, 28, 12, 10),
            calibration_parameters={'beta': (0.0, 0.2, 0.4, 0.6, 0.8, 1.0)},
            same_roughness_length=False, forcing_MERRA=False, output_path=None,
            name='calibrationstd', save_figures=True):
        self.baseline_slump_parameters = baseline_slump_parameters
        self.integration_parameters = integration_parameters
        self.T_initial = T_initial
        self.date_initial = date_initial
        self.date_final = date_final
        self.same_roughness_length = same_roughness_length
        self.misfit_parameters = misfit_parameters
        # OrderedDict fixes the parameter order so grid indices map
        # deterministically onto parameter combinations
        self.calibration_parameters = collections.OrderedDict(calibration_parameters)
        self._calibration_grid = tuple(
            itertools.product(*self.calibration_parameters.values()))
        self.forcing_MERRA = forcing_MERRA
        self.name = name
        # bug fix: a stray trailing comma used to make this the 1-tuple
        # (save_figures,), so `if not self.save_figures` never fired
        self.save_figures = save_figures
        if output_path is None:
            self.output_path = os.path.join(pathcalibration, name)
        else:
            self.output_path = output_path
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)

    def _filename_grid_point(self, index_grid, figure=False):
        """Pickle (or, with ``figure=True``, PDF) file name for one grid point."""
        if not figure:
            fn = os.path.join(self.output_path, 'output_' + str(index_grid) + '.p')
        else:
            fn = os.path.join(self.output_path, 'output_' + str(index_grid) + '.pdf')
        return fn

    def _filename_summary_default(self):
        """Default location of the CSV summary of the grid search."""
        return os.path.join(self.output_path, 'summary.csv')

    def _run_grid_point(self, index_grid):
        """Integrate the slump model for one grid point and pickle the result."""
        fnout = self._filename_grid_point(index_grid)
        fnoutfig = self._filename_grid_point(index_grid, figure=True)
        if not self.save_figures:
            fnoutfig = None
        # modify slump parameters; pay attention to same_roughness_length
        slump_parameters_grid_point = copy.deepcopy(self.baseline_slump_parameters)
        parameters_grid_point = self._calibration_grid[index_grid]
        for jparameter, parameter in enumerate(self.calibration_parameters):
            if parameter == 'debris_thickness':  # for convenience
                debrist = parameters_grid_point[jparameter]
                # only implemented for two-layer structure
                assert len(slump_parameters_grid_point['constituents']) == 2
                # top layer unchanged (i.e. debris thickness > 0)
                assert debrist > 0
                # also must be smaller than grid size
                assert debrist < slump_parameters_grid_point['mesh_length']
                # lower layer: non-melting massive ice; may be changed.
                # NOTE(review): the original also tested
                # ``has_key(...) is not None``, which is vacuously true --
                # the *value* may have been meant; behavior kept identical.
                if 'thermal_properties' in slump_parameters_grid_point:
                    tp_temp = slump_parameters_grid_point['thermal_properties']
                    slump_parameters_grid_point['constituents'] = [(), ()]
                    slump_parameters_grid_point['constituents'][0] = (0.0,
                        tp_temp.output(constituents_only=True))
                    slump_parameters_grid_point['thermal_constants'] = (
                        tp_temp.output(parameters_only=True))
                    slump_parameters_grid_point['thermal_properties'] = None
                constituents_lower = (debrist,
                    {'theta_t':0.0, 'theta_m':0.0, 'theta_o':0.0, 'theta_n':1.0})
                slump_parameters_grid_point['constituents'][-1] = constituents_lower
            else:
                slump_parameters_grid_point[parameter] = parameters_grid_point[jparameter]
                if parameter == 'z0m' and self.same_roughness_length:
                    slump_parameters_grid_point['z0a'] = slump_parameters_grid_point['z0m']
        ts = setup_slump(slump_parameters=slump_parameters_grid_point,
                         integration_parameters=self.integration_parameters,
                         T_initial=self.T_initial, date_initial=self.date_initial,
                         date_final=self.date_final, forcing_MERRA=self.forcing_MERRA)
        ts = integrate_slump(ts, date_final=self.date_final,
                             viewer_variable=None, fnout=fnout, fnoutfig=fnoutfig)

    def gridSearch(self, n_jobs=32, overwrite=False, write_summary=False,
                   fnsummary=None):
        """Run (or reuse) every grid point, optionally write a CSV summary,
        and return the index of the best-fitting grid point."""
        # only re-run the whole grid if the first output is missing
        if overwrite or not os.path.exists(self._filename_grid_point(0)):
            from joblib import Parallel, delayed
            Parallel(n_jobs=n_jobs)(delayed(self._run_grid_point)(index_grid)
                                     for index_grid in range(len(self._calibration_grid)))
        misfits = []
        if write_summary:
            if fnsummary is None:
                fnsummary = self._filename_summary_default()
            summary = []
            # ','.join replaces the Python-2-only string.join
            header = 'index,' + ','.join(list(self.calibration_parameters.keys())) + ',misfit'
            summary.append(header)
        for index_grid in range(len(self._calibration_grid)):
            misfit = self._misfit_grid_point(index_grid, overwrite=False)
            misfits.append(misfit)
            if write_summary:
                summary_line = str(index_grid) + ','
                summary_line = summary_line + ','.join(
                    ['{:.3f}'.format(param) for param in self._calibration_grid[index_grid]])
                summary_line = summary_line + ',' + '{:.3f}'.format(misfit)
                summary.append(summary_line)
        if write_summary:
            with open(fnsummary, 'w') as fsummary:
                fsummary.writelines([line + '\n' for line in summary])
        return np.argmin(misfits)

    def _misfit_grid_point(self, index_grid, overwrite=False):
        """Misfit between model and observations for one grid point.

        Returns ``np.nan`` when the stored result cannot be scored.
        """
        fn = self._filename_grid_point(index_grid)
        if overwrite or not os.path.exists(fn):
            self._run_grid_point(index_grid)
        sr = SlumpResults.fromFile(fn)
        # to do: misfit
        misfit = 0.0
        try:
            for misfit_parameters_objective in self.misfit_parameters:
                values_predicted = sr.readVariable(
                    variable_name=misfit_parameters_objective['variable'],
                    interp_dates=misfit_parameters_objective['obs_dates'],
                    interp_depths=(misfit_parameters_objective['depth'],))
                if misfit_parameters_objective['type'] == 'least_squares':
                    misfit_objective = np.nanmean((
                        values_predicted - misfit_parameters_objective['obs_values']) ** 2)
                    weight_objective = misfit_parameters_objective['weight'] \
                        if 'weight' in misfit_parameters_objective else 1.0
                    misfit = misfit + weight_objective * misfit_objective
                else:
                    raise NotImplementedError
        except Exception:
            # narrowed from a bare except: any scoring failure marks this
            # grid point invalid instead of aborting the whole search
            misfit = np.nan
        return misfit
| [
"copy.deepcopy",
"model.setup_slump",
"os.makedirs",
"os.path.exists",
"numpy.argmin",
"datetime.datetime",
"numpy.nanmean",
"joblib.Parallel",
"collections.OrderedDict",
"slump.SlumpResults.fromFile",
"os.path.join",
"joblib.delayed",
"model.integrate_slump"
] | [((780, 817), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(6)', '(1)', '(12)', '(10)'], {}), '(2018, 6, 1, 12, 10)\n', (797, 817), False, 'import copy, os, datetime, string\n'), ((843, 881), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(8)', '(28)', '(12)', '(10)'], {}), '(2018, 8, 28, 12, 10)\n', (860, 881), False, 'import copy, os, datetime, string\n'), ((1505, 1552), 'collections.OrderedDict', 'collections.OrderedDict', (['calibration_parameters'], {}), '(calibration_parameters)\n', (1528, 1552), False, 'import collections\n'), ((2402, 2447), 'os.path.join', 'os.path.join', (['self.output_path', '"""summary.csv"""'], {}), "(self.output_path, 'summary.csv')\n", (2414, 2447), False, 'import copy, os, datetime, string\n'), ((2802, 2847), 'copy.deepcopy', 'copy.deepcopy', (['self.baseline_slump_parameters'], {}), '(self.baseline_slump_parameters)\n', (2815, 2847), False, 'import copy, os, datetime, string\n'), ((4783, 5025), 'model.setup_slump', 'setup_slump', ([], {'slump_parameters': 'slump_parameters_grid_point', 'integration_parameters': 'self.integration_parameters', 'T_initial': 'self.T_initial', 'date_initial': 'self.date_initial', 'date_final': 'self.date_final', 'forcing_MERRA': 'self.forcing_MERRA'}), '(slump_parameters=slump_parameters_grid_point,\n integration_parameters=self.integration_parameters, T_initial=self.\n T_initial, date_initial=self.date_initial, date_final=self.date_final,\n forcing_MERRA=self.forcing_MERRA)\n', (4794, 5025), False, 'from model import setup_slump, slump_parameters, integration_parameters, T_initial, integrate_slump\n'), ((5105, 5211), 'model.integrate_slump', 'integrate_slump', (['ts'], {'date_final': 'self.date_final', 'viewer_variable': 'None', 'fnout': 'fnout', 'fnoutfig': 'fnoutfig'}), '(ts, date_final=self.date_final, viewer_variable=None, fnout\n =fnout, fnoutfig=fnoutfig)\n', (5120, 5211), False, 'from model import setup_slump, slump_parameters, integration_parameters, T_initial, 
integrate_slump\n'), ((6733, 6751), 'numpy.argmin', 'np.argmin', (['misfits'], {}), '(misfits)\n', (6742, 6751), True, 'import numpy as np\n'), ((6979, 7004), 'slump.SlumpResults.fromFile', 'SlumpResults.fromFile', (['fn'], {}), '(fn)\n', (7000, 7004), False, 'from slump import SlumpResults\n'), ((1843, 1878), 'os.path.join', 'os.path.join', (['pathcalibration', 'name'], {}), '(pathcalibration, name)\n', (1855, 1878), False, 'import copy, os, datetime, string\n'), ((1954, 1986), 'os.path.exists', 'os.path.exists', (['self.output_path'], {}), '(self.output_path)\n', (1968, 1986), False, 'import copy, os, datetime, string\n'), ((2001, 2030), 'os.makedirs', 'os.makedirs', (['self.output_path'], {}), '(self.output_path)\n', (2012, 2030), False, 'import copy, os, datetime, string\n'), ((5497, 5520), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (5505, 5520), False, 'from joblib import Parallel, delayed\n'), ((6899, 6917), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (6913, 6917), False, 'import copy, os, datetime, string\n'), ((7535, 7614), 'numpy.nanmean', 'np.nanmean', (["((values_predicted - misfit_parameters_objective['obs_values']) ** 2)"], {}), "((values_predicted - misfit_parameters_objective['obs_values']) ** 2)\n", (7545, 7614), True, 'import numpy as np\n'), ((5521, 5550), 'joblib.delayed', 'delayed', (['self._run_grid_point'], {}), '(self._run_grid_point)\n', (5528, 5550), False, 'from joblib import Parallel, delayed\n')] |
# approaches the n-armed bandit problem from a different angle,
# doing away with estimated action-values in favour of preferences based on
# a single reference reward (the average of _all_ received rewards)
# takes no direct parameters (though this may change)
# instead, modify the "n_armed_bandits" dict in "settings.py" to change the
# hyperparamters for this solution
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from util.iter_count import IterCount
from util.cmap import colormap
from .solution_util import pref_softmax
import settings
config = settings.n_armed_bandits
def learn(alpha, beta, init_ref):
    """Run the reference-reward preference learner on all bandits at once.

    Each play, an arm is sampled from a softmax over per-arm preferences;
    preferences and the running reference reward are nudged by the
    difference between the received reward and the reference.

    Returns (rewards, isOptimal): (numBandits, numPlays) arrays of received
    rewards and optimal-arm indicators.
    """
    n_bandits = config['numBandits']
    n_arms = config['numArms']
    n_plays = config['numPlays']
    arm_means = np.random.normal(0, 1, (n_bandits, n_arms))
    optimal_arms = np.argmax(arm_means, axis=1)
    ref_reward = np.zeros(n_bandits) + init_ref
    prefs = np.zeros(arm_means.shape)
    reward_hist = np.zeros((n_bandits, n_plays))
    optimal_hist = np.zeros((n_bandits, n_plays))
    progress = IterCount('Play number {0} of {1}'.format("{}", n_plays))
    rows = range(n_bandits)
    for play in range(n_plays):
        progress.update()
        chosen = pref_softmax(prefs)
        optimal_hist[:, play][chosen == optimal_arms] = 1
        payoff = np.random.normal(0, 1, n_bandits) + arm_means[rows, chosen]
        reward_hist[:, play] = payoff
        delta = payoff - ref_reward
        prefs[rows, chosen] += beta * delta
        ref_reward += alpha * delta
    progress.exit()
    return reward_hist, optimal_hist
def run():
    """Run one experiment with the hyperparameters from
    ``settings.n_armed_bandits`` and plot the mean reward and the
    percentage of optimal-arm choices over all plays."""
    print('Running with settings:')
    print('\tnumBandits: {0}\n\tnumArms: {1}\n\tnumPlays: {2}\n\talpha: {3}\n\tbeta: {4}\n\tinit_ref: {5}\n'.format(config['numBandits'], config['numArms'], config['numPlays'], config['alpha'], config['beta'], config['init_ref']))
    figure = plt.figure(1, (8,8))
    ax_reward = figure.add_subplot(211)
    ax_optimal = figure.add_subplot(212)
    print('Learning...')
    rewards, isOptimal = learn(config['alpha'], config['beta'], config['init_ref'])
    plays = range(config['numPlays'])
    cyan = (0, 1, 1)
    ax_reward.plot(plays, np.mean(rewards, axis=0), c=cyan)
    ax_optimal.plot(plays, np.mean(isOptimal, axis=0) * 100, c=cyan)
    percent_fmt = mtick.FormatStrFormatter('%.0f%%')
    ax_optimal.yaxis.set_major_formatter(percent_fmt)
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.argmax",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.ticker.FormatStrFormatter",
"numpy.random.normal"
] | [((770, 815), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(numBandits, numArms)'], {}), '(0, 1, (numBandits, numArms))\n', (786, 815), True, 'import numpy as np\n'), ((828, 854), 'numpy.argmax', 'np.argmax', (['bandits'], {'axis': '(1)'}), '(bandits, axis=1)\n', (837, 854), True, 'import numpy as np\n'), ((923, 946), 'numpy.zeros', 'np.zeros', (['bandits.shape'], {}), '(bandits.shape)\n', (931, 946), True, 'import numpy as np\n'), ((1837, 1858), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(8, 8)'], {}), '(1, (8, 8))\n', (1847, 1858), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2274), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.0f%%"""'], {}), "('%.0f%%')\n", (2264, 2274), True, 'import matplotlib.ticker as mtick\n'), ((2331, 2341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2339, 2341), True, 'import matplotlib.pyplot as plt\n'), ((873, 893), 'numpy.zeros', 'np.zeros', (['numBandits'], {}), '(numBandits)\n', (881, 893), True, 'import numpy as np\n'), ((974, 1006), 'numpy.zeros', 'np.zeros', (['(numBandits, numPlays)'], {}), '((numBandits, numPlays))\n', (982, 1006), True, 'import numpy as np\n'), ((2095, 2119), 'numpy.mean', 'np.mean', (['rewards'], {'axis': '(0)'}), '(rewards, axis=0)\n', (2102, 2119), True, 'import numpy as np\n'), ((1247, 1281), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'numBandits'], {}), '(0, 1, numBandits)\n', (1263, 1281), True, 'import numpy as np\n'), ((2181, 2207), 'numpy.mean', 'np.mean', (['isOptimal'], {'axis': '(0)'}), '(isOptimal, axis=0)\n', (2188, 2207), True, 'import numpy as np\n')] |
import os
import csv
import re
import shutil
import cv2
import numpy as np
def alter_format():
    """Rewrite ICDAR2013 training ground-truth files in place, converting
    ``x1 y1 x3 y3 "label"`` rows into 8-coordinate quadrilateral rows
    (``x1,y1,x2,y2,x3,y3,x4,y4,label``)."""
    gt_path_name = "D:/Data/DATA/ICDAR2013/icdar13-Training-GT"
    for fname in os.listdir(gt_path_name):
        file_path = gt_path_name + "/" + fname
        converted = []
        with open(file_path, 'r', encoding='UTF-8') as f:
            for raw in f:
                parts = re.split(" ", raw)
                # NOTE(review): eval() on file content is unsafe for
                # untrusted data; kept because labels are stored as
                # quoted string literals in this dataset.
                label = eval(parts[-1])
                parts = [p.strip('\ufeff').strip('\xef\xbb\xbf') for p in parts]
                x1, y1, x3, y3 = list(map(int, parts[:4]))
                # complete the axis-aligned box: top-right and bottom-left
                x2, y2 = x3, y1
                x4, y4 = x1, y3
                converted.append(
                    ",".join(str(v) for v in (x1, y1, x2, y2, x3, y3, x4, y4))
                    + "," + label + "\n")
        with open(file_path, 'w', encoding='UTF-8') as f:
            f.write("".join(converted))
def delete_gt():
    """Delete every HUST-TR400 ground-truth txt whose matching jpg is absent."""
    img_path_name = 'D:/Data/HUST-TR400/img'
    gt_path_name = "D:/Data/HUST-TR400/txt"
    images = os.listdir(img_path_name)
    for gt_file in os.listdir(gt_path_name):
        # gt file "foo.txt" corresponds to image "foo.jpg"
        expected_img = re.sub('txt', 'jpg', gt_file)
        print(expected_img)
        if expected_img not in images:
            os.remove(gt_path_name + '\\' + gt_file)
def data_rename(start_index=3048):
    """Rename every file in the MLT image folder to ``img_<n>.jpg`` with
    consecutive numbers.

    :param start_index: number assigned to the first file; the default
        keeps the original hard-coded behaviour (continuing after 3047).
    """
    img_path_name = 'D:/Data/DATA/MLT/img/'
    i = start_index
    for file in os.listdir(img_path_name):
        new_name = "img_" + str(i) + ".jpg"
        i += 1
        print(new_name, img_path_name + file)
        os.rename(img_path_name + file, img_path_name + new_name)
def select_gt():
    """Copy the ground-truth file matching each MLT image into the
    selected-gt folder."""
    img_dir = 'D:/Data/DATA/MLT/img/'
    gt_dir = "D:/Data//MLT/MLT_gt/"
    out_dir = "D:/Data/DATA/MLT/gt/"
    for img_name in os.listdir(img_dir):
        # image "img_1.jpg" pairs with "gt_img_1.txt"
        gt_name = "gt_" + re.sub('jpg', 'txt', img_name)
        shutil.copyfile(gt_dir + gt_name, out_dir + gt_name)
def is_latin():
    """Mask the transcription of every non-Latin row in the MLT gt files.

    Rows are ``x1,y1,...,y4,script,transcription``; rows whose script
    field (index 8) is not ``"Latin"`` have the transcription replaced by
    ``###``.  Files are rewritten in place.
    """
    gt_path = "D:/Data/DATA/MLT/ch8_training_images_7_gt/"
    gt_list = os.listdir(gt_path)
    for file in gt_list:
        strs = ""
        with open(gt_path + file, "r", encoding='UTF-8') as f:
            for line in f:
                line = re.split(",", line)
                if not line[8] == "Latin":
                    line[9] = "###\n"
                # transcription itself may contain commas: glue the split
                # tail back onto field 9.
                # NOTE(review): this also re-appends the tail after a
                # non-Latin transcription was masked to "###\n" above,
                # yielding "###\n<tail>" -- looks unintended; confirm.
                if len(line) > 10:
                    for i in range(10, len(line)):
                        line[9] += line[i]
                # print(line[9])
                strs += line[0] + "," + line[1] + "," + line[2] + "," + line[3] + "," + line[4] + "," + line[5] + "," + \
                        line[6] + "," + line[7] + "," + line[9]
        # print(strs)
        with open(gt_path + file, "w", encoding='UTF-8') as f:
            f.write(strs)
        # print(type(line))
        # print(line[-2])
def divide_words_img():
    """Crop every annotated word out of each dataset image.

    For each image, the matching ``gt_img_*.txt`` file is parsed; every
    8-coordinate quadrilateral is masked (pixels outside the polygon are
    zeroed) and the bounding-box crop is written to ``words_path`` as
    ``<image>_word_<k>.jpg``.
    """
    img_path = r"D:/Data/DATA/dataset/img/"
    gt_path = r"D:/Data/DATA/dataset/img_gt/"
    words_path = r"D:/Data/DATA/dataset/words/"
    img_list = os.listdir(img_path)
    gt_list = os.listdir(gt_path)
    for img_name in img_list:
        # "img_123.jpg" pairs with "gt_img_123.txt"
        gt_name = re.sub("img", "gt_img", (re.sub("jpg", "txt", img_name)))
        print(gt_name)
        if gt_name not in gt_list:
            print("Couldn't find the txt file of " + img_name)
            continue
        with open(gt_path + gt_name, "r", encoding="UTF-8") as f:
            word_count = 1
            img = cv2.imread(img_path + img_name)
            for line in f:
                line = re.sub(r"\ufeff", "", line)  # drop a leading BOM if present
                line = re.split(r",|\n", line)
                if len(line) < 8:
                    continue
                x1, y1, x2, y2, x3, y3, x4, y4 = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(
                    line[4]), int(line[5]), int(line[6]), int(line[7])
                x_min = min(x1, x2, x3, x4)
                x_max = max(x1, x2, x3, x4)
                y_min = min(y1, y2, y3, y4)
                y_max = max(y1, y2, y3, y4)
                # binary polygon mask in bounding-box coordinates
                word_background = np.zeros((np.int32(y_max - y_min), np.int32(x_max - x_min)), dtype=np.int32)
                poly_area = np.array([[x1 - x_min, y1 - y_min], [x2 - x_min, y2 - y_min], [x3 - x_min, y3 - y_min],
                                      [x4 - x_min, y4 - y_min]])
                cv2.fillPoly(word_background, np.int32([poly_area]), 1)
                word_area = np.copy(img[y_min:y_max, x_min:x_max])
                try:
                    # zero out pixels outside the polygon, channel by channel
                    word_area[:, :, 0] *= np.uint8(word_background)
                    word_area[:, :, 1] *= np.uint8(word_background)
                    word_area[:, :, 2] *= np.uint8(word_background)
                    cv2.imwrite(filename=words_path + re.sub(".jpg", "_word_" + str(word_count) + ".jpg", img_name),
                                img=word_area)
                    word_count += 1
                except Exception as e:
                    # typically triggered by malformed boxes (negative extents)
                    print("\033[0;31m", gt_name, "\033[0m", e)
                    print("\033[0;31m",
                          "Shape don't match! Maybe some negative numbers exist! The type must be 'uint'!", "\033[0m")
                    # cv2.imshow("img", word_area)
                    # cv2.waitKey(0)
def alter():
    """Reorder the corner columns of ICDAR2013 test gt files in place:
    rows of the form ``x1,y1,x2,y2,x4,y4,x3,y3,label`` are rewritten as
    ``x1,y1,x2,y2,x3,y3,x4,y4,label``."""
    gt_dir = "D:/Data/DATA/ICDAR2013/icdar13_Test_GT"
    for fname in os.listdir(gt_dir):
        file_path = gt_dir + "/" + fname
        rebuilt = ""
        with open(file_path, 'r', encoding='UTF-8') as f:
            for raw in f:
                fields = re.split(",", raw)
                if len(fields) < 7:
                    continue
                label = fields[-1]
                fields = [c.strip('\ufeff').strip('\xef\xbb\xbf') for c in fields]
                x1, y1, x2, y2, x4, y4, x3, y3 = list(map(int, fields[:8]))
                rebuilt += ",".join(str(v) for v in (x1, y1, x2, y2, x3, y3, x4, y4)) + "," + label
        print(file_path, ":", rebuilt)
        with open(file_path, 'w', encoding='UTF-8') as f:
            f.write(rebuilt)
def add_txt():
    """Append a ``,Text`` label to every line of each USTB training gt file,
    rewriting the files in place."""
    gt_dir = "D:/Data/DATA/USTB/training/USTB_train_txt/"
    for gt_name in os.listdir(gt_dir):
        out_lines = []
        with open(gt_dir + gt_name, "r", encoding="UTF-8") as f:
            for line in f:
                out_lines.append(re.sub("\n", ",Text\n", line))
        print(gt_name)
        with open(gt_dir + gt_name, "w", encoding='UTF-8') as f:
            f.write("".join(out_lines))
def is_exist_space():
    """Report gt lines that do not split into exactly 9 comma-separated fields."""
    gt_dir = "D:/Data/DATA/dataset/img_gt/"
    for gt_name in os.listdir(gt_dir):
        with open(gt_dir + gt_name, "r", encoding="UTF-8") as f:
            for raw in f:
                fields = re.split(",", raw)
                if len(fields) != 9:
                    print(gt_name, fields)
def is_exist_symbol():
    """Print the name of every gt file containing a line that starts with a
    UTF-8 byte-order mark."""
    gt_dir = "D:/Data/DATA/dataset/img_gt/"
    for gt_name in os.listdir(gt_dir):
        with open(gt_dir + gt_name, "r", encoding="UTF-8") as f:
            for line in f:
                if re.match("\ufeff", line):
                    print(gt_name)
def main():
    """Entry point: split every annotated word out of the dataset images."""
    divide_words_img()


# Guard the invocation so importing this module no longer triggers the
# word-extraction side effect (previously main() ran unconditionally).
if __name__ == "__main__":
    main()
| [
"os.remove",
"numpy.uint8",
"re.split",
"numpy.copy",
"os.rename",
"re.match",
"cv2.imread",
"numpy.array",
"numpy.int32",
"shutil.copyfile",
"re.sub",
"os.listdir"
] | [((175, 199), 'os.listdir', 'os.listdir', (['gt_path_name'], {}), '(gt_path_name)\n', (185, 199), False, 'import os\n'), ((1195, 1220), 'os.listdir', 'os.listdir', (['img_path_name'], {}), '(img_path_name)\n', (1205, 1220), False, 'import os\n'), ((1235, 1259), 'os.listdir', 'os.listdir', (['gt_path_name'], {}), '(gt_path_name)\n', (1245, 1259), False, 'import os\n'), ((1798, 1823), 'os.listdir', 'os.listdir', (['img_path_name'], {}), '(img_path_name)\n', (1808, 1823), False, 'import os\n'), ((2180, 2205), 'os.listdir', 'os.listdir', (['img_path_name'], {}), '(img_path_name)\n', (2190, 2205), False, 'import os\n'), ((2447, 2466), 'os.listdir', 'os.listdir', (['gt_path'], {}), '(gt_path)\n', (2457, 2466), False, 'import os\n'), ((3425, 3445), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (3435, 3445), False, 'import os\n'), ((3460, 3479), 'os.listdir', 'os.listdir', (['gt_path'], {}), '(gt_path)\n', (3470, 3479), False, 'import os\n'), ((5702, 5726), 'os.listdir', 'os.listdir', (['gt_path_name'], {}), '(gt_path_name)\n', (5712, 5726), False, 'import os\n'), ((6626, 6650), 'os.listdir', 'os.listdir', (['gt_path_name'], {}), '(gt_path_name)\n', (6636, 6650), False, 'import os\n'), ((7085, 7109), 'os.listdir', 'os.listdir', (['gt_path_name'], {}), '(gt_path_name)\n', (7095, 7109), False, 'import os\n'), ((7444, 7468), 'os.listdir', 'os.listdir', (['gt_path_name'], {}), '(gt_path_name)\n', (7454, 7468), False, 'import os\n'), ((1364, 1390), 're.sub', 're.sub', (['"""txt"""', '"""jpg"""', 'file'], {}), "('txt', 'jpg', file)\n", (1370, 1390), False, 'import re\n'), ((1960, 2017), 'os.rename', 'os.rename', (['(img_path_name + file)', '(img_path_name + new_name)'], {}), '(img_path_name + file, img_path_name + new_name)\n', (1969, 2017), False, 'import os\n'), ((2293, 2355), 'shutil.copyfile', 'shutil.copyfile', (['(gt_path_name + gt_name)', '(new_gt_path + gt_name)'], {}), '(gt_path_name + gt_name, new_gt_path + gt_name)\n', (2308, 2355), False, 
'import shutil\n'), ((1468, 1505), 'os.remove', 'os.remove', (["(gt_path_name + '\\\\' + file)"], {}), "(gt_path_name + '\\\\' + file)\n", (1477, 1505), False, 'import os\n'), ((2233, 2259), 're.sub', 're.sub', (['"""jpg"""', '"""txt"""', 'file'], {}), "('jpg', 'txt', file)\n", (2239, 2259), False, 'import re\n'), ((3553, 3583), 're.sub', 're.sub', (['"""jpg"""', '"""txt"""', 'img_name'], {}), "('jpg', 'txt', img_name)\n", (3559, 3583), False, 'import re\n'), ((3839, 3870), 'cv2.imread', 'cv2.imread', (['(img_path + img_name)'], {}), '(img_path + img_name)\n', (3849, 3870), False, 'import cv2\n'), ((397, 416), 're.split', 're.split', (['""" """', 'line'], {}), "(' ', line)\n", (405, 416), False, 'import re\n'), ((2623, 2642), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (2631, 2642), False, 'import re\n'), ((3921, 3948), 're.sub', 're.sub', (['"""\\\\ufeff"""', '""""""', 'line'], {}), "('\\\\ufeff', '', line)\n", (3927, 3948), False, 'import re\n'), ((3972, 3995), 're.split', 're.split', (['""",|\\\\n"""', 'line'], {}), "(',|\\\\n', line)\n", (3980, 3995), False, 'import re\n'), ((4555, 4674), 'numpy.array', 'np.array', (['[[x1 - x_min, y1 - y_min], [x2 - x_min, y2 - y_min], [x3 - x_min, y3 -\n y_min], [x4 - x_min, y4 - y_min]]'], {}), '([[x1 - x_min, y1 - y_min], [x2 - x_min, y2 - y_min], [x3 - x_min, \n y3 - y_min], [x4 - x_min, y4 - y_min]])\n', (4563, 4674), True, 'import numpy as np\n'), ((4808, 4846), 'numpy.copy', 'np.copy', (['img[y_min:y_max, x_min:x_max]'], {}), '(img[y_min:y_max, x_min:x_max])\n', (4815, 4846), True, 'import numpy as np\n'), ((5924, 5943), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (5932, 5943), False, 'import re\n'), ((6818, 6847), 're.sub', 're.sub', (['"""\n"""', '""",Text\n"""', 'line'], {}), "('\\n', ',Text\\n', line)\n", (6824, 6847), False, 'import re\n'), ((7259, 7278), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (7267, 7278), False, 'import re\n'), ((7632, 
7656), 're.match', 're.match', (['"""\ufeff"""', 'line'], {}), "('\\ufeff', line)\n", (7640, 7656), False, 'import re\n'), ((4754, 4775), 'numpy.int32', 'np.int32', (['[poly_area]'], {}), '([poly_area])\n', (4762, 4775), True, 'import numpy as np\n'), ((4910, 4935), 'numpy.uint8', 'np.uint8', (['word_background'], {}), '(word_background)\n', (4918, 4935), True, 'import numpy as np\n'), ((4978, 5003), 'numpy.uint8', 'np.uint8', (['word_background'], {}), '(word_background)\n', (4986, 5003), True, 'import numpy as np\n'), ((5046, 5071), 'numpy.uint8', 'np.uint8', (['word_background'], {}), '(word_background)\n', (5054, 5071), True, 'import numpy as np\n'), ((4460, 4483), 'numpy.int32', 'np.int32', (['(y_max - y_min)'], {}), '(y_max - y_min)\n', (4468, 4483), True, 'import numpy as np\n'), ((4485, 4508), 'numpy.int32', 'np.int32', (['(x_max - x_min)'], {}), '(x_max - x_min)\n', (4493, 4508), True, 'import numpy as np\n')] |
import math
import numpy as np
import torch
import random
class AverageMeter(object):
    """Tracks the most recent value plus a weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times (e.g. averaged over a batch of n)."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
class TrackMeter(object):
    """Stores every reported value with a running sum/average and the maximum.

    ``max_idx`` holds the caller-supplied index of the maximum value, or the
    1-based position of that value when no index is given.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all recorded values."""
        self.data = []
        self.sum = 0
        self.avg = 0
        self.max_val = float('-inf')
        self.max_idx = -1

    def update(self, val, idx=None):
        """Append ``val`` (optionally tagged with ``idx``) and refresh stats."""
        self.data.append(val)
        self.sum += val
        self.avg = self.sum / len(self.data)
        if val > self.max_val:
            self.max_val = val
            # Bug fix: compare against None explicitly so a caller-supplied
            # index of 0 is honoured (the old ``idx if idx else ...`` treated
            # 0 as "missing" and silently fell back to len(self.data)).
            self.max_idx = idx if idx is not None else len(self.data)

    def last(self, k):
        """Return the average of the most recent ``k`` values."""
        assert 0 < k <= len(self.data)
        return sum(self.data[-k:]) / k
def set_seed(seed=42):
    """Seed the python, numpy and torch RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def update_ema(model, model_ema, m=0.999):
    """Exponential-moving-average update of ``model_ema`` towards ``model``.

    Parameters are blended with momentum ``m``; buffers (e.g. BatchNorm
    running_mean / running_var) are copied outright via ``.data`` so the two
    models never share storage.
    """
    for src, ema in zip(model.parameters(), model_ema.parameters()):
        ema.data = m * ema.data + (1. - m) * src.data
    for src_buf, ema_buf in zip(model.buffers(), model_ema.buffers()):
        # Rebinding (ema_buf = src_buf) would alias the tensor; copy the data.
        ema_buf.data = src_buf.data
def interleave(x, batch_size):
    """Interleave stacked sub-batches along dim 0.

    Expects ``x.shape[0] == batch_size * num_batches``; rows of the
    sub-batches are woven together (inverse of :func:`de_interleave`).
    """
    trailing = list(x.shape)[1:]
    grouped = x.reshape([-1, batch_size] + trailing)
    return grouped.transpose(0, 1).reshape([-1] + trailing)
def de_interleave(x, batch_size):
    """Undo :func:`interleave`: regroup woven rows back into sub-batches."""
    trailing = list(x.shape)[1:]
    regrouped = x.reshape([batch_size, -1] + trailing)
    return regrouped.transpose(0, 1).reshape([-1] + trailing)
def count_params(model):
    """Number of trainable (requires_grad) scalar parameters in ``model``."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) of ``output`` logits for each k in ``topk``.

    Returns a list of 1-element tensors, one per requested k.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        # Indices of the k_max highest-scoring classes, one column per sample.
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        return [hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
                for k in topk]
def _get_lr(cfg, step):
lr = cfg.lr
if cfg.type == 'Cosine': # Cosine Anneal
start_step = cfg.get('start_step', 1)
eta_min = lr * cfg.decay_rate
lr = eta_min + (lr - eta_min) * (1 + math.cos(math.pi * (step - start_step) / cfg.steps)) / 2
elif cfg.type == 'MultiStep': # MultiStep
num_steps = np.sum(step > np.asarray(cfg.decay_steps))
lr = lr * (cfg.decay_rate ** num_steps)
else:
raise NotImplementedError(cfg.type)
return lr
def adjust_learning_rate(cfg, optimizer, step, batch_idx=0, num_batches=100):
    """Set the learning rate on every optimizer param group.

    During the first ``warmup_steps`` steps the LR ramps linearly from
    ``cfg.warmup_from`` to the scheduled post-warmup value; afterwards the
    configured schedule (via ``_get_lr``) applies directly.
    """
    start_step = cfg.get('start_step', 1)
    if step < cfg.get('warmup_steps', 0) + start_step:
        warmup_target = _get_lr(cfg, cfg.warmup_steps + 1)
        progress = (step - start_step + batch_idx / num_batches) / cfg.warmup_steps
        lr = cfg.warmup_from + progress * (warmup_target - cfg.warmup_from)
    else:
        lr = _get_lr(cfg, step)
    # Write the new rate into the optimizer in place.
    for group in optimizer.param_groups:
        group['lr'] = lr
def adjust_lr_simsiam(cfg, optimizer, step):
    """SimSiam-style LR update: groups flagged ``fix_lr`` keep the base rate,
    all other groups follow the configured schedule."""
    base_lr = cfg.lr
    scheduled = _get_lr(cfg, step)
    for group in optimizer.param_groups:
        if 'fix_lr' in group and group['fix_lr']:
            group['lr'] = base_lr
        else:
            group['lr'] = scheduled
def format_time(seconds):
    """Render a duration in seconds compactly, e.g. ``'1D2h'`` or ``'2m5s'``.

    At most the two most significant non-zero units (days, hours, minutes,
    seconds) are shown; a sub-second duration renders as ``'0ms'``.
    """
    days = int(seconds // 86400)
    seconds -= days * 86400
    hours = int(seconds // 3600)
    seconds -= hours * 3600
    minutes = int(seconds // 60)
    seconds -= minutes * 60
    secs = int(seconds)
    parts = []
    for amount, suffix in ((days, 'D'), (hours, 'h'), (minutes, 'm'), (secs, 's')):
        # Cap at two rendered units, matching the original behaviour.
        if amount > 0 and len(parts) < 2:
            parts.append('%d%s' % (amount, suffix))
    return ''.join(parts) if parts else '0ms'
| [
"numpy.random.seed",
"torch.manual_seed",
"numpy.asarray",
"random.seed",
"math.cos",
"torch.no_grad"
] | [((1096, 1113), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1107, 1113), False, 'import random\n'), ((1118, 1138), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1132, 1138), True, 'import numpy as np\n'), ((1143, 1166), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1160, 1166), False, 'import torch\n'), ((2127, 2142), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2140, 2142), False, 'import torch\n'), ((2894, 2921), 'numpy.asarray', 'np.asarray', (['cfg.decay_steps'], {}), '(cfg.decay_steps)\n', (2904, 2921), True, 'import numpy as np\n'), ((2756, 2807), 'math.cos', 'math.cos', (['(math.pi * (step - start_step) / cfg.steps)'], {}), '(math.pi * (step - start_step) / cfg.steps)\n', (2764, 2807), False, 'import math\n')] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Optional determinism switches (left disabled by the author):
# np.random.seed(1)
# tf.compat.v1.set_random_seed(1)
LR_A = 0.0005  # learning rate for actor
LR_C = 0.001  # learning rate for critic
GAMMA = 0.9  # reward discount
REPLACEMENT = [
    dict(name='soft', tau=0.01),
    dict(name='hard', rep_iter_a=600, rep_iter_c=500)
][1]  # you can try different target replacement strategies; [1] selects 'hard'
# NOTE(review): REPLACEMENT appears unused in this chunk — MADDPG below uses TAU.
MEMORY_CAPACITY = 10000  # max transitions kept in the replay buffer
BATCH_SIZE = 256  # minibatch size sampled per learning step
RENDER = False  # presumably an env-render flag; not referenced in this chunk
OUTPUT_GRAPH = True  # presumably a TF-graph-dump flag; not referenced in this chunk
TAU = 0.01  # soft-update mixing coefficient for the target networks
class MADDPG(object):
    """Two-agent MADDPG agent built on TensorFlow 1.x graph mode.

    One eval/target actor pair (agent 1) plus a centralized critic that sees
    both agents' states (S1, S2) and actions (a1, a2).  Target networks are
    tracked with soft (Polyak) updates using the module-level ``TAU``.
    Transitions are kept in a fixed-size numpy ring buffer.
    """
    def __init__(self, a_dim, s_dim, a_bound, model, retrain):
        # Replay row layout: [s1 | s2 | a1 | a2 | r | s_1 | s_2]
        # -> row width = s_dim * 4 + a_dim * 2 + 1.
        self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 4 + a_dim * 2 + 1), dtype=np.float32)
        self.pointer = 0  # total transitions stored so far (wraps via modulo)
        self.sess = tf.Session()
        self.a_replace_counter, self.c_replace_counter = 0, 0
        self.model = model  # checkpoint directory / name prefix used by save()
        self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
        # Placeholders: S*/S_* are current/next states for agents 1 and 2;
        # a2 is the *other* agent's action, fed from the replay buffer.
        self.S1 = tf.placeholder(tf.float32, [None, s_dim], 's1')
        self.S_1 = tf.placeholder(tf.float32, [None, s_dim], 's_1')
        self.R = tf.placeholder(tf.float32, [None, 1], 'r')
        self.S2 = tf.placeholder(tf.float32, [None, s_dim], 's2')
        self.S_2 = tf.placeholder(tf.float32, [None, s_dim], 's_2')
        self.a2 = tf.placeholder(tf.float32, [None, a_dim], 'a2')
        self.retrain = retrain
        # Disabled checkpoint-restore path kept by the author:
        # self.saver = tf.train.import_meta_graph('./nmodel/model-45000.meta')
        # ckpt = tf.train.get_checkpoint_state('./nmodel')
        # if ckpt and ckpt.model_checkpoint_path and not self.retrain:
        #     self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        #     print(1)
        # self.graph = tf.get_default_graph()
        with tf.variable_scope('Actor'):
            # Eval actor for agent 1's current state; target actors evaluate
            # both next states (weights shared via the 'target' scope).
            self.a1 = self._build_a(self.S1, scope='eval', trainable=True)
            a_1 = self._build_a(self.S_1, scope='target', trainable=False)
            a_2 = self._build_a(self.S_2, scope='target', trainable=False)
            # self.a2 = self._build_a(self.S2, scope='eval', trainable=True)
            # self.a_2 = self._build_a(self.S_2, scope='target', trainable=False)
        # Actor trained with DDPG (historical note; the copy machinery below is disabled):
        # self.ae_params1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        # self.at_params1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        # self.ae_params2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor1/eval')
        # self.at_params2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor1/target')
        # self.replace1 = [[tf.assign(ta, ea), tf.assign(tc, ec)]
        #                  for ta, ea, tc, ec in zip(self.ae_params2, self.ae_params1, self.at_params2,self.at_params1)]
        with tf.variable_scope('Critic'):
            # assign self.a = a in memory when calculating q for td_error,
            # otherwise the self.a is from Actor when updating Actor
            q = self._build_c(self.S1, self.S2, self.a1, self.a2, scope='eval', trainable=True)
            q_ = self._build_c(self.S_1, self.S_2, a_1, a_2, scope='target', trainable=False)
        # networks parameters
        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
        self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
        # target net replacement: Polyak averaging with coefficient TAU
        self.soft_replace = [[tf.assign(ta, (1 - TAU) * ta + TAU * ea), tf.assign(tc, (1 - TAU) * tc + TAU * ec)]
                             for ta, ea, tc, ec in zip(self.at_params, self.ae_params, self.ct_params, self.ce_params)]
        q_target = self.R + GAMMA * q_
        # in the feed_dic for the td_error, the self.a should change to actions in memory
        td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
        self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=self.ce_params)
        a_loss = - tf.reduce_mean(q)  # maximize the q
        self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=self.ae_params)
        #
        self.sess.run(tf.global_variables_initializer())
        # self.sess.run(self.replace1)
        self.avgreward = []
        self.collision = []
        self.saver = tf.train.Saver(max_to_keep=4)
        # Disabled alternative restore path:
        # ckpt = tf.train.get_checkpoint_state('./nmodel')
        # self.saver = saver = tf.train.Saver(max_to_keep=4)
        # if ckpt and ckpt.model_checkpoint_path:
        #     self.saver.restore(self.sess,ckpt.model_checkpoint_path)
        #     print(1)
    def choose_action(self, s):
        """Deterministic action from agent 1's eval actor for one state ``s``."""
        return self.sess.run(self.a1, {self.S1: s[np.newaxis, :]})[0]
    def learn(self):
        """One training step: soft target update + actor/critic SGD on a minibatch."""
        # soft target replacement
        self.sess.run(self.soft_replace)
        indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
        # Slice the sampled rows back into their transition components
        # (see the memory layout documented in __init__).
        bt = self.memory[indices, :]
        bs1 = bt[:, :self.s_dim]
        bs2 = bt[:, self.s_dim: self.s_dim * 2]
        ba1 = bt[:, self.s_dim * 2: self.s_dim * 2 + self.a_dim]
        ba2 = bt[:, self.s_dim * 2 + self.a_dim: self.s_dim * 2 + self.a_dim * 2]
        br = bt[:, -self.s_dim * 2 - 1: -self.s_dim * 2]
        bs_1 = bt[:, -self.s_dim * 2: -self.s_dim]
        bs_2 = bt[:, -self.s_dim:]
        self.sess.run(self.atrain, {self.S1: bs1, self.S2: bs2, self.a2: ba2})
        self.sess.run(self.ctrain,
                      {self.S1: bs1, self.S2: bs2, self.a1: ba1, self.a2: ba2, self.R: br, self.S_1: bs_1,
                       self.S_2: bs_2})
    def save(self, episode):
        """Checkpoint the session under ``self.model`` tagged with ``episode``."""
        self.saver.save(self.sess, self.model + '/' + self.model, global_step=episode)
    def store_transition(self, s1, s2, a1, a2, r, s_1, s_2):
        """Append one joint transition to the replay ring buffer."""
        transition = np.hstack((s1, s2, a1, a2, [r], s_1, s_2))
        index = self.pointer % MEMORY_CAPACITY  # overwrite the oldest slot once full
        self.memory[index, :] = transition
        self.pointer += 1
    def _build_a(self, s, scope, trainable):
        """Build the actor MLP (tanh layers; output in [-1, 1]) for state ``s``."""
        with tf.variable_scope(scope):
            # Previously the graph was restored from a checkpoint:
            # net = self.graph.get_tensor_by_name('Actor/'+scope+'/l1/Tanh:0')
            # a1 = self.graph.get_tensor_by_name('Actor/'+scope+'/a1/Tanh:0')
            # a2 = self.graph.get_tensor_by_name('Actor/'+scope+'/a2/Tanh:0')
            # tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor1/eval')
            init_w = tf.random_normal_initializer(0., 0.3)
            init_b = tf.constant_initializer(0.1)
            net = tf.layers.dense(s, 30, activation=tf.nn.tanh, kernel_initializer=init_w,
                                  bias_initializer=init_b, name='l11', trainable=trainable, reuse=tf.AUTO_REUSE)
            # a = tf.layers.dense(net, self.a_dim, name='a', trainable=trainable)
            a1 = tf.layers.dense(net, 50, activation=tf.nn.tanh, kernel_initializer=init_w,
                                 bias_initializer=init_b, name='a11', trainable=trainable, reuse=tf.AUTO_REUSE)
            a2 = tf.layers.dense(a1, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
                                 bias_initializer=init_b, name='a21', trainable=trainable, reuse=tf.AUTO_REUSE)
            return a2
    def _build_c(self, s1, s2, a1, a2, scope, trainable):
        """Build the centralized critic Q(s1, s2, a1, a2) -> scalar."""
        with tf.variable_scope(scope):
            n_l1 = 30
            n_l2 = 40
            # First layer combines both states and both actions linearly.
            w_s1 = tf.get_variable('w1_s1', [self.s_dim, n_l1], trainable=trainable)
            w_s2 = tf.get_variable('w2_s2', [self.s_dim, n_l1], trainable=trainable)
            w_a1 = tf.get_variable('w1_a1', [self.a_dim, n_l1], trainable=trainable)
            w_a2 = tf.get_variable('w1_oa1', [self.a_dim, n_l1], trainable=trainable)
            b = tf.get_variable('b', [1, n_l1], trainable=trainable)
            net1 = tf.nn.tanh(tf.matmul(s1, w_s1) + tf.matmul(s2, w_s2) + tf.matmul(a1, w_a1) + tf.matmul(a2, w_a2) + b)
            net2 = tf.layers.dense(net1, n_l2, activation=tf.nn.tanh, trainable=trainable, name='net21')
            net3 = tf.layers.dense(net2, 1, trainable=trainable, name='net31')  # Q(s,a)
            return net3
| [
"tensorflow.losses.mean_squared_error",
"tensorflow.train.Saver",
"tensorflow.get_collection",
"tensorflow.global_variables_initializer",
"tensorflow.constant_initializer",
"tensorflow.layers.dense",
"tensorflow.Session",
"numpy.zeros",
"tensorflow.variable_scope",
"numpy.hstack",
"tensorflow.re... | [((603, 675), 'numpy.zeros', 'np.zeros', (['(MEMORY_CAPACITY, s_dim * 4 + a_dim * 2 + 1)'], {'dtype': 'np.float32'}), '((MEMORY_CAPACITY, s_dim * 4 + a_dim * 2 + 1), dtype=np.float32)\n', (611, 675), True, 'import numpy as np\n'), ((721, 733), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (731, 733), True, 'import tensorflow as tf\n'), ((911, 958), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, s_dim]', '"""s1"""'], {}), "(tf.float32, [None, s_dim], 's1')\n", (925, 958), True, 'import tensorflow as tf\n'), ((978, 1026), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, s_dim]', '"""s_1"""'], {}), "(tf.float32, [None, s_dim], 's_1')\n", (992, 1026), True, 'import tensorflow as tf\n'), ((1044, 1086), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""r"""'], {}), "(tf.float32, [None, 1], 'r')\n", (1058, 1086), True, 'import tensorflow as tf\n'), ((1105, 1152), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, s_dim]', '"""s2"""'], {}), "(tf.float32, [None, s_dim], 's2')\n", (1119, 1152), True, 'import tensorflow as tf\n'), ((1172, 1220), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, s_dim]', '"""s_2"""'], {}), "(tf.float32, [None, s_dim], 's_2')\n", (1186, 1220), True, 'import tensorflow as tf\n'), ((1239, 1286), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, a_dim]', '"""a2"""'], {}), "(tf.float32, [None, a_dim], 'a2')\n", (1253, 1286), True, 'import tensorflow as tf\n'), ((3139, 3207), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""Actor/eval"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')\n", (3156, 3207), True, 'import tensorflow as tf\n'), ((3233, 3303), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""Actor/target"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')\n", (3250, 3303), 
True, 'import tensorflow as tf\n'), ((3329, 3398), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""Critic/eval"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')\n", (3346, 3398), True, 'import tensorflow as tf\n'), ((3424, 3495), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""Critic/target"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')\n", (3441, 3495), True, 'import tensorflow as tf\n'), ((3913, 3973), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'q_target', 'predictions': 'q'}), '(labels=q_target, predictions=q)\n', (3941, 3973), True, 'import tensorflow as tf\n'), ((4402, 4431), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(4)'}), '(max_to_keep=4)\n', (4416, 4431), True, 'import tensorflow as tf\n'), ((4910, 4960), 'numpy.random.choice', 'np.random.choice', (['MEMORY_CAPACITY'], {'size': 'BATCH_SIZE'}), '(MEMORY_CAPACITY, size=BATCH_SIZE)\n', (4926, 4960), True, 'import numpy as np\n'), ((5830, 5872), 'numpy.hstack', 'np.hstack', (['(s1, s2, a1, a2, [r], s_1, s_2)'], {}), '((s1, s2, a1, a2, [r], s_1, s_2))\n', (5839, 5872), True, 'import numpy as np\n'), ((1686, 1712), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Actor"""'], {}), "('Actor')\n", (1703, 1712), True, 'import tensorflow as tf\n'), ((2720, 2747), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Critic"""'], {}), "('Critic')\n", (2737, 2747), True, 'import tensorflow as tf\n'), ((4089, 4106), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q'], {}), '(q)\n', (4103, 4106), True, 'import tensorflow as tf\n'), ((4251, 4284), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4282, 4284), True, 'import tensorflow as tf\n'), ((6095, 6119), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (6112, 6119), True, 'import 
tensorflow as tf\n'), ((6461, 6499), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.3)'], {}), '(0.0, 0.3)\n', (6489, 6499), True, 'import tensorflow as tf\n'), ((6520, 6548), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (6543, 6548), True, 'import tensorflow as tf\n'), ((6567, 6727), 'tensorflow.layers.dense', 'tf.layers.dense', (['s', '(30)'], {'activation': 'tf.nn.tanh', 'kernel_initializer': 'init_w', 'bias_initializer': 'init_b', 'name': '"""l11"""', 'trainable': 'trainable', 'reuse': 'tf.AUTO_REUSE'}), "(s, 30, activation=tf.nn.tanh, kernel_initializer=init_w,\n bias_initializer=init_b, name='l11', trainable=trainable, reuse=tf.\n AUTO_REUSE)\n", (6582, 6727), True, 'import tensorflow as tf\n'), ((6852, 7014), 'tensorflow.layers.dense', 'tf.layers.dense', (['net', '(50)'], {'activation': 'tf.nn.tanh', 'kernel_initializer': 'init_w', 'bias_initializer': 'init_b', 'name': '"""a11"""', 'trainable': 'trainable', 'reuse': 'tf.AUTO_REUSE'}), "(net, 50, activation=tf.nn.tanh, kernel_initializer=init_w,\n bias_initializer=init_b, name='a11', trainable=trainable, reuse=tf.\n AUTO_REUSE)\n", (6867, 7014), True, 'import tensorflow as tf\n'), ((7056, 7226), 'tensorflow.layers.dense', 'tf.layers.dense', (['a1', 'self.a_dim'], {'activation': 'tf.nn.tanh', 'kernel_initializer': 'init_w', 'bias_initializer': 'init_b', 'name': '"""a21"""', 'trainable': 'trainable', 'reuse': 'tf.AUTO_REUSE'}), "(a1, self.a_dim, activation=tf.nn.tanh, kernel_initializer=\n init_w, bias_initializer=init_b, name='a21', trainable=trainable, reuse\n =tf.AUTO_REUSE)\n", (7071, 7226), True, 'import tensorflow as tf\n'), ((7344, 7368), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (7361, 7368), True, 'import tensorflow as tf\n'), ((7433, 7498), 'tensorflow.get_variable', 'tf.get_variable', (['"""w1_s1"""', '[self.s_dim, n_l1]'], {'trainable': 'trainable'}), "('w1_s1', [self.s_dim, 
n_l1], trainable=trainable)\n", (7448, 7498), True, 'import tensorflow as tf\n'), ((7518, 7583), 'tensorflow.get_variable', 'tf.get_variable', (['"""w2_s2"""', '[self.s_dim, n_l1]'], {'trainable': 'trainable'}), "('w2_s2', [self.s_dim, n_l1], trainable=trainable)\n", (7533, 7583), True, 'import tensorflow as tf\n'), ((7603, 7668), 'tensorflow.get_variable', 'tf.get_variable', (['"""w1_a1"""', '[self.a_dim, n_l1]'], {'trainable': 'trainable'}), "('w1_a1', [self.a_dim, n_l1], trainable=trainable)\n", (7618, 7668), True, 'import tensorflow as tf\n'), ((7688, 7754), 'tensorflow.get_variable', 'tf.get_variable', (['"""w1_oa1"""', '[self.a_dim, n_l1]'], {'trainable': 'trainable'}), "('w1_oa1', [self.a_dim, n_l1], trainable=trainable)\n", (7703, 7754), True, 'import tensorflow as tf\n'), ((7771, 7823), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[1, n_l1]'], {'trainable': 'trainable'}), "('b', [1, n_l1], trainable=trainable)\n", (7786, 7823), True, 'import tensorflow as tf\n'), ((7964, 8053), 'tensorflow.layers.dense', 'tf.layers.dense', (['net1', 'n_l2'], {'activation': 'tf.nn.tanh', 'trainable': 'trainable', 'name': '"""net21"""'}), "(net1, n_l2, activation=tf.nn.tanh, trainable=trainable,\n name='net21')\n", (7979, 8053), True, 'import tensorflow as tf\n'), ((8069, 8128), 'tensorflow.layers.dense', 'tf.layers.dense', (['net2', '(1)'], {'trainable': 'trainable', 'name': '"""net31"""'}), "(net2, 1, trainable=trainable, name='net31')\n", (8084, 8128), True, 'import tensorflow as tf\n'), ((3560, 3600), 'tensorflow.assign', 'tf.assign', (['ta', '((1 - TAU) * ta + TAU * ea)'], {}), '(ta, (1 - TAU) * ta + TAU * ea)\n', (3569, 3600), True, 'import tensorflow as tf\n'), ((3602, 3642), 'tensorflow.assign', 'tf.assign', (['tc', '((1 - TAU) * tc + TAU * ec)'], {}), '(tc, (1 - TAU) * tc + TAU * ec)\n', (3611, 3642), True, 'import tensorflow as tf\n'), ((3996, 4024), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['LR_C'], {}), '(LR_C)\n', (4018, 4024), 
True, 'import tensorflow as tf\n'), ((4147, 4175), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['LR_A'], {}), '(LR_A)\n', (4169, 4175), True, 'import tensorflow as tf\n'), ((7920, 7939), 'tensorflow.matmul', 'tf.matmul', (['a2', 'w_a2'], {}), '(a2, w_a2)\n', (7929, 7939), True, 'import tensorflow as tf\n'), ((7898, 7917), 'tensorflow.matmul', 'tf.matmul', (['a1', 'w_a1'], {}), '(a1, w_a1)\n', (7907, 7917), True, 'import tensorflow as tf\n'), ((7854, 7873), 'tensorflow.matmul', 'tf.matmul', (['s1', 'w_s1'], {}), '(s1, w_s1)\n', (7863, 7873), True, 'import tensorflow as tf\n'), ((7876, 7895), 'tensorflow.matmul', 'tf.matmul', (['s2', 'w_s2'], {}), '(s2, w_s2)\n', (7885, 7895), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""CNN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1z-MzR5uN73-ek3jLjZoHPSUS5X1lgPsI
"""
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import SGD, Adam, Adadelta
import numpy as np
import tensorflow as tf
# Loading Dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
x_train, x_test = np.expand_dims(x_train, axis=-1), np.expand_dims(x_test, axis=-1)
# CNN Layers
model = Sequential()
model.add(Conv2D(16, (3, 3),(2,2), input_shape=x_train.shape[1:], padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(16, (2, 2),(2,2),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(16, (2, 2),(2,2),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
model.add(Flatten())
#FC Layers
model.add(Dense(16,activation='relu'))
model.add(Dense(10,activation='softmax'))
model.summary()
print("\nTraining...")
opt = SGD(learning_rate=0.05)
model.compile(optimizer=opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, epochs=20,batch_size=96)
# Testing
print("\nTesting...")
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Test Loss: {0} - Test Acc: {1}".format(test_loss, test_acc)) | [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.SGD",
"numpy.expand_dims",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Sequential",
"tensorflow.kera... | [((539, 556), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (554, 556), False, 'from tensorflow.keras.datasets import mnist\n'), ((709, 721), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (719, 721), False, 'from tensorflow.keras.models import Sequential\n'), ((1333, 1356), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'learning_rate': '(0.05)'}), '(learning_rate=0.05)\n', (1336, 1356), False, 'from tensorflow.keras.optimizers import SGD, Adam, Adadelta\n'), ((621, 653), 'numpy.expand_dims', 'np.expand_dims', (['x_train'], {'axis': '(-1)'}), '(x_train, axis=-1)\n', (635, 653), True, 'import numpy as np\n'), ((655, 686), 'numpy.expand_dims', 'np.expand_dims', (['x_test'], {'axis': '(-1)'}), '(x_test, axis=-1)\n', (669, 686), True, 'import numpy as np\n'), ((732, 805), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)', '(2, 2)'], {'input_shape': 'x_train.shape[1:]', 'padding': '"""same"""'}), "(16, (3, 3), (2, 2), input_shape=x_train.shape[1:], padding='same')\n", (738, 805), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((815, 833), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (825, 833), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((845, 891), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (857, 891), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((904, 946), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(2, 2)', '(2, 2)'], {'padding': '"""same"""'}), "(16, (2, 2), (2, 2), padding='same')\n", (910, 946), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((955, 973), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (965, 973), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((985, 1031), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (997, 1031), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((1044, 1086), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(2, 2)', '(2, 2)'], {'padding': '"""same"""'}), "(16, (2, 2), (2, 2), padding='same')\n", (1050, 1086), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((1095, 1113), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1105, 1113), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((1125, 1171), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (1137, 1171), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((1183, 1192), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1190, 1192), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((1216, 1244), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (1221, 1244), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n'), ((1255, 1286), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (1260, 1286), False, 'from tensorflow.keras.layers import Conv2D, Activation, Dropout, Dense, MaxPooling2D, Flatten\n')] |
import numpy.testing as npt
import pytest
from pyHalo.Rendering.MassFunctions.power_law import GeneralPowerLaw
from pyHalo.Rendering.MassFunctions.mass_function_utilities import integrate_power_law_quad, integrate_power_law_analytic
import numpy as np
class TestGeneralPowerLaw(object):
    """Checks that drawn halo masses reproduce the analytic power-law moments."""

    def setup(self):
        # Shared mass bounds, slope and normalization for both mass functions.
        self.log_mlow = 6.
        self.log_mhigh = 8.7
        self.plaw_index = -1.9
        self.norm = 10 ** 12
        self.func_cdm = GeneralPowerLaw(self.log_mlow, self.log_mhigh, self.plaw_index,
                                        draw_poisson=False, normalization=self.norm,
                                        log_mc=None, a_wdm=None, b_wdm=None, c_wdm=None)
        self.func_wdm = GeneralPowerLaw(self.log_mlow, self.log_mhigh, self.plaw_index,
                                        draw_poisson=False, normalization=self.norm,
                                        log_mc=7.5, a_wdm=2., b_wdm=0.5, c_wdm=-1.3)

    def test_draw_cdm(self):
        # Total drawn mass should match the analytic first moment to ~1%.
        moment = 1
        expected = integrate_power_law_analytic(self.norm, 10 ** self.log_mlow,
                                                  10 ** self.log_mhigh, moment,
                                                  self.plaw_index)
        total = np.sum(self.func_cdm.draw())
        npt.assert_almost_equal(total / expected, 1, 2)

    def test_draw_wdm(self):
        # Same check for the suppressed (WDM) mass function, via quadrature.
        moment = 1
        expected = integrate_power_law_quad(self.norm, 10 ** self.log_mlow,
                                              10 ** self.log_mhigh, 7.5, moment,
                                              self.plaw_index, a_wdm=2., b_wdm=0.5, c_wdm=-1.3)
        total = np.sum(self.func_wdm.draw())
        npt.assert_almost_equal(total / expected, 1, 2)

    def test_number_of_halos(self):
        # Mean unbroken halo count against the hard-coded reference constant.
        predicted = self.func_cdm._nhalos_mean_unbroken
        expected = self.norm * 4.407e-6
        npt.assert_almost_equal(predicted / expected, 1, 5)
# Allow running this module directly; pytest collects the tests defined above.
if __name__ == '__main__':
    pytest.main()
| [
"numpy.sum",
"numpy.testing.assert_almost_equal",
"pyHalo.Rendering.MassFunctions.mass_function_utilities.integrate_power_law_analytic",
"pyHalo.Rendering.MassFunctions.mass_function_utilities.integrate_power_law_quad",
"pytest.main",
"pyHalo.Rendering.MassFunctions.power_law.GeneralPowerLaw"
] | [((1791, 1804), 'pytest.main', 'pytest.main', ([], {}), '()\n', (1802, 1804), False, 'import pytest\n'), ((451, 616), 'pyHalo.Rendering.MassFunctions.power_law.GeneralPowerLaw', 'GeneralPowerLaw', (['self.log_mlow', 'self.log_mhigh', 'self.plaw_index'], {'draw_poisson': '(False)', 'normalization': 'self.norm', 'log_mc': 'None', 'a_wdm': 'None', 'b_wdm': 'None', 'c_wdm': 'None'}), '(self.log_mlow, self.log_mhigh, self.plaw_index,\n draw_poisson=False, normalization=self.norm, log_mc=None, a_wdm=None,\n b_wdm=None, c_wdm=None)\n', (466, 616), False, 'from pyHalo.Rendering.MassFunctions.power_law import GeneralPowerLaw\n'), ((714, 876), 'pyHalo.Rendering.MassFunctions.power_law.GeneralPowerLaw', 'GeneralPowerLaw', (['self.log_mlow', 'self.log_mhigh', 'self.plaw_index'], {'draw_poisson': '(False)', 'normalization': 'self.norm', 'log_mc': '(7.5)', 'a_wdm': '(2.0)', 'b_wdm': '(0.5)', 'c_wdm': '(-1.3)'}), '(self.log_mlow, self.log_mhigh, self.plaw_index,\n draw_poisson=False, normalization=self.norm, log_mc=7.5, a_wdm=2.0,\n b_wdm=0.5, c_wdm=-1.3)\n', (729, 876), False, 'from pyHalo.Rendering.MassFunctions.power_law import GeneralPowerLaw\n'), ((1011, 1118), 'pyHalo.Rendering.MassFunctions.mass_function_utilities.integrate_power_law_analytic', 'integrate_power_law_analytic', (['self.norm', '(10 ** self.log_mlow)', '(10 ** self.log_mhigh)', 'n', 'self.plaw_index'], {}), '(self.norm, 10 ** self.log_mlow, 10 ** self.\n log_mhigh, n, self.plaw_index)\n', (1039, 1118), False, 'from pyHalo.Rendering.MassFunctions.mass_function_utilities import integrate_power_law_quad, integrate_power_law_analytic\n'), ((1306, 1448), 'pyHalo.Rendering.MassFunctions.mass_function_utilities.integrate_power_law_quad', 'integrate_power_law_quad', (['self.norm', '(10 ** self.log_mlow)', '(10 ** self.log_mhigh)', '(7.5)', 'n', 'self.plaw_index'], {'a_wdm': '(2.0)', 'b_wdm': '(0.5)', 'c_wdm': '(-1.3)'}), '(self.norm, 10 ** self.log_mlow, 10 ** self.\n log_mhigh, 7.5, n, self.plaw_index, a_wdm=2.0, 
b_wdm=0.5, c_wdm=-1.3)\n', (1330, 1448), False, 'from pyHalo.Rendering.MassFunctions.mass_function_utilities import integrate_power_law_quad, integrate_power_law_analytic\n'), ((1711, 1759), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['(n_model / ntheory)', '(1)', '(5)'], {}), '(n_model / ntheory, 1, 5)\n', (1734, 1759), True, 'import numpy.testing as npt\n'), ((1218, 1227), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1224, 1227), True, 'import numpy as np\n'), ((1547, 1556), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1553, 1556), True, 'import numpy as np\n')] |
import os
import sys
import pandas as pd
import numpy as np
import lightfm
import scipy.sparse as sps
import scipy.sparse.linalg as splinalg
threads = 10  # worker threads for both LightFM training and prediction fan-out
# Train one WARP model per batch file and write top-100 recommendation lists.
for i in range(1, 14):
    print("running batch %d" % i)
    # NOTE(review): assumes both files have 'user' and 'item' columns, and the
    # train file also a 'score' column (used below for seen-item filtering).
    batch = pd.read_csv("batches/batch_%d_train.dat" % i)
    test_users = pd.read_csv("batches/batch_%d_test.dat" % i)
    model = lightfm.LightFM(
        loss='warp',
        no_components=10,
        learning_rate=0.05,
        learning_schedule="adadelta"
    )
    # Largest per-user training interaction count: rank lists are padded by
    # this much so 100 unseen items remain after filtering seen ones.
    maxover = batch.groupby('user').item.count().max()
    topk = 100
    def get_ranklists(model, users, items, test):
        """Score ``items`` for each user in parallel; return a long-format
        DataFrame (user, item, pos) with the top ``topk + maxover`` items
        per user in descending score order. ``test`` is currently unused."""
        import concurrent.futures
        executor = concurrent.futures.ThreadPoolExecutor(threads)
        def predu(i):
            # One prediction call per user; argsort ascending, take the
            # highest-scoring tail, then reverse to descending order.
            scores = model.predict(
                i,
                items,
                num_threads=1
            )
            return items[np.argsort(scores)[-(topk + maxover):][::-1]]
        preds = list(executor.map(predu, users))
        lists = pd.DataFrame({
            'user': np.repeat(users, topk + maxover),
            'item': np.ndarray.flatten(np.array(preds)),
            'pos': np.tile(np.arange(topk + maxover) + 1, len(users))
        })
        return lists
    # Sparse user-item interaction matrix (all interactions weighted 1.0).
    uim_train = sps.coo_matrix((np.ones(len(batch)), tuple(zip(*batch[['user', 'item']].values))))
    model = model.fit_partial(
        uim_train,
        epochs=5,
        num_threads=threads,
        verbose=False
    )
    # Only rank test users that actually appear in this batch's training data.
    real_test_users = test_users.user[test_users.user.isin(batch.user.unique())].values
    ranklists = get_ranklists(model, real_test_users, batch.item.unique(), None)
    # filtering seen items: join against training scores; a non-null 'score'
    # marks a (user, item) pair already seen during training.
    ranklists_j = ranklists.join(batch.set_index(['user', 'item'])['score'], on=['user', 'item'])
    ranklists_j_new = ranklists_j[ranklists_j['score'].isnull()].copy()
    ranklists_j_new.rename(columns={'pos': 'oldpos'}, inplace=True)
    ranklists_j_new.sort_values('oldpos', inplace=True)
    # Re-number positions 1..k per user after the seen items were dropped.
    ranklists_j_new['pos'] = 1
    ranklists_j_new['pos'] = ranklists_j_new.groupby('user').pos.transform(np.cumsum)
    ranklists_j_new[ranklists_j_new.pos <= 100][['user', 'item', 'pos']].sort_values(['user','pos']).to_csv('batches/batch_%d_predictions.dat' % i, sep=' ', header=False, index=False)
"pandas.read_csv",
"lightfm.LightFM",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.repeat"
] | [((225, 270), 'pandas.read_csv', 'pd.read_csv', (["('batches/batch_%d_train.dat' % i)"], {}), "('batches/batch_%d_train.dat' % i)\n", (236, 270), True, 'import pandas as pd\n'), ((288, 332), 'pandas.read_csv', 'pd.read_csv', (["('batches/batch_%d_test.dat' % i)"], {}), "('batches/batch_%d_test.dat' % i)\n", (299, 332), True, 'import pandas as pd\n'), ((346, 446), 'lightfm.LightFM', 'lightfm.LightFM', ([], {'loss': '"""warp"""', 'no_components': '(10)', 'learning_rate': '(0.05)', 'learning_schedule': '"""adadelta"""'}), "(loss='warp', no_components=10, learning_rate=0.05,\n learning_schedule='adadelta')\n", (361, 446), False, 'import lightfm\n'), ((1019, 1051), 'numpy.repeat', 'np.repeat', (['users', '(topk + maxover)'], {}), '(users, topk + maxover)\n', (1028, 1051), True, 'import numpy as np\n'), ((1092, 1107), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (1100, 1107), True, 'import numpy as np\n'), ((872, 890), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (882, 890), True, 'import numpy as np\n'), ((1137, 1162), 'numpy.arange', 'np.arange', (['(topk + maxover)'], {}), '(topk + maxover)\n', (1146, 1162), True, 'import numpy as np\n')] |
import cPickle as pickle
import os
"""
This module contains the methods for get and parsing data from mnist datase
originally the mnist dataset has different dimensions that we used in the system
because we have needed to adapt it
This code is based in a Martin Thoma tutorial
https://martin-thoma.com/classify-mnist-with-pybrain/#tocAnchor-1-1
"""
__author__ = 'rbalda'
from image_processing import crop_image, invert_color,resize, apply_threshold, generate_pattern
from struct import unpack
import gzip
import matplotlib.pylab as plt
from numpy import zeros, uint8
def get_labeled_data(imagefile, labelfile, database):
    """Read an MNIST image file and label file and return preprocessed data.

    The result is cached as ``<database>.pickle``: on subsequent calls the
    cached dictionary is loaded instead of re-parsing the gzip files.

    :param imagefile: path to the gzipped MNIST image file (idx3 format).
    :param labelfile: path to the gzipped MNIST label file (idx1 format).
    :param database: base name used for the pickle cache file.
    :return: dict with keys 'data' (N x 400 uint8 matrix of generated
        patterns) and 'label' (N x 1 uint8 matrix of digit labels).
    :raises Exception: if the image and label counts disagree.
    """
    pickle_path = '%s.pickle' % database
    if os.path.isfile(pickle_path):
        # Open in binary mode and close the handle deterministically; the
        # previous code leaked a text-mode file object, which breaks pickle
        # loading on Python 3.
        with open(pickle_path, 'rb') as pickle_file:
            data = pickle.load(pickle_file)
    else:
        # Open the images and labels with gzip in read binary mode; the
        # context managers guarantee both streams are closed.
        with gzip.open(imagefile, 'rb') as images, \
                gzip.open(labelfile, 'rb') as labels:
            # All metadata fields are big-endian unsigned ints ('>I').
            images.read(4)  # skip the magic_number
            number_of_images = unpack('>I', images.read(4))[0]
            rows = unpack('>I', images.read(4))[0]
            cols = unpack('>I', images.read(4))[0]
            labels.read(4)  # skip the magic_number
            N = unpack('>I', labels.read(4))[0]
            if number_of_images != N:
                raise Exception('The number of labels did not match '
                                'the number of images.')
            # Get the data
            x = zeros((rows, cols), dtype=uint8)  # current raw image
            y = zeros((N, 1), dtype=uint8)        # labels, one per image
            w = zeros((N, 400), dtype=uint8)      # generated 400-dim patterns
            for i in range(N):
                if i % 1000 == 0:
                    print("i: %i" % i)
                for row in range(rows):
                    for col in range(cols):
                        # Each pixel is a single big-endian byte ('>B').
                        tmp_pixel = unpack('>B', images.read(1))[0]
                        x[row][col] = tmp_pixel
                # Adapt the raw image to the system's expected format.
                z = resize(crop_image(invert_color(apply_threshold(x))))
                w[i] = generate_pattern(z)
                # Reset the scratch image for the next sample.
                x = zeros((rows, cols), dtype=uint8)
                y[i] = unpack('>B', labels.read(1))[0]
        data = {'data': w, 'label': y}
        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump(data, pickle_file)
    return data
def view_image(image, label=""):
    """Display a single image in grayscale with matplotlib.

    :param image: 2-D array of pixel intensities to display.
    :param label: optional label printed to stdout before the image is shown.
    """
    print("Label: %s" % label)
    # Grayscale colormap; plt.show() blocks until the window is closed.
    plt.imshow(image,cmap=plt.cm.gray)
    plt.show()
"gzip.open",
"matplotlib.pylab.imshow",
"struct.unpack",
"numpy.zeros",
"image_processing.generate_pattern",
"os.path.isfile",
"image_processing.apply_threshold",
"matplotlib.pylab.show"
] | [((744, 782), 'os.path.isfile', 'os.path.isfile', (["('%s.pickle' % database)"], {}), "('%s.pickle' % database)\n", (758, 782), False, 'import os\n'), ((2754, 2789), 'matplotlib.pylab.imshow', 'plt.imshow', (['image'], {'cmap': 'plt.cm.gray'}), '(image, cmap=plt.cm.gray)\n', (2764, 2789), True, 'import matplotlib.pylab as plt\n'), ((2793, 2803), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2801, 2803), True, 'import matplotlib.pylab as plt\n'), ((924, 950), 'gzip.open', 'gzip.open', (['imagefile', '"""rb"""'], {}), "(imagefile, 'rb')\n", (933, 950), False, 'import gzip\n'), ((968, 994), 'gzip.open', 'gzip.open', (['labelfile', '"""rb"""'], {}), "(labelfile, 'rb')\n", (977, 994), False, 'import gzip\n'), ((1745, 1777), 'numpy.zeros', 'zeros', (['(rows, cols)'], {'dtype': 'uint8'}), '((rows, cols), dtype=uint8)\n', (1750, 1777), False, 'from numpy import zeros, uint8\n'), ((1816, 1842), 'numpy.zeros', 'zeros', (['(N, 1)'], {'dtype': 'uint8'}), '((N, 1), dtype=uint8)\n', (1821, 1842), False, 'from numpy import zeros, uint8\n'), ((1882, 1910), 'numpy.zeros', 'zeros', (['(N, 400)'], {'dtype': 'uint8'}), '((N, 400), dtype=uint8)\n', (1887, 1910), False, 'from numpy import zeros, uint8\n'), ((1246, 1276), 'struct.unpack', 'unpack', (['""">I"""', 'number_of_images'], {}), "('>I', number_of_images)\n", (1252, 1276), False, 'from struct import unpack\n'), ((1325, 1343), 'struct.unpack', 'unpack', (['""">I"""', 'rows'], {}), "('>I', rows)\n", (1331, 1343), False, 'from struct import unpack\n'), ((1392, 1410), 'struct.unpack', 'unpack', (['""">I"""', 'cols'], {}), "('>I', cols)\n", (1398, 1410), False, 'from struct import unpack\n'), ((1536, 1551), 'struct.unpack', 'unpack', (['""">I"""', 'N'], {}), "('>I', N)\n", (1542, 1551), False, 'from struct import unpack\n'), ((2339, 2358), 'image_processing.generate_pattern', 'generate_pattern', (['z'], {}), '(z)\n', (2355, 2358), False, 'from image_processing import crop_image, invert_color, resize, apply_threshold, 
generate_pattern\n'), ((2375, 2407), 'numpy.zeros', 'zeros', (['(rows, cols)'], {'dtype': 'uint8'}), '((rows, cols), dtype=uint8)\n', (2380, 2407), False, 'from numpy import zeros, uint8\n'), ((2464, 2487), 'struct.unpack', 'unpack', (['""">B"""', 'tmp_label'], {}), "('>B', tmp_label)\n", (2470, 2487), False, 'from struct import unpack\n'), ((2178, 2201), 'struct.unpack', 'unpack', (['""">B"""', 'tmp_pixel'], {}), "('>B', tmp_pixel)\n", (2184, 2201), False, 'from struct import unpack\n'), ((2298, 2316), 'image_processing.apply_threshold', 'apply_threshold', (['x'], {}), '(x)\n', (2313, 2316), False, 'from image_processing import crop_image, invert_color, resize, apply_threshold, generate_pattern\n')] |
import numpy as np
import boto3
from moto import mock_s3
import pytest
import os
from .S3_image_functions import S3Images
from PIL import Image
from PIL import ImageChops
@pytest.fixture(scope="function")
def aws_credentials():
    """Populate the environment with fake AWS credentials for moto."""
    fake_credentials = {
        "AWS_ACCESS_KEY_ID": "testing",
        "AWS_SECRET_ACCESS_KEY": "testing",
        "AWS_SECURITY_TOKEN": "testing",
        "AWS_SESSION_TOKEN": "testing",
    }
    os.environ.update(fake_credentials)
@pytest.fixture(scope="function")
def s3(aws_credentials):
    """Yield an S3 client backed by moto; the mock stays active for the test."""
    with mock_s3():
        client = boto3.client("s3", region_name="ap-southeast-2")
        yield client
def test_ImagesS3(s3):
    """Round-trip a PIL image through S3Images against the mocked S3 backend.

    Uploads a solid red 200x200 image, downloads it again, and verifies the
    pixel data survived the round trip unchanged.
    """
    img = Image.new("RGB", (200, 200), (255, 0, 0, 0))
    images = S3Images()
    bucket_config = {'LocationConstraint': "ap-southeast-2"}
    s3.create_bucket(Bucket="testbucket", CreateBucketConfiguration=bucket_config)
    images.to_s3(img=img, bucket="testbucket", key='testkey.png')
    # Compare as arrays: PIL images are hard to compare directly, so numpy
    # element-wise equality is the simplest pixel-level check.
    dl_image = images.from_s3('testbucket', 'testkey.png')
    assert (np.array(img) == np.array(dl_image)).all()
| [
"PIL.Image.new",
"boto3.client",
"pytest.fixture",
"numpy.array",
"moto.mock_s3"
] | [((173, 205), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (187, 205), False, 'import pytest\n'), ((472, 504), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (486, 504), False, 'import pytest\n'), ((697, 726), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', 'color'], {}), "('RGB', size, color)\n", (706, 726), False, 'from PIL import Image\n'), ((539, 548), 'moto.mock_s3', 'mock_s3', ([], {}), '()\n', (546, 548), False, 'from moto import mock_s3\n'), ((564, 612), 'boto3.client', 'boto3.client', (['"""s3"""'], {'region_name': '"""ap-southeast-2"""'}), "('s3', region_name='ap-southeast-2')\n", (576, 612), False, 'import boto3\n'), ((1158, 1171), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1166, 1171), True, 'import numpy as np\n'), ((1175, 1193), 'numpy.array', 'np.array', (['dl_image'], {}), '(dl_image)\n', (1183, 1193), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import numpy as np
import time
from pycocotools.cocoeval import COCOeval
from collections import defaultdict
from detectron2 import _C
class COCOeval_opt(COCOeval):
    """
    This is a slightly modified version of the original COCO API, where the functions evaluateImg()
    and accumulate() are implemented in C++ to speedup evaluation
    """
    def evaluate(self):
        """
        Run per image evaluation on given images and store results in self.evalImgs_cpp, a
        datastructure that isn't readable from Python but is used by a c++ implementation of
        accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
        self.evalImgs because this datastructure is a computational bottleneck.
        :return: None
        """
        tic = time.time()
        print("Running per image evaluation...")
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = "segm" if p.useSegm == 1 else "bbox"
            print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
        print("Evaluate annotation type *{}*".format(p.iouType))
        p.imgIds = list(np.unique(p.imgIds))
        # p.useCats = 0
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        # modify the catIds and imgIds for 7-class smd detection
        """p.catIds = [1, 2, 3, 4, 5, 6, 7]
        img_ids = []
        for i, cat_id in enumerate(p.catIds):
            if i == 0 and len(img_ids) == 0:
                img_ids = set(self.cocoGt.getImgIds(catIds=i))
            else:
                img_ids |= set(self.cocoGt.getImgIds(catIds=[i]))
        p.imgIds = sorted(img_ids)"""
        # p.imgIds = sorted(self.cocoGt.getImgIds(catIds=p.catIds))
        self.params = p
        self._prepare()
        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]
        # Pick the IoU routine matching the annotation type (bbox/segm vs kps).
        if p.iouType == "segm" or p.iouType == "bbox":
            computeIoU = self.computeIoU
        elif p.iouType == "keypoints":
            computeIoU = self.computeOks
        self.ious = {
            (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
        }
        maxDet = p.maxDets[-1]
        # <<<< Beginning of code differences with original COCO API
        def convert_instances_to_cpp(instances, is_det=False):
            # Convert annotations for a list of instances in an image to a format that's fast
            # to access in C++
            instances_cpp = []
            for instance in instances:
                instance_cpp = _C.InstanceAnnotation(
                    int(instance["id"]),
                    instance["score"] if is_det else instance.get("score", 0.0),
                    instance["area"],
                    bool(instance.get("iscrowd", 0)),
                    bool(instance.get("ignore", 0)),
                )
                instances_cpp.append(instance_cpp)
            return instances_cpp
        # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
        ground_truth_instances = [
            [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        detected_instances = [
            [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
        if not p.useCats:
            # For each image, flatten per-category lists into a single list
            ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
            detected_instances = [[[o for c in i for o in c]] for i in detected_instances]
        # Force the standard 10 IoU thresholds 0.50:0.05:0.95 regardless of
        # what the caller configured.
        p.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        # Call C++ implementation of self.evaluateImgs()
        self._evalImgs_cpp = _C.COCOevalEvaluateImages(
            p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
        )
        # Intentionally left unset: the Python-side structure is the
        # bottleneck this class avoids (see the evaluate() docstring).
        self._evalImgs = None
        self.params.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
        # >>>> End of code differences with original COCO API
    def accumulate(self):
        """
        Accumulate per image evaluation results and store the result in self.eval. Does not
        support changing parameter settings from those used by self.evaluate()
        """
        print("Accumulating evaluation results...")
        tic = time.time()
        # NOTE(review): this only warns and does not return, so the next line
        # raises AttributeError anyway when evaluate() was never run.
        if not hasattr(self, "_evalImgs_cpp"):
            print("Please run evaluate() first")
        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
            self.eval["counts"][:1] + self.eval["counts"][2:]
        )
        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
        # num_area_ranges X num_max_detections
        self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
        toc = time.time()
        print("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
    def summarize(self):
        '''
        Compute and display summary metrics for evaluation results.
        Note this functin can *only* be applied on the default parameter setting
        '''
        def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):
            # Print and return the mean precision (ap=1) or recall (ap=0)
            # over the selected IoU threshold, area range, and max detections.
            p = self.params
            iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
            titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
            typeStr = '(AP)' if ap == 1 else '(AR)'
            iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
                if iouThr is None else '{:0.2f}'.format(iouThr)
            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
            if ap == 1:
                # dimension of precision: [TxRxKxAxM]
                s = self.eval['precision']
                # IoU
                if iouThr is not None:
                    t = np.where(iouThr == p.iouThrs)[0]
                    s = s[t]
                s = s[:, :, :, aind, mind]
            else:
                # dimension of recall: [TxKxAxM]
                s = self.eval['recall']
                if iouThr is not None:
                    t = np.where(iouThr == p.iouThrs)[0]
                    s = s[t]
                s = s[:, :, aind, mind]
            # Entries of -1 mark absent categories and are excluded from the mean.
            if len(s[s > -1]) == 0:
                mean_s = -1
            else:
                mean_s = np.mean(s[s > -1])
            print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
            return mean_s
        def _summarizeDets():
            # The 14 standard detection metrics (6 AP variants + 8 AR variants).
            stats = np.zeros((14,))
            stats[0] = _summarize(1)
            stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
            stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
            stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
            stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
            stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
            stats[6] = _summarize(0, iouThr=.5, maxDets=self.params.maxDets[2])
            stats[7] = _summarize(0, iouThr=.75, maxDets=self.params.maxDets[2])
            stats[8] = _summarize(0, maxDets=self.params.maxDets[0])
            stats[9] = _summarize(0, maxDets=self.params.maxDets[1])
            stats[10] = _summarize(0, maxDets=self.params.maxDets[2])
            stats[11] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
            stats[12] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
            stats[13] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
            return stats
        def _summarizeKps():
            # The 10 standard keypoint metrics (5 AP variants + 5 AR variants).
            stats = np.zeros((10,))
            stats[0] = _summarize(1, maxDets=20)
            stats[1] = _summarize(1, maxDets=20, iouThr=.5)
            stats[2] = _summarize(1, maxDets=20, iouThr=.75)
            stats[3] = _summarize(1, maxDets=20, areaRng='medium')
            stats[4] = _summarize(1, maxDets=20, areaRng='large')
            stats[5] = _summarize(0, maxDets=20)
            stats[6] = _summarize(0, maxDets=20, iouThr=.5)
            stats[7] = _summarize(0, maxDets=20, iouThr=.75)
            stats[8] = _summarize(0, maxDets=20, areaRng='medium')
            stats[9] = _summarize(0, maxDets=20, areaRng='large')
            return stats
        if not self.eval:
            raise Exception('Please run accumulate() first')
        # NOTE(review): 'summarize' stays unbound (NameError) if iouType is
        # anything other than 'segm', 'bbox' or 'keypoints'.
        iouType = self.params.iouType
        if iouType == 'segm' or iouType == 'bbox':
            summarize = _summarizeDets
        elif iouType == 'keypoints':
            summarize = _summarizeKps
        self.stats = summarize()
    def __str__(self):
        # NOTE(review): prints the summary as a side effect but returns None,
        # so calling str() on an instance raises TypeError.
        self.summarize()
| [
"copy.deepcopy",
"detectron2._C.COCOevalEvaluateImages",
"numpy.zeros",
"time.time",
"numpy.mean",
"numpy.array",
"numpy.where",
"detectron2._C.COCOevalAccumulate",
"numpy.round",
"numpy.unique"
] | [((877, 888), 'time.time', 'time.time', ([], {}), '()\n', (886, 888), False, 'import time\n'), ((4189, 4298), 'detectron2._C.COCOevalEvaluateImages', '_C.COCOevalEvaluateImages', (['p.areaRng', 'maxDet', 'p.iouThrs', 'ious', 'ground_truth_instances', 'detected_instances'], {}), '(p.areaRng, maxDet, p.iouThrs, ious,\n ground_truth_instances, detected_instances)\n', (4214, 4298), False, 'from detectron2 import _C\n'), ((4481, 4507), 'copy.deepcopy', 'copy.deepcopy', (['self.params'], {}), '(self.params)\n', (4494, 4507), False, 'import copy\n'), ((4522, 4533), 'time.time', 'time.time', ([], {}), '()\n', (4531, 4533), False, 'import time\n'), ((4973, 4984), 'time.time', 'time.time', ([], {}), '()\n', (4982, 4984), False, 'import time\n'), ((5102, 5161), 'detectron2._C.COCOevalAccumulate', '_C.COCOevalAccumulate', (['self._paramsEval', 'self._evalImgs_cpp'], {}), '(self._paramsEval, self._evalImgs_cpp)\n', (5123, 5161), False, 'from detectron2 import _C\n'), ((5742, 5753), 'time.time', 'time.time', ([], {}), '()\n', (5751, 5753), False, 'import time\n'), ((1313, 1332), 'numpy.unique', 'np.unique', (['p.imgIds'], {}), '(p.imgIds)\n', (1322, 1332), True, 'import numpy as np\n'), ((7554, 7569), 'numpy.zeros', 'np.zeros', (['(14,)'], {}), '((14,))\n', (7562, 7569), True, 'import numpy as np\n'), ((8733, 8748), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (8741, 8748), True, 'import numpy as np\n'), ((1408, 1427), 'numpy.unique', 'np.unique', (['p.catIds'], {}), '(p.catIds)\n', (1417, 1427), True, 'import numpy as np\n'), ((5288, 5317), 'numpy.array', 'np.array', (["self.eval['recall']"], {}), "(self.eval['recall'])\n", (5296, 5317), True, 'import numpy as np\n'), ((5577, 5609), 'numpy.array', 'np.array', (["self.eval['precision']"], {}), "(self.eval['precision'])\n", (5585, 5609), True, 'import numpy as np\n'), ((5669, 5698), 'numpy.array', 'np.array', (["self.eval['scores']"], {}), "(self.eval['scores'])\n", (5677, 5698), True, 'import numpy as np\n'), 
((7374, 7392), 'numpy.mean', 'np.mean', (['s[s > -1]'], {}), '(s[s > -1])\n', (7381, 7392), True, 'import numpy as np\n'), ((4053, 4082), 'numpy.round', 'np.round', (['((0.95 - 0.5) / 0.05)'], {}), '((0.95 - 0.5) / 0.05)\n', (4061, 4082), True, 'import numpy as np\n'), ((4404, 4433), 'numpy.round', 'np.round', (['((0.95 - 0.5) / 0.05)'], {}), '((0.95 - 0.5) / 0.05)\n', (4412, 4433), True, 'import numpy as np\n'), ((6890, 6919), 'numpy.where', 'np.where', (['(iouThr == p.iouThrs)'], {}), '(iouThr == p.iouThrs)\n', (6898, 6919), True, 'import numpy as np\n'), ((7165, 7194), 'numpy.where', 'np.where', (['(iouThr == p.iouThrs)'], {}), '(iouThr == p.iouThrs)\n', (7173, 7194), True, 'import numpy as np\n')] |
"""Unit tests for machine_learning_utils.py."""
import copy
import unittest
import numpy
import pandas
from gewittergefahr.gg_utils import nwp_model_utils
from generalexam.ge_utils import front_utils
from generalexam.machine_learning import machine_learning_utils as ml_utils
TOLERANCE = 1e-6
TOLERANCE_FOR_CLASS_WEIGHT = 1e-3
# The following constants are used to test _check_full_narr_matrix.
NUM_ROWS_IN_NARR, NUM_COLUMNS_IN_NARR = nwp_model_utils.get_grid_dimensions(
model_name=nwp_model_utils.NARR_MODEL_NAME)
FULL_NARR_MATRIX_2D = numpy.random.uniform(
low=0., high=1., size=(NUM_ROWS_IN_NARR, NUM_COLUMNS_IN_NARR))
FULL_NARR_MATRIX_3D = numpy.stack(
(FULL_NARR_MATRIX_2D, FULL_NARR_MATRIX_2D), axis=0)
FULL_NARR_MATRIX_4D = numpy.stack(
(FULL_NARR_MATRIX_3D, FULL_NARR_MATRIX_3D), axis=-1)
FULL_NARR_MATRIX_5D = numpy.stack(
(FULL_NARR_MATRIX_4D, FULL_NARR_MATRIX_4D), axis=-1)
# The following constants are used to test _check_predictor_matrix.
PREDICTOR_MATRIX_1D = numpy.array([1, 2, 3, 4], dtype=numpy.float32)
PREDICTOR_MATRIX_2D = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=numpy.float32)
TUPLE_OF_2D_PREDICTOR_MATRICES = (PREDICTOR_MATRIX_2D, PREDICTOR_MATRIX_2D)
PREDICTOR_MATRIX_3D = numpy.stack(TUPLE_OF_2D_PREDICTOR_MATRICES, axis=0)
PREDICTOR_MATRIX_3D[0, 0, 0] = numpy.nan
TUPLE_OF_3D_PREDICTOR_MATRICES = (
PREDICTOR_MATRIX_3D, PREDICTOR_MATRIX_3D, PREDICTOR_MATRIX_3D,
PREDICTOR_MATRIX_3D, PREDICTOR_MATRIX_3D, PREDICTOR_MATRIX_3D)
PREDICTOR_MATRIX_4D = numpy.stack(TUPLE_OF_3D_PREDICTOR_MATRICES, axis=-1)
TUPLE_OF_4D_PREDICTOR_MATRICES = (
PREDICTOR_MATRIX_4D, PREDICTOR_MATRIX_4D, PREDICTOR_MATRIX_4D,
PREDICTOR_MATRIX_4D, PREDICTOR_MATRIX_4D)
PREDICTOR_MATRIX_5D = numpy.stack(TUPLE_OF_4D_PREDICTOR_MATRICES, axis=-2)
# The following constants are used to test _check_target_matrix.
TARGET_VALUES_BINARY_1D = numpy.array([0, 1, 1, 0], dtype=int)
TARGET_VALUES_TERNARY_1D = numpy.array([0, 2, 1, 0], dtype=int)
TARGET_VALUES_BINARY_2D = numpy.array([[0, 1, 1, 0],
[1, 0, 0, 1]], dtype=int)
TARGET_VALUES_BINARY_3D = numpy.stack(
(TARGET_VALUES_BINARY_2D, TARGET_VALUES_BINARY_2D), axis=0)
# The following constants are used to test _downsize_predictor_images.
PRE_DOWNSIZED_MATRIX = numpy.array([[1, 2, 3, 4, 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14, 15, 16, 17, 18],
[19, 20, 21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34, 35, 36],
[37, 38, 39, 40, 41, 42, 43, 44, 45],
[46, 47, 48, 49, 50, 51, 52, 53, 54],
[55, 56, 57, 58, 59, 60, 61, 62, 63]],
dtype=numpy.float32)
DOWNSIZING_CENTER_ROW_TOP_LEFT = 1
DOWNSIZING_CENTER_COLUMN_TOP_LEFT = 1
DOWNSIZING_CENTER_ROW_BOTTOM_LEFT = 5
DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT = 1
DOWNSIZING_CENTER_ROW_TOP_RIGHT = 1
DOWNSIZING_CENTER_COLUMN_TOP_RIGHT = 7
DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT = 5
DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT = 7
DOWNSIZING_CENTER_ROW_MIDDLE = 3
DOWNSIZING_CENTER_COLUMN_MIDDLE = 4
NUM_ROWS_IN_DOWNSIZED_HALF_GRID = 2
NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID = 3
DOWNSIZED_MATRIX_TOP_LEFT = numpy.array([[1, 1, 1, 2, 3, 4, 5],
[1, 1, 1, 2, 3, 4, 5],
[10, 10, 10, 11, 12, 13, 14],
[19, 19, 19, 20, 21, 22, 23],
[28, 28, 28, 29, 30, 31, 32]],
dtype=numpy.float32)
DOWNSIZED_MATRIX_BOTTOM_LEFT = numpy.array([[28, 28, 28, 29, 30, 31, 32],
[37, 37, 37, 38, 39, 40, 41],
[46, 46, 46, 47, 48, 49, 50],
[55, 55, 55, 56, 57, 58, 59],
[55, 55, 55, 56, 57, 58, 59]],
dtype=numpy.float32)
DOWNSIZED_MATRIX_TOP_RIGHT = numpy.array([[5, 6, 7, 8, 9, 9, 9],
[5, 6, 7, 8, 9, 9, 9],
[14, 15, 16, 17, 18, 18, 18],
[23, 24, 25, 26, 27, 27, 27],
[32, 33, 34, 35, 36, 36, 36]],
dtype=numpy.float32)
DOWNSIZED_MATRIX_BOTTOM_RIGHT = numpy.array([[32, 33, 34, 35, 36, 36, 36],
[41, 42, 43, 44, 45, 45, 45],
[50, 51, 52, 53, 54, 54, 54],
[59, 60, 61, 62, 63, 63, 63],
[59, 60, 61, 62, 63, 63, 63]],
dtype=numpy.float32)
DOWNSIZED_MATRIX_MIDDLE = numpy.array([[11, 12, 13, 14, 15, 16, 17],
[20, 21, 22, 23, 24, 25, 26],
[29, 30, 31, 32, 33, 34, 35],
[38, 39, 40, 41, 42, 43, 44],
[47, 48, 49, 50, 51, 52, 53]],
dtype=numpy.float32)
PRE_DOWNSIZED_MATRIX_3D = numpy.stack(
(PRE_DOWNSIZED_MATRIX, PRE_DOWNSIZED_MATRIX), axis=0)
PRE_DOWNSIZED_MATRIX_4D = numpy.stack(
(PRE_DOWNSIZED_MATRIX_3D, PRE_DOWNSIZED_MATRIX_3D), axis=-1)
PRE_DOWNSIZED_MATRIX_5D = numpy.stack(
(PRE_DOWNSIZED_MATRIX_4D, PRE_DOWNSIZED_MATRIX_4D), axis=-2)
DOWNSIZED_MATRIX_TOP_LEFT_3D = numpy.stack(
(DOWNSIZED_MATRIX_TOP_LEFT, DOWNSIZED_MATRIX_TOP_LEFT), axis=0)
DOWNSIZED_MATRIX_TOP_LEFT_4D = numpy.stack(
(DOWNSIZED_MATRIX_TOP_LEFT_3D, DOWNSIZED_MATRIX_TOP_LEFT_3D), axis=-1)
DOWNSIZED_MATRIX_TOP_LEFT_5D = numpy.stack(
(DOWNSIZED_MATRIX_TOP_LEFT_4D, DOWNSIZED_MATRIX_TOP_LEFT_4D), axis=-2)
DOWNSIZED_MATRIX_BOTTOM_LEFT_3D = numpy.stack(
(DOWNSIZED_MATRIX_BOTTOM_LEFT, DOWNSIZED_MATRIX_BOTTOM_LEFT), axis=0)
DOWNSIZED_MATRIX_BOTTOM_LEFT_4D = numpy.stack(
(DOWNSIZED_MATRIX_BOTTOM_LEFT_3D, DOWNSIZED_MATRIX_BOTTOM_LEFT_3D),
axis=-1)
DOWNSIZED_MATRIX_BOTTOM_LEFT_5D = numpy.stack(
(DOWNSIZED_MATRIX_BOTTOM_LEFT_4D, DOWNSIZED_MATRIX_BOTTOM_LEFT_4D),
axis=-2)
DOWNSIZED_MATRIX_TOP_RIGHT_3D = numpy.stack(
(DOWNSIZED_MATRIX_TOP_RIGHT, DOWNSIZED_MATRIX_TOP_RIGHT), axis=0)
DOWNSIZED_MATRIX_TOP_RIGHT_4D = numpy.stack(
(DOWNSIZED_MATRIX_TOP_RIGHT_3D, DOWNSIZED_MATRIX_TOP_RIGHT_3D), axis=-1)
DOWNSIZED_MATRIX_TOP_RIGHT_5D = numpy.stack(
(DOWNSIZED_MATRIX_TOP_RIGHT_4D, DOWNSIZED_MATRIX_TOP_RIGHT_4D), axis=-2)
DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D = numpy.stack(
(DOWNSIZED_MATRIX_BOTTOM_RIGHT, DOWNSIZED_MATRIX_BOTTOM_RIGHT), axis=0)
DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D = numpy.stack(
(DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D, DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D),
axis=-1)
DOWNSIZED_MATRIX_BOTTOM_RIGHT_5D = numpy.stack(
(DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D, DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D),
axis=-2)
DOWNSIZED_MATRIX_MIDDLE_3D = numpy.stack(
(DOWNSIZED_MATRIX_MIDDLE, DOWNSIZED_MATRIX_MIDDLE), axis=0)
DOWNSIZED_MATRIX_MIDDLE_4D = numpy.stack(
(DOWNSIZED_MATRIX_MIDDLE_3D, DOWNSIZED_MATRIX_MIDDLE_3D), axis=-1)
DOWNSIZED_MATRIX_MIDDLE_5D = numpy.stack(
(DOWNSIZED_MATRIX_MIDDLE_4D, DOWNSIZED_MATRIX_MIDDLE_4D), axis=-2)
# The following constants are used to test _class_fractions_to_num_points.
CLASS_FRACTIONS_BINARY = numpy.array([0.1, 0.9])
NUM_POINTS_AVAILABLE_LARGE = 17
NUM_POINTS_BY_CLASS_BINARY_LARGE = numpy.array([2, 15])
NUM_POINTS_BY_CLASS_TERNARY_LARGE = numpy.array([2, 3, 12])
CLASS_FRACTIONS_TERNARY = numpy.array([0.1, 0.2, 0.7])
NUM_POINTS_AVAILABLE_SMALL = 4
NUM_POINTS_BY_CLASS_BINARY_SMALL = numpy.array([1, 3])
NUM_POINTS_BY_CLASS_TERNARY_SMALL = numpy.array([1, 1, 2])
# The following constants are used to test get_class_weight_dict.
CLASS_WEIGHT_DICT_BINARY = {0: 0.9, 1: 0.1}
CLASS_WEIGHT_DICT_TERNARY = {0: 0.6087, 1: 0.3043, 2: 0.0870}
# The following constants are used to test normalize_predictors with
# normalization type = "minmax".
PRCTILE_OFFSET_FOR_NORMALIZATION = 0.
FIRST_PREDICTOR_MATRIX_2D = numpy.array(
[[0, 1, 2, 3],
[4, 5, 6, 7]], dtype=float
)
SECOND_PREDICTOR_MATRIX_2D = numpy.array(
[[2, 4, 6, numpy.nan],
[-1, -3, -5, -7]]
)
THIS_FIRST_MATRIX_3D = numpy.stack(
(FIRST_PREDICTOR_MATRIX_2D, FIRST_PREDICTOR_MATRIX_2D), axis=-1)
PREDICTOR_MATRIX_4D_DENORM = numpy.stack(
(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)
THIS_SECOND_MATRIX_3D = numpy.stack(
(SECOND_PREDICTOR_MATRIX_2D, SECOND_PREDICTOR_MATRIX_2D), axis=-1)
THIS_SECOND_MATRIX_4D = numpy.stack(
(THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D), axis=0)
PREDICTOR_MATRIX_5D_DENORM = numpy.stack(
(PREDICTOR_MATRIX_4D_DENORM, THIS_SECOND_MATRIX_4D), axis=-2)
THIS_MIN = 0.
THIS_MAX_LESS_MIN = 7.
THIS_FIRST_MATRIX_3D = numpy.stack((
(FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN,
(FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN
), axis=-1)
PREDICTOR_MATRIX_4D_MINMAX_NORM = numpy.stack(
(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)
THIS_MIN = -7.
THIS_MAX_LESS_MIN = 14.
THIS_FIRST_MATRIX_3D = numpy.stack((
(FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN,
(FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN
), axis=-1)
THIS_FIRST_MATRIX_4D = numpy.stack(
(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)
THIS_SECOND_MATRIX_3D = numpy.stack((
(SECOND_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN,
(SECOND_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN
), axis=-1)
THIS_SECOND_MATRIX_4D = numpy.stack(
(THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D), axis=0)
PREDICTOR_MATRIX_5D_MINMAX_NORM = numpy.stack(
(THIS_FIRST_MATRIX_4D, THIS_SECOND_MATRIX_4D), axis=-2)
# The following constants are used to test normalize_predictors with
# normalization type = "z_score".
THIS_MEAN = numpy.mean(FIRST_PREDICTOR_MATRIX_2D)
THIS_STDEV = numpy.std(FIRST_PREDICTOR_MATRIX_2D, ddof=1)
THIS_FIRST_MATRIX_3D = numpy.stack((
(FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV,
(FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV
), axis=-1)
PREDICTOR_MATRIX_4D_Z_NORM = numpy.stack(
(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)
ALL_PREDICTORS = numpy.stack(
(FIRST_PREDICTOR_MATRIX_2D, SECOND_PREDICTOR_MATRIX_2D), axis=-1)
THIS_MEAN = numpy.nanmean(ALL_PREDICTORS)
THIS_STDEV = numpy.nanstd(ALL_PREDICTORS, ddof=1)
THIS_FIRST_MATRIX_3D = numpy.stack((
(FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV,
(FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV
), axis=-1)
THIS_FIRST_MATRIX_4D = numpy.stack(
(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)
# Expected z-score-normalized 5-D predictor matrix, built by stacking the
# normalized 2-D predictor along channel (axis=-1), example (axis=0), and
# time (axis=-2) dimensions.
THIS_SECOND_MATRIX_3D = numpy.stack((
    (SECOND_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV,
    (SECOND_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV
), axis=-1)
THIS_SECOND_MATRIX_4D = numpy.stack(
    (THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D), axis=0)
PREDICTOR_MATRIX_5D_Z_NORM = numpy.stack(
    (THIS_FIRST_MATRIX_4D, THIS_SECOND_MATRIX_4D), axis=-2)
# The following constants are used to test front_table_to_images.
NUM_GRID_ROWS = 6
NUM_GRID_COLUMNS = 8
# Grid cells occupied by warm fronts (WF) and cold fronts (CF) at the first
# valid time.  Each list has one numpy array per valid time.
THESE_WF_ROW_INDICES = [numpy.array([0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int)]
THESE_WF_COLUMN_INDICES = [numpy.array([1, 2, 7, 2, 3, 4, 5, 6, 7], dtype=int)]
THESE_CF_ROW_INDICES = [numpy.array([1, 2, 3, 4, 4, 5], dtype=int)]
THESE_CF_COLUMN_INDICES = [numpy.array([1, 1, 1, 0, 1, 0], dtype=int)]
THIS_DICT = {
    front_utils.WARM_FRONT_ROW_INDICES_COLUMN: THESE_WF_ROW_INDICES,
    front_utils.WARM_FRONT_COLUMN_INDICES_COLUMN: THESE_WF_COLUMN_INDICES,
    front_utils.COLD_FRONT_ROW_INDICES_COLUMN: THESE_CF_ROW_INDICES,
    front_utils.COLD_FRONT_COLUMN_INDICES_COLUMN: THESE_CF_COLUMN_INDICES
}
FRONTAL_GRID_TABLE1 = pandas.DataFrame.from_dict(THIS_DICT)
# Same fixture for the second valid time.
THESE_WF_ROW_INDICES = [numpy.array([1, 1, 2, 2, 3, 3], dtype=int)]
THESE_WF_COLUMN_INDICES = [numpy.array([4, 5, 5, 6, 6, 7], dtype=int)]
THESE_CF_ROW_INDICES = [
    numpy.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=int)]
THESE_CF_COLUMN_INDICES = [
    numpy.array([2, 3, 1, 2, 0, 1, 0, 1, 0, 1, 1], dtype=int)]
THIS_DICT = {
    front_utils.WARM_FRONT_ROW_INDICES_COLUMN: THESE_WF_ROW_INDICES,
    front_utils.WARM_FRONT_COLUMN_INDICES_COLUMN: THESE_WF_COLUMN_INDICES,
    front_utils.COLD_FRONT_ROW_INDICES_COLUMN: THESE_CF_ROW_INDICES,
    front_utils.COLD_FRONT_COLUMN_INDICES_COLUMN: THESE_CF_COLUMN_INDICES
}
FRONTAL_GRID_TABLE2 = pandas.DataFrame.from_dict(THIS_DICT)
FRONTAL_GRID_TABLE = pandas.concat(
    [FRONTAL_GRID_TABLE1, FRONTAL_GRID_TABLE2], axis=0, ignore_index=True)
# Expected ternary front images for the two valid times.  Cell values:
# 0 = no front; 1 = warm front (matches WF indices above); 2 = cold front.
THIS_FIRST_MATRIX = numpy.array([[0, 1, 1, 0, 0, 0, 0, 1],
                                 [0, 2, 1, 1, 1, 1, 1, 1],
                                 [0, 2, 0, 0, 0, 0, 0, 0],
                                 [0, 2, 0, 0, 0, 0, 0, 0],
                                 [2, 2, 0, 0, 0, 0, 0, 0],
                                 [2, 0, 0, 0, 0, 0, 0, 0]])
THIS_SECOND_MATRIX = numpy.array([[0, 0, 2, 2, 0, 0, 0, 0],
                                  [0, 2, 2, 0, 1, 1, 0, 0],
                                  [2, 2, 0, 0, 0, 1, 1, 0],
                                  [2, 2, 0, 0, 0, 0, 1, 1],
                                  [2, 2, 0, 0, 0, 0, 0, 0],
                                  [0, 2, 0, 0, 0, 0, 0, 0]])
FRONTAL_GRID_MATRIX_TERNARY = numpy.stack(
    (THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0).astype(int)
# The following constants are used to test binarize_front_images.
# Same images as above but with warm/cold collapsed to a single front class
# (any non-zero cell becomes 1).
THIS_FIRST_MATRIX = numpy.array([[0, 1, 1, 0, 0, 0, 0, 1],
                                 [0, 1, 1, 1, 1, 1, 1, 1],
                                 [0, 1, 0, 0, 0, 0, 0, 0],
                                 [0, 1, 0, 0, 0, 0, 0, 0],
                                 [1, 1, 0, 0, 0, 0, 0, 0],
                                 [1, 0, 0, 0, 0, 0, 0, 0]])
THIS_SECOND_MATRIX = numpy.array([[0, 0, 1, 1, 0, 0, 0, 0],
                                  [0, 1, 1, 0, 1, 1, 0, 0],
                                  [1, 1, 0, 0, 0, 1, 1, 0],
                                  [1, 1, 0, 0, 0, 0, 1, 1],
                                  [1, 1, 0, 0, 0, 0, 0, 0],
                                  [0, 1, 0, 0, 0, 0, 0, 0]])
FRONTAL_GRID_MATRIX_BINARY = numpy.stack(
    (THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0).astype(int)
# The following constants are used to test sample_target_points with 2 classes.
NUM_POINTS_TO_SAMPLE = 50
CLASS_FRACTIONS_FOR_BINARY_SAMPLING = numpy.array([0.5, 0.5])
# Mask with zeros on the border: when applied, only interior cells may be
# sampled (hence the "_WITH_MASK" expected values below exclude row 0, row 5,
# column 0, and column 7).
MASK_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 1, 1, 1, 1, 1, 0],
                           [0, 1, 1, 1, 1, 1, 1, 0],
                           [0, 1, 1, 1, 1, 1, 1, 0],
                           [0, 1, 1, 1, 1, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0]], dtype=int)
# Expected sampled points (negative = no front, positive = front) per time.
NEGATIVE_ROWS_TIME1_NO_MASK = numpy.array(
    [0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4],
    dtype=int)
NEGATIVE_COLUMNS_TIME1_NO_MASK = numpy.array(
    [0, 3, 4, 5, 6, 0, 0, 2, 3, 4, 5, 6, 7, 0, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6],
    dtype=int)
POSITIVE_ROWS_TIME1_NO_MASK = numpy.array(
    [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 4, 5], dtype=int)
POSITIVE_COLUMNS_TIME1_NO_MASK = numpy.array(
    [1, 2, 7, 1, 2, 3, 4, 5, 6, 7, 1, 1, 0, 1, 0], dtype=int)
ROW_INDICES_TIME1_NO_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME1_NO_MASK, POSITIVE_ROWS_TIME1_NO_MASK)).astype(int)
COLUMN_INDICES_TIME1_NO_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME1_NO_MASK, POSITIVE_COLUMNS_TIME1_NO_MASK)).astype(int)
NEGATIVE_ROWS_TIME1_WITH_MASK = numpy.array(
    [2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4], dtype=int)
NEGATIVE_COLUMNS_TIME1_WITH_MASK = numpy.array(
    [2, 3, 4, 5, 6, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6], dtype=int)
POSITIVE_ROWS_TIME1_WITH_MASK = numpy.array(
    [1, 1, 1, 1, 1, 1, 2, 3, 4], dtype=int)
POSITIVE_COLUMNS_TIME1_WITH_MASK = numpy.array(
    [1, 2, 3, 4, 5, 6, 1, 1, 1], dtype=int)
ROW_INDICES_TIME1_WITH_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME1_WITH_MASK, POSITIVE_ROWS_TIME1_WITH_MASK)).astype(int)
COLUMN_INDICES_TIME1_WITH_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME1_WITH_MASK, POSITIVE_COLUMNS_TIME1_WITH_MASK
)).astype(int)
NEGATIVE_ROWS_TIME2_NO_MASK = numpy.array([], dtype=int)
NEGATIVE_COLUMNS_TIME2_NO_MASK = numpy.array([], dtype=int)
POSITIVE_ROWS_TIME2_NO_MASK = numpy.array(
    [0, 0, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
POSITIVE_COLUMNS_TIME2_NO_MASK = numpy.array(
    [2, 3, 1, 2, 4, 5, 0, 1, 5, 6], dtype=int)
ROW_INDICES_TIME2_NO_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME2_NO_MASK, POSITIVE_ROWS_TIME2_NO_MASK)).astype(int)
COLUMN_INDICES_TIME2_NO_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME2_NO_MASK, POSITIVE_COLUMNS_TIME2_NO_MASK)).astype(int)
NEGATIVE_ROWS_TIME2_WITH_MASK = numpy.array([1, 1, 2, 2], dtype=int)
NEGATIVE_COLUMNS_TIME2_WITH_MASK = numpy.array([3, 6, 2, 3], dtype=int)
POSITIVE_ROWS_TIME2_WITH_MASK = numpy.array(
    [1, 1, 1, 1, 2, 2, 2, 3, 3, 4], dtype=int)
POSITIVE_COLUMNS_TIME2_WITH_MASK = numpy.array(
    [1, 2, 4, 5, 1, 5, 6, 1, 6, 1], dtype=int)
ROW_INDICES_TIME2_WITH_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME2_WITH_MASK, POSITIVE_ROWS_TIME2_WITH_MASK)).astype(int)
COLUMN_INDICES_TIME2_WITH_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME2_WITH_MASK, POSITIVE_COLUMNS_TIME2_WITH_MASK
)).astype(int)
TARGET_POINT_DICT_BINARY_NO_MASK = {
    ml_utils.ROW_INDICES_BY_TIME_KEY:
        [ROW_INDICES_TIME1_NO_MASK, ROW_INDICES_TIME2_NO_MASK],
    ml_utils.COLUMN_INDICES_BY_TIME_KEY:
        [COLUMN_INDICES_TIME1_NO_MASK, COLUMN_INDICES_TIME2_NO_MASK]
}
TARGET_POINT_DICT_BINARY_WITH_MASK = {
    ml_utils.ROW_INDICES_BY_TIME_KEY:
        [ROW_INDICES_TIME1_WITH_MASK, ROW_INDICES_TIME2_WITH_MASK],
    ml_utils.COLUMN_INDICES_BY_TIME_KEY:
        [COLUMN_INDICES_TIME1_WITH_MASK, COLUMN_INDICES_TIME2_WITH_MASK]
}
# The following constants are used to test sample_target_points with 3 classes.
# Fractions for [no front, warm front, cold front].
CLASS_FRACTIONS_FOR_TERNARY_SAMPLING = numpy.array([0.5, 0.2, 0.3])
NEGATIVE_ROWS_TIME1_NO_MASK = numpy.array(
    [0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4],
    dtype=int)
NEGATIVE_COLUMNS_TIME1_NO_MASK = numpy.array(
    [0, 3, 4, 5, 6, 0, 0, 2, 3, 4, 5, 6, 7, 0, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6],
    dtype=int)
NEGATIVE_ROWS_TIME2_NO_MASK = numpy.array([], dtype=int)
NEGATIVE_COLUMNS_TIME2_NO_MASK = numpy.array([], dtype=int)
WARM_FRONT_ROWS_TIME1_NO_MASK = numpy.array(
    [0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int)
WARM_FRONT_COLUMNS_TIME1_NO_MASK = numpy.array(
    [1, 2, 7, 2, 3, 4, 5, 6, 7], dtype=int)
WARM_FRONT_ROWS_TIME2_NO_MASK = numpy.array([1], dtype=int)
WARM_FRONT_COLUMNS_TIME2_NO_MASK = numpy.array([4], dtype=int)
COLD_FRONT_ROWS_TIME1_NO_MASK = numpy.array([1, 2, 3, 4, 4, 5], dtype=int)
COLD_FRONT_COLUMNS_TIME1_NO_MASK = numpy.array([1, 1, 1, 0, 1, 0], dtype=int)
COLD_FRONT_ROWS_TIME2_NO_MASK = numpy.array(
    [0, 0, 1, 1, 2, 2, 3, 3, 4], dtype=int)
COLD_FRONT_COLUMNS_TIME2_NO_MASK = numpy.array(
    [2, 3, 1, 2, 0, 1, 0, 1, 0], dtype=int)
# Expected sampled indices per time: negative points, then warm-front points,
# then cold-front points.
ROW_INDICES_TIME1_NO_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME1_NO_MASK, WARM_FRONT_ROWS_TIME1_NO_MASK,
    COLD_FRONT_ROWS_TIME1_NO_MASK
)).astype(int)
COLUMN_INDICES_TIME1_NO_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME1_NO_MASK, WARM_FRONT_COLUMNS_TIME1_NO_MASK,
    COLD_FRONT_COLUMNS_TIME1_NO_MASK
)).astype(int)
ROW_INDICES_TIME2_NO_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME2_NO_MASK, WARM_FRONT_ROWS_TIME2_NO_MASK,
    COLD_FRONT_ROWS_TIME2_NO_MASK
)).astype(int)
COLUMN_INDICES_TIME2_NO_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME2_NO_MASK, WARM_FRONT_COLUMNS_TIME2_NO_MASK,
    COLD_FRONT_COLUMNS_TIME2_NO_MASK
)).astype(int)
TARGET_POINT_DICT_TERNARY_NO_MASK = {
    ml_utils.ROW_INDICES_BY_TIME_KEY:
        [ROW_INDICES_TIME1_NO_MASK, ROW_INDICES_TIME2_NO_MASK],
    ml_utils.COLUMN_INDICES_BY_TIME_KEY:
        [COLUMN_INDICES_TIME1_NO_MASK, COLUMN_INDICES_TIME2_NO_MASK]
}
NEGATIVE_ROWS_TIME1_WITH_MASK = numpy.array(
    [2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4], dtype=int)
NEGATIVE_COLUMNS_TIME1_WITH_MASK = numpy.array(
    [2, 3, 4, 5, 6, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6], dtype=int)
NEGATIVE_ROWS_TIME2_WITH_MASK = numpy.array([], dtype=int)
NEGATIVE_COLUMNS_TIME2_WITH_MASK = numpy.array([], dtype=int)
WARM_FRONT_ROWS_TIME1_WITH_MASK = numpy.array([1, 1, 1, 1, 1], dtype=int)
WARM_FRONT_COLUMNS_TIME1_WITH_MASK = numpy.array([2, 3, 4, 5, 6], dtype=int)
WARM_FRONT_ROWS_TIME2_WITH_MASK = numpy.array([1], dtype=int)
WARM_FRONT_COLUMNS_TIME2_WITH_MASK = numpy.array([4], dtype=int)
COLD_FRONT_ROWS_TIME1_WITH_MASK = numpy.array([1, 2, 3, 4], dtype=int)
COLD_FRONT_COLUMNS_TIME1_WITH_MASK = numpy.array([1, 1, 1, 1], dtype=int)
COLD_FRONT_ROWS_TIME2_WITH_MASK = numpy.array([1, 1, 2, 3, 4], dtype=int)
COLD_FRONT_COLUMNS_TIME2_WITH_MASK = numpy.array([1, 2, 1, 1, 1], dtype=int)
ROW_INDICES_TIME1_WITH_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME1_WITH_MASK, WARM_FRONT_ROWS_TIME1_WITH_MASK,
    COLD_FRONT_ROWS_TIME1_WITH_MASK
)).astype(int)
COLUMN_INDICES_TIME1_WITH_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME1_WITH_MASK, WARM_FRONT_COLUMNS_TIME1_WITH_MASK,
    COLD_FRONT_COLUMNS_TIME1_WITH_MASK
)).astype(int)
ROW_INDICES_TIME2_WITH_MASK = numpy.concatenate((
    NEGATIVE_ROWS_TIME2_WITH_MASK, WARM_FRONT_ROWS_TIME2_WITH_MASK,
    COLD_FRONT_ROWS_TIME2_WITH_MASK
)).astype(int)
COLUMN_INDICES_TIME2_WITH_MASK = numpy.concatenate((
    NEGATIVE_COLUMNS_TIME2_WITH_MASK, WARM_FRONT_COLUMNS_TIME2_WITH_MASK,
    COLD_FRONT_COLUMNS_TIME2_WITH_MASK
)).astype(int)
TARGET_POINT_DICT_TERNARY_WITH_MASK = {
    ml_utils.ROW_INDICES_BY_TIME_KEY:
        [ROW_INDICES_TIME1_WITH_MASK, ROW_INDICES_TIME2_WITH_MASK],
    ml_utils.COLUMN_INDICES_BY_TIME_KEY:
        [COLUMN_INDICES_TIME1_WITH_MASK, COLUMN_INDICES_TIME2_WITH_MASK]
}
# The following constants are used to test dilate_target_images.
DILATION_DISTANCE_METRES = 50000.
# Expected ternary images after dilation (front labels spread outward from
# FRONTAL_GRID_MATRIX_TERNARY by the dilation distance).
THIS_FIRST_MATRIX = numpy.array([[1, 1, 1, 1, 1, 1, 1, 1],
                                 [2, 2, 1, 1, 1, 1, 1, 1],
                                 [2, 2, 2, 1, 1, 1, 1, 1],
                                 [2, 2, 2, 0, 0, 0, 0, 0],
                                 [2, 2, 2, 0, 0, 0, 0, 0],
                                 [2, 2, 2, 0, 0, 0, 0, 0]])
THIS_SECOND_MATRIX = numpy.array([[2, 2, 2, 2, 2, 1, 1, 0],
                                  [2, 2, 2, 2, 1, 1, 1, 1],
                                  [2, 2, 2, 2, 1, 1, 1, 1],
                                  [2, 2, 2, 0, 1, 1, 1, 1],
                                  [2, 2, 2, 0, 0, 1, 1, 1],
                                  [2, 2, 2, 0, 0, 0, 0, 0]])
FRONTAL_GRID_MATRIX_TERNARY_DILATED = numpy.stack(
    (THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0).astype(int)
# Expected binary images after dilation.
THIS_FIRST_MATRIX = numpy.array([[1, 1, 1, 1, 1, 1, 1, 1],
                                 [1, 1, 1, 1, 1, 1, 1, 1],
                                 [1, 1, 1, 1, 1, 1, 1, 1],
                                 [1, 1, 1, 0, 0, 0, 0, 0],
                                 [1, 1, 1, 0, 0, 0, 0, 0],
                                 [1, 1, 1, 0, 0, 0, 0, 0]])
THIS_SECOND_MATRIX = numpy.array([[1, 1, 1, 1, 1, 1, 1, 0],
                                  [1, 1, 1, 1, 1, 1, 1, 1],
                                  [1, 1, 1, 1, 1, 1, 1, 1],
                                  [1, 1, 1, 0, 1, 1, 1, 1],
                                  [1, 1, 1, 0, 0, 1, 1, 1],
                                  [1, 1, 1, 0, 0, 0, 0, 0]])
FRONTAL_GRID_MATRIX_BINARY_DILATED = numpy.stack(
    (THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0).astype(int)
# The following constants are used to test subset_narr_grid_for_fcn_input.
FCN_INPUT_MATRIX_2D = FULL_NARR_MATRIX_2D[
    ml_utils.FIRST_NARR_ROW_FOR_FCN_INPUT:
    (ml_utils.LAST_NARR_ROW_FOR_FCN_INPUT + 1),
    ml_utils.FIRST_NARR_COLUMN_FOR_FCN_INPUT:
    (ml_utils.LAST_NARR_COLUMN_FOR_FCN_INPUT + 1)
]
FCN_INPUT_MATRIX_3D = numpy.stack(
    (FCN_INPUT_MATRIX_2D, FCN_INPUT_MATRIX_2D), axis=0)
FCN_INPUT_MATRIX_4D = numpy.stack(
    (FCN_INPUT_MATRIX_3D, FCN_INPUT_MATRIX_3D), axis=-1)
FCN_INPUT_MATRIX_5D = numpy.stack(
    (FCN_INPUT_MATRIX_4D, FCN_INPUT_MATRIX_4D), axis=-2)
# The following constants are used to test
# downsize_grids_around_selected_points.
PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS = numpy.array([[1, 3, 5, 7],
                                                            [2, 4, 6, 8]],
                                                           dtype=numpy.float32)
PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS = numpy.stack(
    (PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,), axis=0)
PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS = numpy.stack(
    (PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,
     PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,
     PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS), axis=-1)
# Expected 3 x 3 windows around each selected point ("R1_C2" = centered at
# row 1, column 2, one-based).  Values outside the 2 x 4 grid are filled by
# repeating the nearest edge row/column.
DOWNSIZED_MATRIX_R1_C2 = numpy.array([[1, 3, 5],
                                      [1, 3, 5],
                                      [2, 4, 6]], dtype=numpy.float32)
DOWNSIZED_MATRIX_R1_C3 = numpy.array([[3, 5, 7],
                                      [3, 5, 7],
                                      [4, 6, 8]], dtype=numpy.float32)
DOWNSIZED_MATRIX_R2_C1 = numpy.array([[1, 1, 3],
                                      [2, 2, 4],
                                      [2, 2, 4]], dtype=numpy.float32)
DOWNSIZED_MATRIX_R2_C4 = numpy.array([[5, 7, 7],
                                      [6, 8, 8],
                                      [6, 8, 8]], dtype=numpy.float32)
TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS = numpy.array([[0, 0, 1, 1],
                                                         [2, 2, 0, 0]],
                                                        dtype=int)
TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS = numpy.stack(
    (TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,), axis=0)
NUM_ROWS_IN_HALF_GRID_AROUND_SELECTED_PTS = 1
NUM_COLUMNS_IN_HALF_GRID_AROUND_SELECTED_PTS = 1
TARGET_POINT_DICT_FOR_DOWNSIZING = {
    ml_utils.ROW_INDICES_BY_TIME_KEY: [numpy.array([0, 0, 1, 1], dtype=int)],
    ml_utils.COLUMN_INDICES_BY_TIME_KEY: [numpy.array([2, 1, 3, 0], dtype=int)]
}
DOWNSIZED_MATRIX_AT_SELECTED_POINTS = numpy.stack((
    DOWNSIZED_MATRIX_R1_C3, DOWNSIZED_MATRIX_R1_C2,
    DOWNSIZED_MATRIX_R2_C4, DOWNSIZED_MATRIX_R2_C1), axis=0)
DOWNSIZED_MATRIX_AT_SELECTED_POINTS = numpy.stack(
    (DOWNSIZED_MATRIX_AT_SELECTED_POINTS,
     DOWNSIZED_MATRIX_AT_SELECTED_POINTS,
     DOWNSIZED_MATRIX_AT_SELECTED_POINTS), axis=-1)
TARGET_VECTOR_AT_SELECTED_POINTS = numpy.array([1, 0, 0, 2], dtype=int)
EXAMPLE_INDICES_AT_SELECTED_POINTS = numpy.array([0, 0, 0, 0], dtype=int)
CENTER_ROWS_AT_SELECTED_POINTS = numpy.array([0, 0, 1, 1], dtype=int)
CENTER_COLUMNS_AT_SELECTED_POINTS = numpy.array([2, 1, 3, 0], dtype=int)
# The following constants are used to test find_gridded_prediction_file.
PREDICTION_DIR_NAME = 'poop'
FIRST_PREDICTION_TIME_UNIX_SEC = 1234569600
LAST_PREDICTION_TIME_UNIX_SEC = 2345684400
PREDICTION_FILE_NAME = 'poop/gridded_predictions_2009021400-2044050103.p'
def _compare_target_point_dicts(
        first_target_point_dict, second_target_point_dict):
    """Compares two dictionaries with sampled target points.

    :param first_target_point_dict: First dictionary (in the format produced
        by `machine_learning_utils.sample_target_points`).
    :param second_target_point_dict: Second dictionary.
    :return: are_dicts_equal: Boolean flag.
    """

    these_keys = set(first_target_point_dict.keys())
    if these_keys != set(second_target_point_dict.keys()):
        return False

    num_times = len(
        first_target_point_dict[ml_utils.ROW_INDICES_BY_TIME_KEY])
    if num_times != len(
            second_target_point_dict[ml_utils.ROW_INDICES_BY_TIME_KEY]):
        return False

    # Every per-time index array must match exactly for every key.
    return all(
        numpy.array_equal(first_target_point_dict[this_key][i],
                          second_target_point_dict[this_key][i])
        for i in range(num_times) for this_key in these_keys
    )
class MachineLearningUtilsTests(unittest.TestCase):
"""Each method is a unit test for machine_learning_utils.py."""
def test_check_full_narr_matrix_2d(self):
"""Ensures correct output from _check_full_narr_matrix.
In this case, input matrix is 2-D (bad).
"""
with self.assertRaises(ValueError):
ml_utils._check_full_narr_matrix(FULL_NARR_MATRIX_2D)
    def test_check_full_narr_matrix_3d(self):
        """Ensures correct output from _check_full_narr_matrix.
        In this case, input matrix is 3-D with the NARR's spatial dimensions
        (good).
        """
        # Should pass silently (no exception).
        ml_utils._check_full_narr_matrix(FULL_NARR_MATRIX_3D)
    def test_check_full_narr_matrix_4d(self):
        """Ensures correct output from _check_full_narr_matrix.
        In this case, input matrix is 4-D with the NARR's spatial dimensions
        (good).
        """
        # Should pass silently (no exception).
        ml_utils._check_full_narr_matrix(FULL_NARR_MATRIX_4D)
    def test_check_full_narr_matrix_5d(self):
        """Ensures correct output from _check_full_narr_matrix.
        In this case, input matrix is 5-D with the NARR's spatial dimensions
        (good).
        """
        # Should pass silently (no exception).
        ml_utils._check_full_narr_matrix(FULL_NARR_MATRIX_5D)
def test_check_full_narr_matrix_bad_dimensions(self):
"""Ensures correct output from _check_full_narr_matrix.
In this case, dimensions have been permuted, so that spatial dimensions
are not along the expected axes.
"""
with self.assertRaises(TypeError):
ml_utils._check_full_narr_matrix(
numpy.transpose(FULL_NARR_MATRIX_3D))
def test_check_predictor_matrix_1d(self):
"""Ensures correct output from _check_predictor_matrix.
In this case, input matrix is 1-D (bad).
"""
with self.assertRaises(ValueError):
ml_utils._check_predictor_matrix(PREDICTOR_MATRIX_1D)
def test_check_predictor_matrix_2d(self):
"""Ensures correct output from _check_predictor_matrix.
In this case, input matrix is 2-D (bad).
"""
with self.assertRaises(ValueError):
ml_utils._check_predictor_matrix(PREDICTOR_MATRIX_2D)
    def test_check_predictor_matrix_nan_allowed(self):
        """Ensures correct output from _check_predictor_matrix.
        In this case, input matrix contains NaN's (which are allowed).
        """
        # Should pass silently (no exception).
        ml_utils._check_predictor_matrix(PREDICTOR_MATRIX_3D, allow_nan=True)
def test_check_predictor_matrix_nan_disallowed(self):
"""Ensures correct output from _check_predictor_matrix.
In this case, input matrix contains NaN's (which are *not* allowed).
"""
with self.assertRaises(ValueError):
ml_utils._check_predictor_matrix(
PREDICTOR_MATRIX_3D, allow_nan=False)
    def test_check_predictor_matrix_4d(self):
        """Ensures correct output from _check_predictor_matrix.
        In this case, input matrix is 4-D (good).
        """
        # Should pass silently (no exception).
        ml_utils._check_predictor_matrix(PREDICTOR_MATRIX_4D, allow_nan=True)
    def test_check_predictor_matrix_5d(self):
        """Ensures correct output from _check_predictor_matrix.
        In this case, input matrix is 5-D (good).
        """
        # Should pass silently (no exception).
        ml_utils._check_predictor_matrix(PREDICTOR_MATRIX_5D, allow_nan=True)
    def test_check_target_matrix_1d_good(self):
        """Ensures correct output from _check_target_matrix.
        In this case, method expects 1-D array and receives 1-D array.
        """
        # Should pass silently (no exception).
        ml_utils._check_target_matrix(
            TARGET_VALUES_BINARY_1D, assert_binary=True, num_dimensions=1)
def test_check_target_matrix_1d_bad(self):
"""Ensures correct output from _check_target_matrix.
In this case, method expects 3-D matrix and receives 1-D array.
"""
with self.assertRaises(TypeError):
ml_utils._check_target_matrix(
TARGET_VALUES_BINARY_1D, assert_binary=False, num_dimensions=3)
    def test_check_target_matrix_3d_good(self):
        """Ensures correct output from _check_target_matrix.
        In this case, method expects 3-D matrix and receives 3-D matrix.
        """
        # Should pass silently (no exception).
        ml_utils._check_target_matrix(
            TARGET_VALUES_BINARY_3D, assert_binary=True, num_dimensions=3)
def test_check_target_matrix_3d_bad(self):
"""Ensures correct output from _check_target_matrix.
In this case, method expects 1-D array and receives 3-D matrix.
"""
with self.assertRaises(TypeError):
ml_utils._check_target_matrix(
TARGET_VALUES_BINARY_3D, assert_binary=False, num_dimensions=1)
def test_check_target_matrix_2d(self):
"""Ensures correct output from _check_target_matrix.
In this case, input matrix is 2-D (bad).
"""
with self.assertRaises(TypeError):
ml_utils._check_target_matrix(
TARGET_VALUES_BINARY_2D, assert_binary=False)
    def test_check_target_matrix_non_binary_allowed(self):
        """Ensures correct output from _check_target_matrix.
        In this case, input matrix is non-binary, which is allowed.
        """
        # Should pass silently (no exception).
        ml_utils._check_target_matrix(
            TARGET_VALUES_BINARY_1D, assert_binary=False, num_dimensions=1)
def test_check_target_matrix_non_binary_disallowed(self):
"""Ensures correct output from _check_target_matrix.
In this case, input matrix is non-binary, which is *not* allowed.
"""
with self.assertRaises(ValueError):
ml_utils._check_target_matrix(
TARGET_VALUES_TERNARY_1D, assert_binary=True, num_dimensions=1)
def test_downsize_predictor_images_top_left_3d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 3-D; center point for extraction is at
top-left of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_3D,
center_row=DOWNSIZING_CENTER_ROW_TOP_LEFT,
center_column=DOWNSIZING_CENTER_COLUMN_TOP_LEFT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_TOP_LEFT_3D, atol=TOLERANCE))
def test_downsize_predictor_images_top_left_4d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 4-D; center point for extraction is at
top-left of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_4D,
center_row=DOWNSIZING_CENTER_ROW_TOP_LEFT,
center_column=DOWNSIZING_CENTER_COLUMN_TOP_LEFT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_TOP_LEFT_4D, atol=TOLERANCE))
def test_downsize_predictor_images_top_left_5d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 5-D; center point for extraction is at
top-left of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_5D,
center_row=DOWNSIZING_CENTER_ROW_TOP_LEFT,
center_column=DOWNSIZING_CENTER_COLUMN_TOP_LEFT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_TOP_LEFT_5D, atol=TOLERANCE))
def test_downsize_predictor_images_bottom_left_3d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 3-D; center point for extraction is at
bottom-left of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_3D,
center_row=DOWNSIZING_CENTER_ROW_BOTTOM_LEFT,
center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_BOTTOM_LEFT_3D, atol=TOLERANCE))
def test_downsize_predictor_images_bottom_left_4d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 4-D; center point for extraction is at
bottom-left of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_4D,
center_row=DOWNSIZING_CENTER_ROW_BOTTOM_LEFT,
center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_BOTTOM_LEFT_4D, atol=TOLERANCE))
def test_downsize_predictor_images_bottom_left_5d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 5-D; center point for extraction is at
bottom-left of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_5D,
center_row=DOWNSIZING_CENTER_ROW_BOTTOM_LEFT,
center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_BOTTOM_LEFT_5D, atol=TOLERANCE))
def test_downsize_predictor_images_top_right_3d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 3-D; center point for extraction is at
top-right of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_3D,
center_row=DOWNSIZING_CENTER_ROW_TOP_RIGHT,
center_column=DOWNSIZING_CENTER_COLUMN_TOP_RIGHT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_TOP_RIGHT_3D, atol=TOLERANCE))
def test_downsize_predictor_images_top_right_4d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 4-D; center point for extraction is at
top-right of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_4D,
center_row=DOWNSIZING_CENTER_ROW_TOP_RIGHT,
center_column=DOWNSIZING_CENTER_COLUMN_TOP_RIGHT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_TOP_RIGHT_4D, atol=TOLERANCE))
def test_downsize_predictor_images_top_right_5d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 5-D; center point for extraction is at
top-right of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_5D,
center_row=DOWNSIZING_CENTER_ROW_TOP_RIGHT,
center_column=DOWNSIZING_CENTER_COLUMN_TOP_RIGHT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_TOP_RIGHT_5D, atol=TOLERANCE))
def test_downsize_predictor_images_bottom_right_3d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 3-D; center point for extraction is at
bottom-right of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_3D,
center_row=DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT,
center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D, atol=TOLERANCE))
def test_downsize_predictor_images_bottom_right_4d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 4-D; center point for extraction is at
bottom-right of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_4D,
center_row=DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT,
center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D, atol=TOLERANCE))
def test_downsize_predictor_images_bottom_right_5d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 5-D; center point for extraction is at
bottom-right of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_5D,
center_row=DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT,
center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_BOTTOM_RIGHT_5D, atol=TOLERANCE))
def test_downsize_predictor_images_middle_3d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 3-D; center point for extraction is in
middle of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_3D,
center_row=DOWNSIZING_CENTER_ROW_MIDDLE,
center_column=DOWNSIZING_CENTER_COLUMN_MIDDLE,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_MIDDLE_3D, atol=TOLERANCE))
def test_downsize_predictor_images_middle_4d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 4-D; center point for extraction is in
middle of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_4D,
center_row=DOWNSIZING_CENTER_ROW_MIDDLE,
center_column=DOWNSIZING_CENTER_COLUMN_MIDDLE,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_MIDDLE_4D, atol=TOLERANCE))
def test_downsize_predictor_images_middle_5d(self):
"""Ensures correct output from _downsize_predictor_images.
In this case, input matrix is 5-D; center point for extraction is in
middle of input matrix.
"""
this_matrix = ml_utils._downsize_predictor_images(
predictor_matrix=PRE_DOWNSIZED_MATRIX_5D,
center_row=DOWNSIZING_CENTER_ROW_MIDDLE,
center_column=DOWNSIZING_CENTER_COLUMN_MIDDLE,
num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)
self.assertTrue(numpy.allclose(
this_matrix, DOWNSIZED_MATRIX_MIDDLE_5D, atol=TOLERANCE))
def test_class_fractions_to_num_points_large_binary(self):
"""Ensures correct output from _class_fractions_to_num_points.
In this case, number of available points is large and there are 2
classes.
"""
this_num_points_by_class = ml_utils._class_fractions_to_num_points(
class_fractions=CLASS_FRACTIONS_BINARY,
num_points_total=NUM_POINTS_AVAILABLE_LARGE)
self.assertTrue(numpy.array_equal(
this_num_points_by_class, NUM_POINTS_BY_CLASS_BINARY_LARGE))
def test_class_fractions_to_num_points_large_ternary(self):
"""Ensures correct output from _class_fractions_to_num_points.
In this case, number of available points is large and there are 3
classes.
"""
this_num_points_by_class = ml_utils._class_fractions_to_num_points(
class_fractions=CLASS_FRACTIONS_TERNARY,
num_points_total=NUM_POINTS_AVAILABLE_LARGE)
self.assertTrue(numpy.array_equal(
this_num_points_by_class, NUM_POINTS_BY_CLASS_TERNARY_LARGE))
def test_class_fractions_to_num_points_small_binary(self):
"""Ensures correct output from _class_fractions_to_num_points.
In this case, number of available points is small and there are 2
classes.
"""
this_num_points_by_class = ml_utils._class_fractions_to_num_points(
class_fractions=CLASS_FRACTIONS_BINARY,
num_points_total=NUM_POINTS_AVAILABLE_SMALL)
self.assertTrue(numpy.array_equal(
this_num_points_by_class, NUM_POINTS_BY_CLASS_BINARY_SMALL))
def test_class_fractions_to_num_points_small_ternary(self):
"""Ensures correct output from _class_fractions_to_num_points.
In this case, number of available points is small and there are 3
classes.
"""
this_num_points_by_class = ml_utils._class_fractions_to_num_points(
class_fractions=CLASS_FRACTIONS_TERNARY,
num_points_total=NUM_POINTS_AVAILABLE_SMALL)
self.assertTrue(numpy.array_equal(
this_num_points_by_class, NUM_POINTS_BY_CLASS_TERNARY_SMALL))
def test_get_class_weight_dict_binary(self):
"""Ensures correct output from get_class_weight_dict.
In this case, input contains 2 classes.
"""
this_class_weight_dict = ml_utils.get_class_weight_dict(
CLASS_FRACTIONS_BINARY)
self.assertTrue(set(this_class_weight_dict.keys()) ==
set(CLASS_WEIGHT_DICT_BINARY.keys()))
for this_key in this_class_weight_dict.keys():
self.assertTrue(numpy.isclose(
this_class_weight_dict[this_key],
CLASS_WEIGHT_DICT_BINARY[this_key],
atol=TOLERANCE_FOR_CLASS_WEIGHT))
def test_get_class_weight_dict_ternary(self):
"""Ensures correct output from get_class_weight_dict.
In this case, input contains 3 classes.
"""
this_class_weight_dict = ml_utils.get_class_weight_dict(
CLASS_FRACTIONS_TERNARY)
self.assertTrue(set(this_class_weight_dict.keys()) ==
set(CLASS_WEIGHT_DICT_TERNARY.keys()))
for this_key in this_class_weight_dict.keys():
self.assertTrue(numpy.isclose(
this_class_weight_dict[this_key],
CLASS_WEIGHT_DICT_TERNARY[this_key],
atol=TOLERANCE_FOR_CLASS_WEIGHT))
def test_normalize_predictors_4d_minmax(self):
"""Ensures correct output from normalize_predictors.
In this case, predictor matrix is 4-D (no time dimension) and
normalization method is min-max.
"""
this_predictor_matrix, _ = ml_utils.normalize_predictors(
predictor_matrix=PREDICTOR_MATRIX_4D_DENORM + 0.,
normalization_type_string=ml_utils.MINMAX_STRING,
percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)
self.assertTrue(numpy.allclose(
this_predictor_matrix, PREDICTOR_MATRIX_4D_MINMAX_NORM,
atol=TOLERANCE, equal_nan=True
))
def test_normalize_predictors_5d_minmax(self):
"""Ensures correct output from normalize_predictors.
In this case, predictor matrix is 5-D (has time dimension) and
normalization method is min-max.
"""
this_predictor_matrix, _ = ml_utils.normalize_predictors(
predictor_matrix=PREDICTOR_MATRIX_5D_DENORM + 0.,
normalization_type_string=ml_utils.MINMAX_STRING,
percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)
self.assertTrue(numpy.allclose(
this_predictor_matrix, PREDICTOR_MATRIX_5D_MINMAX_NORM,
atol=TOLERANCE, equal_nan=True
))
def test_normalize_predictors_4d_z(self):
"""Ensures correct output from normalize_predictors.
In this case, predictor matrix is 4-D (no time dimension) and
normalization method is z-score.
"""
this_predictor_matrix, _ = ml_utils.normalize_predictors(
predictor_matrix=PREDICTOR_MATRIX_4D_DENORM + 0.,
normalization_type_string=ml_utils.Z_SCORE_STRING)
self.assertTrue(numpy.allclose(
this_predictor_matrix, PREDICTOR_MATRIX_4D_Z_NORM, atol=TOLERANCE,
equal_nan=True
))
def test_normalize_predictors_5d_z(self):
"""Ensures correct output from normalize_predictors.
In this case, predictor matrix is 5-D (has time dimension) and
normalization method is z-score.
"""
this_predictor_matrix, _ = ml_utils.normalize_predictors(
predictor_matrix=PREDICTOR_MATRIX_5D_DENORM + 0.,
normalization_type_string=ml_utils.Z_SCORE_STRING)
self.assertTrue(numpy.allclose(
this_predictor_matrix, PREDICTOR_MATRIX_5D_Z_NORM, atol=TOLERANCE,
equal_nan=True
))
def test_denormalize_predictors_4d_minmax(self):
"""Ensures correct output from denormalize_predictors.
In this case, predictor matrix is 4-D (no time dimension) and
normalization method is min-max.
"""
this_predictor_matrix, this_normalization_dict = (
ml_utils.normalize_predictors(
predictor_matrix=PREDICTOR_MATRIX_4D_DENORM + 0.,
normalization_type_string=ml_utils.MINMAX_STRING,
percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)
)
this_predictor_matrix = ml_utils.denormalize_predictors(
predictor_matrix=this_predictor_matrix,
normalization_dict=this_normalization_dict)
self.assertTrue(numpy.allclose(
this_predictor_matrix, PREDICTOR_MATRIX_4D_DENORM,
atol=TOLERANCE, equal_nan=True
))
def test_denormalize_predictors_5d_minmax(self):
    """Checks that denormalize_predictors inverts min-max normalization.

    A 5-D matrix (with time dimension) is normalized, denormalized, and
    compared to the original.
    """
    normalized_matrix, normalization_dict = ml_utils.normalize_predictors(
        predictor_matrix=PREDICTOR_MATRIX_5D_DENORM + 0.,
        normalization_type_string=ml_utils.MINMAX_STRING,
        percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)

    denormalized_matrix = ml_utils.denormalize_predictors(
        predictor_matrix=normalized_matrix,
        normalization_dict=normalization_dict)

    round_trip_ok = numpy.allclose(
        denormalized_matrix, PREDICTOR_MATRIX_5D_DENORM,
        atol=TOLERANCE, equal_nan=True)
    self.assertTrue(round_trip_ok)
def test_denormalize_predictors_4d_z(self):
    """Checks that denormalize_predictors inverts z-score normalization.

    A 4-D matrix (no time dimension) is normalized, denormalized, and
    compared to the original.
    """
    normalized_matrix, normalization_dict = ml_utils.normalize_predictors(
        predictor_matrix=PREDICTOR_MATRIX_4D_DENORM + 0.,
        normalization_type_string=ml_utils.Z_SCORE_STRING)

    denormalized_matrix = ml_utils.denormalize_predictors(
        predictor_matrix=normalized_matrix,
        normalization_dict=normalization_dict)

    round_trip_ok = numpy.allclose(
        denormalized_matrix, PREDICTOR_MATRIX_4D_DENORM,
        atol=TOLERANCE, equal_nan=True)
    self.assertTrue(round_trip_ok)
def test_denormalize_predictors_5d_z(self):
    """Checks that denormalize_predictors inverts z-score normalization.

    A 5-D matrix (with time dimension) is normalized, denormalized, and
    compared to the original.
    """
    normalized_matrix, normalization_dict = ml_utils.normalize_predictors(
        predictor_matrix=PREDICTOR_MATRIX_5D_DENORM + 0.,
        normalization_type_string=ml_utils.Z_SCORE_STRING)

    denormalized_matrix = ml_utils.denormalize_predictors(
        predictor_matrix=normalized_matrix,
        normalization_dict=normalization_dict)

    round_trip_ok = numpy.allclose(
        denormalized_matrix, PREDICTOR_MATRIX_5D_DENORM,
        atol=TOLERANCE, equal_nan=True)
    self.assertTrue(round_trip_ok)
def test_sample_target_points_binary_no_mask(self):
    """Checks sample_target_points with 2 classes and no mask."""
    point_dict = ml_utils.sample_target_points(
        target_matrix=FRONTAL_GRID_MATRIX_BINARY,
        class_fractions=CLASS_FRACTIONS_FOR_BINARY_SAMPLING,
        num_points_to_sample=NUM_POINTS_TO_SAMPLE,
        mask_matrix=None, test_mode=True)

    dicts_match = _compare_target_point_dicts(
        point_dict, TARGET_POINT_DICT_BINARY_NO_MASK)
    self.assertTrue(dicts_match)
def test_sample_target_points_binary_with_mask(self):
    """Checks sample_target_points with 2 classes and a mask."""
    point_dict = ml_utils.sample_target_points(
        target_matrix=FRONTAL_GRID_MATRIX_BINARY,
        class_fractions=CLASS_FRACTIONS_FOR_BINARY_SAMPLING,
        num_points_to_sample=NUM_POINTS_TO_SAMPLE,
        mask_matrix=MASK_MATRIX, test_mode=True)

    dicts_match = _compare_target_point_dicts(
        point_dict, TARGET_POINT_DICT_BINARY_WITH_MASK)
    self.assertTrue(dicts_match)
def test_sample_target_points_ternary_no_mask(self):
    """Checks sample_target_points with 3 classes and no mask."""
    point_dict = ml_utils.sample_target_points(
        target_matrix=FRONTAL_GRID_MATRIX_TERNARY,
        class_fractions=CLASS_FRACTIONS_FOR_TERNARY_SAMPLING,
        num_points_to_sample=NUM_POINTS_TO_SAMPLE,
        test_mode=True)

    dicts_match = _compare_target_point_dicts(
        point_dict, TARGET_POINT_DICT_TERNARY_NO_MASK)
    self.assertTrue(dicts_match)
def test_sample_target_points_ternary_with_mask(self):
    """Checks sample_target_points with 3 classes and a mask."""
    point_dict = ml_utils.sample_target_points(
        target_matrix=FRONTAL_GRID_MATRIX_TERNARY,
        class_fractions=CLASS_FRACTIONS_FOR_TERNARY_SAMPLING,
        num_points_to_sample=NUM_POINTS_TO_SAMPLE,
        mask_matrix=MASK_MATRIX, test_mode=True)

    dicts_match = _compare_target_point_dicts(
        point_dict, TARGET_POINT_DICT_TERNARY_WITH_MASK)
    self.assertTrue(dicts_match)
def test_front_table_to_images(self):
    """Checks front_table_to_images against the expected ternary grid."""
    grid_matrix = ml_utils.front_table_to_images(
        frontal_grid_table=FRONTAL_GRID_TABLE,
        num_rows_per_image=NUM_GRID_ROWS,
        num_columns_per_image=NUM_GRID_COLUMNS)

    self.assertTrue(
        numpy.array_equal(grid_matrix, FRONTAL_GRID_MATRIX_TERNARY))
def test_binarize_front_images(self):
    """Checks binarize_front_images against the expected binary grid."""
    # Deep-copy so the module-level constant is not mutated in place.
    input_matrix = copy.deepcopy(FRONTAL_GRID_MATRIX_TERNARY)
    binary_matrix = ml_utils.binarize_front_images(input_matrix)

    self.assertTrue(
        numpy.array_equal(binary_matrix, FRONTAL_GRID_MATRIX_BINARY))
def test_dilate_binary_target_images(self):
    """Checks dilate_binary_target_images against the expected grid."""
    # Deep-copy so the module-level constant is not mutated in place.
    input_matrix = copy.deepcopy(FRONTAL_GRID_MATRIX_BINARY)
    dilated_matrix = ml_utils.dilate_binary_target_images(
        target_matrix=input_matrix,
        dilation_distance_metres=DILATION_DISTANCE_METRES)

    self.assertTrue(numpy.array_equal(
        dilated_matrix, FRONTAL_GRID_MATRIX_BINARY_DILATED))
def test_dilate_ternary_target_images(self):
    """Checks dilate_ternary_target_images against the expected grid."""
    # Deep-copy so the module-level constant is not mutated in place.
    input_matrix = copy.deepcopy(FRONTAL_GRID_MATRIX_TERNARY)
    dilated_matrix = ml_utils.dilate_ternary_target_images(
        target_matrix=input_matrix,
        dilation_distance_metres=DILATION_DISTANCE_METRES)

    self.assertTrue(numpy.array_equal(
        dilated_matrix, FRONTAL_GRID_MATRIX_TERNARY_DILATED))
def test_stack_predictor_variables(self):
    """Checks stack_predictor_variables against the expected 4-D matrix."""
    stacked_matrix = ml_utils.stack_predictor_variables(
        TUPLE_OF_3D_PREDICTOR_MATRICES)

    matrices_match = numpy.allclose(
        stacked_matrix, PREDICTOR_MATRIX_4D,
        atol=TOLERANCE, equal_nan=True)
    self.assertTrue(matrices_match)
def test_stack_time_steps(self):
    """Checks stack_time_steps against the expected 5-D matrix."""
    stacked_matrix = ml_utils.stack_time_steps(TUPLE_OF_4D_PREDICTOR_MATRICES)

    matrices_match = numpy.allclose(
        stacked_matrix, PREDICTOR_MATRIX_5D,
        atol=TOLERANCE, equal_nan=True)
    self.assertTrue(matrices_match)
def test_subset_narr_grid_for_fcn_input_3d(self):
    """Checks subset_narr_grid_for_fcn_input on a 3-D input matrix."""
    subset_matrix = ml_utils.subset_narr_grid_for_fcn_input(
        FULL_NARR_MATRIX_3D)

    self.assertTrue(numpy.allclose(
        subset_matrix, FCN_INPUT_MATRIX_3D, atol=TOLERANCE))
def test_subset_narr_grid_for_fcn_input_4d(self):
    """Checks subset_narr_grid_for_fcn_input on a 4-D input matrix."""
    subset_matrix = ml_utils.subset_narr_grid_for_fcn_input(
        FULL_NARR_MATRIX_4D)

    self.assertTrue(numpy.allclose(
        subset_matrix, FCN_INPUT_MATRIX_4D, atol=TOLERANCE))
def test_subset_narr_grid_for_fcn_input_5d(self):
    """Checks subset_narr_grid_for_fcn_input on a 5-D input matrix."""
    subset_matrix = ml_utils.subset_narr_grid_for_fcn_input(
        FULL_NARR_MATRIX_5D)

    self.assertTrue(numpy.allclose(
        subset_matrix, FCN_INPUT_MATRIX_5D, atol=TOLERANCE))
def test_downsize_grids_around_selected_points(self):
    """Checks downsize_grids_around_selected_points on a known input.

    Compares the downsized predictor matrix, target vector, example
    indices, and center rows/columns to precomputed arrays.
    """
    # Deep-copy so the module-level constant is not mutated in place.
    predictor_matrix_copy = copy.deepcopy(
        PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS)

    (small_predictor_matrix, target_vector, example_indices,
     center_rows, center_columns
    ) = ml_utils.downsize_grids_around_selected_points(
        predictor_matrix=predictor_matrix_copy,
        target_matrix=TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,
        num_rows_in_half_window=NUM_ROWS_IN_HALF_GRID_AROUND_SELECTED_PTS,
        num_columns_in_half_window=
        NUM_COLUMNS_IN_HALF_GRID_AROUND_SELECTED_PTS,
        target_point_dict=TARGET_POINT_DICT_FOR_DOWNSIZING, test_mode=True)

    self.assertTrue(numpy.allclose(
        small_predictor_matrix, DOWNSIZED_MATRIX_AT_SELECTED_POINTS,
        atol=TOLERANCE))

    # All remaining outputs are integer arrays, so exact equality applies.
    integer_output_pairs = [
        (target_vector, TARGET_VECTOR_AT_SELECTED_POINTS),
        (example_indices, EXAMPLE_INDICES_AT_SELECTED_POINTS),
        (center_rows, CENTER_ROWS_AT_SELECTED_POINTS),
        (center_columns, CENTER_COLUMNS_AT_SELECTED_POINTS)
    ]
    for actual_array, expected_array in integer_output_pairs:
        self.assertTrue(numpy.array_equal(actual_array, expected_array))
def test_find_gridded_prediction_file(self):
    """Ensures correct output from find_gridded_prediction_file."""
    this_file_name = ml_utils.find_gridded_prediction_file(
        directory_name=PREDICTION_DIR_NAME,
        first_target_time_unix_sec=FIRST_PREDICTION_TIME_UNIX_SEC,
        last_target_time_unix_sec=LAST_PREDICTION_TIME_UNIX_SEC,
        raise_error_if_missing=False)

    # assertEqual reports both values on failure, unlike assertTrue(a == b),
    # which only reports "False is not true".
    self.assertEqual(this_file_name, PREDICTION_FILE_NAME)
if __name__ == '__main__':
    # Run every unit test in this module when executed as a script.
    unittest.main()
| [
"gewittergefahr.gg_utils.nwp_model_utils.get_grid_dimensions",
"generalexam.machine_learning.machine_learning_utils.front_table_to_images",
"generalexam.machine_learning.machine_learning_utils.stack_time_steps",
"numpy.allclose",
"numpy.isclose",
"numpy.mean",
"generalexam.machine_learning.machine_learn... | [((438, 517), 'gewittergefahr.gg_utils.nwp_model_utils.get_grid_dimensions', 'nwp_model_utils.get_grid_dimensions', ([], {'model_name': 'nwp_model_utils.NARR_MODEL_NAME'}), '(model_name=nwp_model_utils.NARR_MODEL_NAME)\n', (473, 517), False, 'from gewittergefahr.gg_utils import nwp_model_utils\n'), ((545, 634), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(NUM_ROWS_IN_NARR, NUM_COLUMNS_IN_NARR)'}), '(low=0.0, high=1.0, size=(NUM_ROWS_IN_NARR,\n NUM_COLUMNS_IN_NARR))\n', (565, 634), False, 'import numpy\n'), ((657, 720), 'numpy.stack', 'numpy.stack', (['(FULL_NARR_MATRIX_2D, FULL_NARR_MATRIX_2D)'], {'axis': '(0)'}), '((FULL_NARR_MATRIX_2D, FULL_NARR_MATRIX_2D), axis=0)\n', (668, 720), False, 'import numpy\n'), ((748, 812), 'numpy.stack', 'numpy.stack', (['(FULL_NARR_MATRIX_3D, FULL_NARR_MATRIX_3D)'], {'axis': '(-1)'}), '((FULL_NARR_MATRIX_3D, FULL_NARR_MATRIX_3D), axis=-1)\n', (759, 812), False, 'import numpy\n'), ((840, 904), 'numpy.stack', 'numpy.stack', (['(FULL_NARR_MATRIX_4D, FULL_NARR_MATRIX_4D)'], {'axis': '(-1)'}), '((FULL_NARR_MATRIX_4D, FULL_NARR_MATRIX_4D), axis=-1)\n', (851, 904), False, 'import numpy\n'), ((1001, 1047), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4]'], {'dtype': 'numpy.float32'}), '([1, 2, 3, 4], dtype=numpy.float32)\n', (1012, 1047), False, 'import numpy\n'), ((1070, 1149), 'numpy.array', 'numpy.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]'], {'dtype': 'numpy.float32'}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=numpy.float32)\n', (1081, 1149), False, 'import numpy\n'), ((1319, 1370), 'numpy.stack', 'numpy.stack', (['TUPLE_OF_2D_PREDICTOR_MATRICES'], {'axis': '(0)'}), '(TUPLE_OF_2D_PREDICTOR_MATRICES, axis=0)\n', (1330, 1370), False, 'import numpy\n'), ((1604, 1656), 'numpy.stack', 'numpy.stack', (['TUPLE_OF_3D_PREDICTOR_MATRICES'], {'axis': '(-1)'}), '(TUPLE_OF_3D_PREDICTOR_MATRICES, axis=-1)\n', (1615, 1656), 
False, 'import numpy\n'), ((1828, 1880), 'numpy.stack', 'numpy.stack', (['TUPLE_OF_4D_PREDICTOR_MATRICES'], {'axis': '(-2)'}), '(TUPLE_OF_4D_PREDICTOR_MATRICES, axis=-2)\n', (1839, 1880), False, 'import numpy\n'), ((1973, 2009), 'numpy.array', 'numpy.array', (['[0, 1, 1, 0]'], {'dtype': 'int'}), '([0, 1, 1, 0], dtype=int)\n', (1984, 2009), False, 'import numpy\n'), ((2037, 2073), 'numpy.array', 'numpy.array', (['[0, 2, 1, 0]'], {'dtype': 'int'}), '([0, 2, 1, 0], dtype=int)\n', (2048, 2073), False, 'import numpy\n'), ((2100, 2152), 'numpy.array', 'numpy.array', (['[[0, 1, 1, 0], [1, 0, 0, 1]]'], {'dtype': 'int'}), '([[0, 1, 1, 0], [1, 0, 0, 1]], dtype=int)\n', (2111, 2152), False, 'import numpy\n'), ((2218, 2289), 'numpy.stack', 'numpy.stack', (['(TARGET_VALUES_BINARY_2D, TARGET_VALUES_BINARY_2D)'], {'axis': '(0)'}), '((TARGET_VALUES_BINARY_2D, TARGET_VALUES_BINARY_2D), axis=0)\n', (2229, 2289), False, 'import numpy\n'), ((2390, 2694), 'numpy.array', 'numpy.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18], [19, 20,\n 21, 22, 23, 24, 25, 26, 27], [28, 29, 30, 31, 32, 33, 34, 35, 36], [37,\n 38, 39, 40, 41, 42, 43, 44, 45], [46, 47, 48, 49, 50, 51, 52, 53, 54],\n [55, 56, 57, 58, 59, 60, 61, 62, 63]]'], {'dtype': 'numpy.float32'}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, \n 18], [19, 20, 21, 22, 23, 24, 25, 26, 27], [28, 29, 30, 31, 32, 33, 34,\n 35, 36], [37, 38, 39, 40, 41, 42, 43, 44, 45], [46, 47, 48, 49, 50, 51,\n 52, 53, 54], [55, 56, 57, 58, 59, 60, 61, 62, 63]], dtype=numpy.float32)\n', (2401, 2694), False, 'import numpy\n'), ((3416, 3595), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 2, 3, 4, 5], [1, 1, 1, 2, 3, 4, 5], [10, 10, 10, 11, 12, 13, 14],\n [19, 19, 19, 20, 21, 22, 23], [28, 28, 28, 29, 30, 31, 32]]'], {'dtype': 'numpy.float32'}), '([[1, 1, 1, 2, 3, 4, 5], [1, 1, 1, 2, 3, 4, 5], [10, 10, 10, 11,\n 12, 13, 14], [19, 19, 19, 20, 21, 22, 23], [28, 28, 28, 29, 30, 31, 32]\n ], dtype=numpy.float32)\n', 
(3427, 3595), False, 'import numpy\n'), ((3823, 4016), 'numpy.array', 'numpy.array', (['[[28, 28, 28, 29, 30, 31, 32], [37, 37, 37, 38, 39, 40, 41], [46, 46, 46, \n 47, 48, 49, 50], [55, 55, 55, 56, 57, 58, 59], [55, 55, 55, 56, 57, 58, 59]\n ]'], {'dtype': 'numpy.float32'}), '([[28, 28, 28, 29, 30, 31, 32], [37, 37, 37, 38, 39, 40, 41], [\n 46, 46, 46, 47, 48, 49, 50], [55, 55, 55, 56, 57, 58, 59], [55, 55, 55,\n 56, 57, 58, 59]], dtype=numpy.float32)\n', (3834, 4016), False, 'import numpy\n'), ((4257, 4436), 'numpy.array', 'numpy.array', (['[[5, 6, 7, 8, 9, 9, 9], [5, 6, 7, 8, 9, 9, 9], [14, 15, 16, 17, 18, 18, 18],\n [23, 24, 25, 26, 27, 27, 27], [32, 33, 34, 35, 36, 36, 36]]'], {'dtype': 'numpy.float32'}), '([[5, 6, 7, 8, 9, 9, 9], [5, 6, 7, 8, 9, 9, 9], [14, 15, 16, 17,\n 18, 18, 18], [23, 24, 25, 26, 27, 27, 27], [32, 33, 34, 35, 36, 36, 36]\n ], dtype=numpy.float32)\n', (4268, 4436), False, 'import numpy\n'), ((4670, 4863), 'numpy.array', 'numpy.array', (['[[32, 33, 34, 35, 36, 36, 36], [41, 42, 43, 44, 45, 45, 45], [50, 51, 52, \n 53, 54, 54, 54], [59, 60, 61, 62, 63, 63, 63], [59, 60, 61, 62, 63, 63, 63]\n ]'], {'dtype': 'numpy.float32'}), '([[32, 33, 34, 35, 36, 36, 36], [41, 42, 43, 44, 45, 45, 45], [\n 50, 51, 52, 53, 54, 54, 54], [59, 60, 61, 62, 63, 63, 63], [59, 60, 61,\n 62, 63, 63, 63]], dtype=numpy.float32)\n', (4681, 4863), False, 'import numpy\n'), ((5106, 5299), 'numpy.array', 'numpy.array', (['[[11, 12, 13, 14, 15, 16, 17], [20, 21, 22, 23, 24, 25, 26], [29, 30, 31, \n 32, 33, 34, 35], [38, 39, 40, 41, 42, 43, 44], [47, 48, 49, 50, 51, 52, 53]\n ]'], {'dtype': 'numpy.float32'}), '([[11, 12, 13, 14, 15, 16, 17], [20, 21, 22, 23, 24, 25, 26], [\n 29, 30, 31, 32, 33, 34, 35], [38, 39, 40, 41, 42, 43, 44], [47, 48, 49,\n 50, 51, 52, 53]], dtype=numpy.float32)\n', (5117, 5299), False, 'import numpy\n'), ((5512, 5577), 'numpy.stack', 'numpy.stack', (['(PRE_DOWNSIZED_MATRIX, PRE_DOWNSIZED_MATRIX)'], {'axis': '(0)'}), '((PRE_DOWNSIZED_MATRIX, 
PRE_DOWNSIZED_MATRIX), axis=0)\n', (5523, 5577), False, 'import numpy\n'), ((5609, 5681), 'numpy.stack', 'numpy.stack', (['(PRE_DOWNSIZED_MATRIX_3D, PRE_DOWNSIZED_MATRIX_3D)'], {'axis': '(-1)'}), '((PRE_DOWNSIZED_MATRIX_3D, PRE_DOWNSIZED_MATRIX_3D), axis=-1)\n', (5620, 5681), False, 'import numpy\n'), ((5713, 5785), 'numpy.stack', 'numpy.stack', (['(PRE_DOWNSIZED_MATRIX_4D, PRE_DOWNSIZED_MATRIX_4D)'], {'axis': '(-2)'}), '((PRE_DOWNSIZED_MATRIX_4D, PRE_DOWNSIZED_MATRIX_4D), axis=-2)\n', (5724, 5785), False, 'import numpy\n'), ((5823, 5898), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_TOP_LEFT, DOWNSIZED_MATRIX_TOP_LEFT)'], {'axis': '(0)'}), '((DOWNSIZED_MATRIX_TOP_LEFT, DOWNSIZED_MATRIX_TOP_LEFT), axis=0)\n', (5834, 5898), False, 'import numpy\n'), ((5935, 6021), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_TOP_LEFT_3D, DOWNSIZED_MATRIX_TOP_LEFT_3D)'], {'axis': '(-1)'}), '((DOWNSIZED_MATRIX_TOP_LEFT_3D, DOWNSIZED_MATRIX_TOP_LEFT_3D),\n axis=-1)\n', (5946, 6021), False, 'import numpy\n'), ((6054, 6140), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_TOP_LEFT_4D, DOWNSIZED_MATRIX_TOP_LEFT_4D)'], {'axis': '(-2)'}), '((DOWNSIZED_MATRIX_TOP_LEFT_4D, DOWNSIZED_MATRIX_TOP_LEFT_4D),\n axis=-2)\n', (6065, 6140), False, 'import numpy\n'), ((6177, 6262), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_BOTTOM_LEFT, DOWNSIZED_MATRIX_BOTTOM_LEFT)'], {'axis': '(0)'}), '((DOWNSIZED_MATRIX_BOTTOM_LEFT, DOWNSIZED_MATRIX_BOTTOM_LEFT),\n axis=0)\n', (6188, 6262), False, 'import numpy\n'), ((6298, 6390), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_BOTTOM_LEFT_3D, DOWNSIZED_MATRIX_BOTTOM_LEFT_3D)'], {'axis': '(-1)'}), '((DOWNSIZED_MATRIX_BOTTOM_LEFT_3D,\n DOWNSIZED_MATRIX_BOTTOM_LEFT_3D), axis=-1)\n', (6309, 6390), False, 'import numpy\n'), ((6430, 6522), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_BOTTOM_LEFT_4D, DOWNSIZED_MATRIX_BOTTOM_LEFT_4D)'], {'axis': '(-2)'}), '((DOWNSIZED_MATRIX_BOTTOM_LEFT_4D,\n DOWNSIZED_MATRIX_BOTTOM_LEFT_4D), axis=-2)\n', 
(6441, 6522), False, 'import numpy\n'), ((6561, 6638), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_TOP_RIGHT, DOWNSIZED_MATRIX_TOP_RIGHT)'], {'axis': '(0)'}), '((DOWNSIZED_MATRIX_TOP_RIGHT, DOWNSIZED_MATRIX_TOP_RIGHT), axis=0)\n', (6572, 6638), False, 'import numpy\n'), ((6676, 6764), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_TOP_RIGHT_3D, DOWNSIZED_MATRIX_TOP_RIGHT_3D)'], {'axis': '(-1)'}), '((DOWNSIZED_MATRIX_TOP_RIGHT_3D, DOWNSIZED_MATRIX_TOP_RIGHT_3D),\n axis=-1)\n', (6687, 6764), False, 'import numpy\n'), ((6798, 6886), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_TOP_RIGHT_4D, DOWNSIZED_MATRIX_TOP_RIGHT_4D)'], {'axis': '(-2)'}), '((DOWNSIZED_MATRIX_TOP_RIGHT_4D, DOWNSIZED_MATRIX_TOP_RIGHT_4D),\n axis=-2)\n', (6809, 6886), False, 'import numpy\n'), ((6924, 7011), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_BOTTOM_RIGHT, DOWNSIZED_MATRIX_BOTTOM_RIGHT)'], {'axis': '(0)'}), '((DOWNSIZED_MATRIX_BOTTOM_RIGHT, DOWNSIZED_MATRIX_BOTTOM_RIGHT),\n axis=0)\n', (6935, 7011), False, 'import numpy\n'), ((7048, 7142), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D, DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D)'], {'axis': '(-1)'}), '((DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D,\n DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D), axis=-1)\n', (7059, 7142), False, 'import numpy\n'), ((7183, 7277), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D, DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D)'], {'axis': '(-2)'}), '((DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D,\n DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D), axis=-2)\n', (7194, 7277), False, 'import numpy\n'), ((7313, 7384), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_MIDDLE, DOWNSIZED_MATRIX_MIDDLE)'], {'axis': '(0)'}), '((DOWNSIZED_MATRIX_MIDDLE, DOWNSIZED_MATRIX_MIDDLE), axis=0)\n', (7324, 7384), False, 'import numpy\n'), ((7419, 7497), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_MIDDLE_3D, DOWNSIZED_MATRIX_MIDDLE_3D)'], {'axis': '(-1)'}), '((DOWNSIZED_MATRIX_MIDDLE_3D, DOWNSIZED_MATRIX_MIDDLE_3D), axis=-1)\n', 
(7430, 7497), False, 'import numpy\n'), ((7532, 7610), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_MIDDLE_4D, DOWNSIZED_MATRIX_MIDDLE_4D)'], {'axis': '(-2)'}), '((DOWNSIZED_MATRIX_MIDDLE_4D, DOWNSIZED_MATRIX_MIDDLE_4D), axis=-2)\n', (7543, 7610), False, 'import numpy\n'), ((7717, 7740), 'numpy.array', 'numpy.array', (['[0.1, 0.9]'], {}), '([0.1, 0.9])\n', (7728, 7740), False, 'import numpy\n'), ((7808, 7828), 'numpy.array', 'numpy.array', (['[2, 15]'], {}), '([2, 15])\n', (7819, 7828), False, 'import numpy\n'), ((7865, 7888), 'numpy.array', 'numpy.array', (['[2, 3, 12]'], {}), '([2, 3, 12])\n', (7876, 7888), False, 'import numpy\n'), ((7916, 7944), 'numpy.array', 'numpy.array', (['[0.1, 0.2, 0.7]'], {}), '([0.1, 0.2, 0.7])\n', (7927, 7944), False, 'import numpy\n'), ((8011, 8030), 'numpy.array', 'numpy.array', (['[1, 3]'], {}), '([1, 3])\n', (8022, 8030), False, 'import numpy\n'), ((8067, 8089), 'numpy.array', 'numpy.array', (['[1, 1, 2]'], {}), '([1, 1, 2])\n', (8078, 8089), False, 'import numpy\n'), ((8433, 8487), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3], [4, 5, 6, 7]]'], {'dtype': 'float'}), '([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=float)\n', (8444, 8487), False, 'import numpy\n'), ((8529, 8582), 'numpy.array', 'numpy.array', (['[[2, 4, 6, numpy.nan], [-1, -3, -5, -7]]'], {}), '([[2, 4, 6, numpy.nan], [-1, -3, -5, -7]])\n', (8540, 8582), False, 'import numpy\n'), ((8618, 8694), 'numpy.stack', 'numpy.stack', (['(FIRST_PREDICTOR_MATRIX_2D, FIRST_PREDICTOR_MATRIX_2D)'], {'axis': '(-1)'}), '((FIRST_PREDICTOR_MATRIX_2D, FIRST_PREDICTOR_MATRIX_2D), axis=-1)\n', (8629, 8694), False, 'import numpy\n'), ((8729, 8794), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)\n', (8740, 8794), False, 'import numpy\n'), ((8825, 8903), 'numpy.stack', 'numpy.stack', (['(SECOND_PREDICTOR_MATRIX_2D, SECOND_PREDICTOR_MATRIX_2D)'], {'axis': '(-1)'}), 
'((SECOND_PREDICTOR_MATRIX_2D, SECOND_PREDICTOR_MATRIX_2D), axis=-1)\n', (8836, 8903), False, 'import numpy\n'), ((8933, 9000), 'numpy.stack', 'numpy.stack', (['(THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D), axis=0)\n', (8944, 9000), False, 'import numpy\n'), ((9035, 9108), 'numpy.stack', 'numpy.stack', (['(PREDICTOR_MATRIX_4D_DENORM, THIS_SECOND_MATRIX_4D)'], {'axis': '(-2)'}), '((PREDICTOR_MATRIX_4D_DENORM, THIS_SECOND_MATRIX_4D), axis=-2)\n', (9046, 9108), False, 'import numpy\n'), ((9176, 9323), 'numpy.stack', 'numpy.stack', (['((FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN)'], {'axis': '(-1)'}), '(((FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN), axis=-1)\n', (9187, 9323), False, 'import numpy\n'), ((9363, 9428), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)\n', (9374, 9428), False, 'import numpy\n'), ((9498, 9645), 'numpy.stack', 'numpy.stack', (['((FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN)'], {'axis': '(-1)'}), '(((FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN), axis=-1)\n', (9509, 9645), False, 'import numpy\n'), ((9674, 9739), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)\n', (9685, 9739), False, 'import numpy\n'), ((9770, 9919), 'numpy.stack', 'numpy.stack', (['((SECOND_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN, (\n SECOND_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN)'], {'axis': '(-1)'}), '(((SECOND_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN, 
(\n SECOND_PREDICTOR_MATRIX_2D - THIS_MIN) / THIS_MAX_LESS_MIN), axis=-1)\n', (9781, 9919), False, 'import numpy\n'), ((9949, 10016), 'numpy.stack', 'numpy.stack', (['(THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D), axis=0)\n', (9960, 10016), False, 'import numpy\n'), ((10057, 10124), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_4D, THIS_SECOND_MATRIX_4D)'], {'axis': '(-2)'}), '((THIS_FIRST_MATRIX_4D, THIS_SECOND_MATRIX_4D), axis=-2)\n', (10068, 10124), False, 'import numpy\n'), ((10246, 10283), 'numpy.mean', 'numpy.mean', (['FIRST_PREDICTOR_MATRIX_2D'], {}), '(FIRST_PREDICTOR_MATRIX_2D)\n', (10256, 10283), False, 'import numpy\n'), ((10297, 10341), 'numpy.std', 'numpy.std', (['FIRST_PREDICTOR_MATRIX_2D'], {'ddof': '(1)'}), '(FIRST_PREDICTOR_MATRIX_2D, ddof=1)\n', (10306, 10341), False, 'import numpy\n'), ((10366, 10501), 'numpy.stack', 'numpy.stack', (['((FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV)'], {'axis': '(-1)'}), '(((FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV), axis=-1)\n', (10377, 10501), False, 'import numpy\n'), ((10536, 10601), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)\n', (10547, 10601), False, 'import numpy\n'), ((10625, 10702), 'numpy.stack', 'numpy.stack', (['(FIRST_PREDICTOR_MATRIX_2D, SECOND_PREDICTOR_MATRIX_2D)'], {'axis': '(-1)'}), '((FIRST_PREDICTOR_MATRIX_2D, SECOND_PREDICTOR_MATRIX_2D), axis=-1)\n', (10636, 10702), False, 'import numpy\n'), ((10720, 10749), 'numpy.nanmean', 'numpy.nanmean', (['ALL_PREDICTORS'], {}), '(ALL_PREDICTORS)\n', (10733, 10749), False, 'import numpy\n'), ((10763, 10799), 'numpy.nanstd', 'numpy.nanstd', (['ALL_PREDICTORS'], {'ddof': '(1)'}), '(ALL_PREDICTORS, ddof=1)\n', (10775, 10799), False, 'import 
numpy\n'), ((10824, 10959), 'numpy.stack', 'numpy.stack', (['((FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV)'], {'axis': '(-1)'}), '(((FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV, (\n FIRST_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV), axis=-1)\n', (10835, 10959), False, 'import numpy\n'), ((10988, 11053), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX_3D, THIS_FIRST_MATRIX_3D), axis=0)\n', (10999, 11053), False, 'import numpy\n'), ((11084, 11221), 'numpy.stack', 'numpy.stack', (['((SECOND_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV, (\n SECOND_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV)'], {'axis': '(-1)'}), '(((SECOND_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV, (\n SECOND_PREDICTOR_MATRIX_2D - THIS_MEAN) / THIS_STDEV), axis=-1)\n', (11095, 11221), False, 'import numpy\n'), ((11251, 11318), 'numpy.stack', 'numpy.stack', (['(THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D)'], {'axis': '(0)'}), '((THIS_SECOND_MATRIX_3D, THIS_SECOND_MATRIX_3D), axis=0)\n', (11262, 11318), False, 'import numpy\n'), ((11354, 11421), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX_4D, THIS_SECOND_MATRIX_4D)'], {'axis': '(-2)'}), '((THIS_FIRST_MATRIX_4D, THIS_SECOND_MATRIX_4D), axis=-2)\n', (11365, 11421), False, 'import numpy\n'), ((12155, 12192), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (12181, 12192), False, 'import pandas\n'), ((12838, 12875), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['THIS_DICT'], {}), '(THIS_DICT)\n', (12864, 12875), False, 'import pandas\n'), ((12897, 12985), 'pandas.concat', 'pandas.concat', (['[FRONTAL_GRID_TABLE1, FRONTAL_GRID_TABLE2]'], {'axis': '(0)', 'ignore_index': '(True)'}), '([FRONTAL_GRID_TABLE1, FRONTAL_GRID_TABLE2], axis=0,\n ignore_index=True)\n', (12910, 12985), False, 'import pandas\n'), ((13008, 13186), 'numpy.array', 
'numpy.array', (['[[0, 1, 1, 0, 0, 0, 0, 1], [0, 2, 1, 1, 1, 1, 1, 1], [0, 2, 0, 0, 0, 0, 0, \n 0], [0, 2, 0, 0, 0, 0, 0, 0], [2, 2, 0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0,\n 0, 0, 0]]'], {}), '([[0, 1, 1, 0, 0, 0, 0, 1], [0, 2, 1, 1, 1, 1, 1, 1], [0, 2, 0, \n 0, 0, 0, 0, 0], [0, 2, 0, 0, 0, 0, 0, 0], [2, 2, 0, 0, 0, 0, 0, 0], [2,\n 0, 0, 0, 0, 0, 0, 0]])\n', (13019, 13186), False, 'import numpy\n'), ((13365, 13543), 'numpy.array', 'numpy.array', (['[[0, 0, 2, 2, 0, 0, 0, 0], [0, 2, 2, 0, 1, 1, 0, 0], [2, 2, 0, 0, 0, 1, 1, \n 0], [2, 2, 0, 0, 0, 0, 1, 1], [2, 2, 0, 0, 0, 0, 0, 0], [0, 2, 0, 0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 2, 2, 0, 0, 0, 0], [0, 2, 2, 0, 1, 1, 0, 0], [2, 2, 0, \n 0, 0, 1, 1, 0], [2, 2, 0, 0, 0, 0, 1, 1], [2, 2, 0, 0, 0, 0, 0, 0], [0,\n 2, 0, 0, 0, 0, 0, 0]])\n', (13376, 13543), False, 'import numpy\n'), ((13901, 14079), 'numpy.array', 'numpy.array', (['[[0, 1, 1, 0, 0, 0, 0, 1], [0, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 0, \n 0], [0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0,\n 0, 0, 0]]'], {}), '([[0, 1, 1, 0, 0, 0, 0, 1], [0, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, \n 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [1,\n 0, 0, 0, 0, 0, 0, 0]])\n', (13912, 14079), False, 'import numpy\n'), ((14258, 14436), 'numpy.array', 'numpy.array', (['[[0, 0, 1, 1, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [1, 1, 0, 0, 0, 1, 1, \n 0], [1, 1, 0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 1, 1, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0], [1, 1, 0, \n 0, 0, 1, 1, 0], [1, 1, 0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0, 0, 0], [0,\n 1, 0, 0, 0, 0, 0, 0]])\n', (14269, 14436), False, 'import numpy\n'), ((14851, 14874), 'numpy.array', 'numpy.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (14862, 14874), False, 'import numpy\n'), ((14890, 15079), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, \n 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 
1, 0], [0, 0, 0, 0, 0,\n 0, 0, 0]]'], {'dtype': 'int'}), '([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, \n 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0], [0,\n 0, 0, 0, 0, 0, 0, 0]], dtype=int)\n', (14901, 15079), False, 'import numpy\n'), ((15237, 15340), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]'], {'dtype': 'int'}), '([0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4,\n 4, 4, 4, 4], dtype=int)\n', (15248, 15340), False, 'import numpy\n'), ((15379, 15482), 'numpy.array', 'numpy.array', (['[0, 3, 4, 5, 6, 0, 0, 2, 3, 4, 5, 6, 7, 0, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6]'], {'dtype': 'int'}), '([0, 3, 4, 5, 6, 0, 0, 2, 3, 4, 5, 6, 7, 0, 2, 3, 4, 5, 6, 7, 2,\n 3, 4, 5, 6], dtype=int)\n', (15390, 15482), False, 'import numpy\n'), ((15519, 15588), 'numpy.array', 'numpy.array', (['[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 4, 5]'], {'dtype': 'int'}), '([0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 4, 5], dtype=int)\n', (15530, 15588), False, 'import numpy\n'), ((15627, 15696), 'numpy.array', 'numpy.array', (['[1, 2, 7, 1, 2, 3, 4, 5, 6, 7, 1, 1, 0, 1, 0]'], {'dtype': 'int'}), '([1, 2, 7, 1, 2, 3, 4, 5, 6, 7, 1, 1, 0, 1, 0], dtype=int)\n', (15638, 15696), False, 'import numpy\n'), ((15991, 16060), 'numpy.array', 'numpy.array', (['[2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]'], {'dtype': 'int'}), '([2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4], dtype=int)\n', (16002, 16060), False, 'import numpy\n'), ((16101, 16170), 'numpy.array', 'numpy.array', (['[2, 3, 4, 5, 6, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6]'], {'dtype': 'int'}), '([2, 3, 4, 5, 6, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6], dtype=int)\n', (16112, 16170), False, 'import numpy\n'), ((16209, 16260), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1, 1, 1, 2, 3, 4]'], {'dtype': 'int'}), '([1, 1, 1, 1, 1, 1, 2, 3, 4], dtype=int)\n', (16220, 16260), False, 'import numpy\n'), ((16301, 16352), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4, 
5, 6, 1, 1, 1]'], {'dtype': 'int'}), '([1, 2, 3, 4, 5, 6, 1, 1, 1], dtype=int)\n', (16312, 16352), False, 'import numpy\n'), ((16658, 16684), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (16669, 16684), False, 'import numpy\n'), ((16718, 16744), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (16729, 16744), False, 'import numpy\n'), ((16776, 16830), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1, 1, 1, 2, 2, 2, 2]'], {'dtype': 'int'}), '([0, 0, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)\n', (16787, 16830), False, 'import numpy\n'), ((16869, 16923), 'numpy.array', 'numpy.array', (['[2, 3, 1, 2, 4, 5, 0, 1, 5, 6]'], {'dtype': 'int'}), '([2, 3, 1, 2, 4, 5, 0, 1, 5, 6], dtype=int)\n', (16880, 16923), False, 'import numpy\n'), ((17218, 17254), 'numpy.array', 'numpy.array', (['[1, 1, 2, 2]'], {'dtype': 'int'}), '([1, 1, 2, 2], dtype=int)\n', (17229, 17254), False, 'import numpy\n'), ((17290, 17326), 'numpy.array', 'numpy.array', (['[3, 6, 2, 3]'], {'dtype': 'int'}), '([3, 6, 2, 3], dtype=int)\n', (17301, 17326), False, 'import numpy\n'), ((17360, 17414), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1, 2, 2, 2, 3, 3, 4]'], {'dtype': 'int'}), '([1, 1, 1, 1, 2, 2, 2, 3, 3, 4], dtype=int)\n', (17371, 17414), False, 'import numpy\n'), ((17455, 17509), 'numpy.array', 'numpy.array', (['[1, 2, 4, 5, 1, 5, 6, 1, 6, 1]'], {'dtype': 'int'}), '([1, 2, 4, 5, 1, 5, 6, 1, 6, 1], dtype=int)\n', (17466, 17509), False, 'import numpy\n'), ((18418, 18446), 'numpy.array', 'numpy.array', (['[0.5, 0.2, 0.3]'], {}), '([0.5, 0.2, 0.3])\n', (18429, 18446), False, 'import numpy\n'), ((18478, 18581), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]'], {'dtype': 'int'}), '([0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4,\n 4, 4, 4, 4], dtype=int)\n', (18489, 18581), False, 'import numpy\n'), ((18620, 18723), 'numpy.array', 'numpy.array', (['[0, 3, 4, 5, 6, 0, 0, 2, 3, 4, 
5, 6, 7, 0, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6]'], {'dtype': 'int'}), '([0, 3, 4, 5, 6, 0, 0, 2, 3, 4, 5, 6, 7, 0, 2, 3, 4, 5, 6, 7, 2,\n 3, 4, 5, 6], dtype=int)\n', (18631, 18723), False, 'import numpy\n'), ((18760, 18786), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (18771, 18786), False, 'import numpy\n'), ((18820, 18846), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (18831, 18846), False, 'import numpy\n'), ((18880, 18931), 'numpy.array', 'numpy.array', (['[0, 0, 0, 1, 1, 1, 1, 1, 1]'], {'dtype': 'int'}), '([0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int)\n', (18891, 18931), False, 'import numpy\n'), ((18972, 19023), 'numpy.array', 'numpy.array', (['[1, 2, 7, 2, 3, 4, 5, 6, 7]'], {'dtype': 'int'}), '([1, 2, 7, 2, 3, 4, 5, 6, 7], dtype=int)\n', (18983, 19023), False, 'import numpy\n'), ((19061, 19088), 'numpy.array', 'numpy.array', (['[1]'], {'dtype': 'int'}), '([1], dtype=int)\n', (19072, 19088), False, 'import numpy\n'), ((19124, 19151), 'numpy.array', 'numpy.array', (['[4]'], {'dtype': 'int'}), '([4], dtype=int)\n', (19135, 19151), False, 'import numpy\n'), ((19185, 19227), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4, 4, 5]'], {'dtype': 'int'}), '([1, 2, 3, 4, 4, 5], dtype=int)\n', (19196, 19227), False, 'import numpy\n'), ((19263, 19305), 'numpy.array', 'numpy.array', (['[1, 1, 1, 0, 1, 0]'], {'dtype': 'int'}), '([1, 1, 1, 0, 1, 0], dtype=int)\n', (19274, 19305), False, 'import numpy\n'), ((19338, 19389), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1, 2, 2, 3, 3, 4]'], {'dtype': 'int'}), '([0, 0, 1, 1, 2, 2, 3, 3, 4], dtype=int)\n', (19349, 19389), False, 'import numpy\n'), ((19430, 19481), 'numpy.array', 'numpy.array', (['[2, 3, 1, 2, 0, 1, 0, 1, 0]'], {'dtype': 'int'}), '([2, 3, 1, 2, 0, 1, 0, 1, 0], dtype=int)\n', (19441, 19481), False, 'import numpy\n'), ((20443, 20512), 'numpy.array', 'numpy.array', (['[2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]'], {'dtype': 'int'}), '([2, 2, 2, 2, 2, 3, 3, 3, 
3, 3, 4, 4, 4, 4, 4], dtype=int)\n', (20454, 20512), False, 'import numpy\n'), ((20553, 20622), 'numpy.array', 'numpy.array', (['[2, 3, 4, 5, 6, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6]'], {'dtype': 'int'}), '([2, 3, 4, 5, 6, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6], dtype=int)\n', (20564, 20622), False, 'import numpy\n'), ((20661, 20687), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (20672, 20687), False, 'import numpy\n'), ((20723, 20749), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (20734, 20749), False, 'import numpy\n'), ((20785, 20824), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1, 1]'], {'dtype': 'int'}), '([1, 1, 1, 1, 1], dtype=int)\n', (20796, 20824), False, 'import numpy\n'), ((20862, 20901), 'numpy.array', 'numpy.array', (['[2, 3, 4, 5, 6]'], {'dtype': 'int'}), '([2, 3, 4, 5, 6], dtype=int)\n', (20873, 20901), False, 'import numpy\n'), ((20936, 20963), 'numpy.array', 'numpy.array', (['[1]'], {'dtype': 'int'}), '([1], dtype=int)\n', (20947, 20963), False, 'import numpy\n'), ((21001, 21028), 'numpy.array', 'numpy.array', (['[4]'], {'dtype': 'int'}), '([4], dtype=int)\n', (21012, 21028), False, 'import numpy\n'), ((21064, 21100), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4]'], {'dtype': 'int'}), '([1, 2, 3, 4], dtype=int)\n', (21075, 21100), False, 'import numpy\n'), ((21138, 21174), 'numpy.array', 'numpy.array', (['[1, 1, 1, 1]'], {'dtype': 'int'}), '([1, 1, 1, 1], dtype=int)\n', (21149, 21174), False, 'import numpy\n'), ((21209, 21248), 'numpy.array', 'numpy.array', (['[1, 1, 2, 3, 4]'], {'dtype': 'int'}), '([1, 1, 2, 3, 4], dtype=int)\n', (21220, 21248), False, 'import numpy\n'), ((21286, 21325), 'numpy.array', 'numpy.array', (['[1, 2, 1, 1, 1]'], {'dtype': 'int'}), '([1, 2, 1, 1, 1], dtype=int)\n', (21297, 21325), False, 'import numpy\n'), ((22412, 22590), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1], [2, 2, 2, 1, 1, 1, 1, \n 1], [2, 2, 2, 0, 0, 0, 0, 0], [2, 2, 2, 
0, 0, 0, 0, 0], [2, 2, 2, 0, 0,\n 0, 0, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1], [2, 2, 2, \n 1, 1, 1, 1, 1], [2, 2, 2, 0, 0, 0, 0, 0], [2, 2, 2, 0, 0, 0, 0, 0], [2,\n 2, 2, 0, 0, 0, 0, 0]])\n', (22423, 22590), False, 'import numpy\n'), ((22769, 22947), 'numpy.array', 'numpy.array', (['[[2, 2, 2, 2, 2, 1, 1, 0], [2, 2, 2, 2, 1, 1, 1, 1], [2, 2, 2, 2, 1, 1, 1, \n 1], [2, 2, 2, 0, 1, 1, 1, 1], [2, 2, 2, 0, 0, 1, 1, 1], [2, 2, 2, 0, 0,\n 0, 0, 0]]'], {}), '([[2, 2, 2, 2, 2, 1, 1, 0], [2, 2, 2, 2, 1, 1, 1, 1], [2, 2, 2, \n 2, 1, 1, 1, 1], [2, 2, 2, 0, 1, 1, 1, 1], [2, 2, 2, 0, 0, 1, 1, 1], [2,\n 2, 2, 0, 0, 0, 0, 0]])\n', (22780, 22947), False, 'import numpy\n'), ((23247, 23425), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, \n 1], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0,\n 0, 0, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, \n 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1,\n 1, 1, 0, 0, 0, 0, 0]])\n', (23258, 23425), False, 'import numpy\n'), ((23604, 23782), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, \n 1], [1, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0,\n 0, 0, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, \n 1, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 0, 0, 1, 1, 1], [1,\n 1, 1, 0, 0, 0, 0, 0]])\n', (23615, 23782), False, 'import numpy\n'), ((24391, 24454), 'numpy.stack', 'numpy.stack', (['(FCN_INPUT_MATRIX_2D, FCN_INPUT_MATRIX_2D)'], {'axis': '(0)'}), '((FCN_INPUT_MATRIX_2D, FCN_INPUT_MATRIX_2D), axis=0)\n', (24402, 24454), False, 'import numpy\n'), ((24482, 24546), 'numpy.stack', 'numpy.stack', (['(FCN_INPUT_MATRIX_3D, FCN_INPUT_MATRIX_3D)'], {'axis': '(-1)'}), '((FCN_INPUT_MATRIX_3D, FCN_INPUT_MATRIX_3D), axis=-1)\n', (24493, 24546), False, 'import numpy\n'), ((24574, 
24638), 'numpy.stack', 'numpy.stack', (['(FCN_INPUT_MATRIX_4D, FCN_INPUT_MATRIX_4D)'], {'axis': '(-2)'}), '((FCN_INPUT_MATRIX_4D, FCN_INPUT_MATRIX_4D), axis=-2)\n', (24585, 24638), False, 'import numpy\n'), ((24776, 24838), 'numpy.array', 'numpy.array', (['[[1, 3, 5, 7], [2, 4, 6, 8]]'], {'dtype': 'numpy.float32'}), '([[1, 3, 5, 7], [2, 4, 6, 8]], dtype=numpy.float32)\n', (24787, 24838), False, 'import numpy\n'), ((25006, 25074), 'numpy.stack', 'numpy.stack', (['(PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,)'], {'axis': '(0)'}), '((PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,), axis=0)\n', (25017, 25074), False, 'import numpy\n'), ((25127, 25295), 'numpy.stack', 'numpy.stack', (['(PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,\n PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,\n PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS)'], {'axis': '(-1)'}), '((PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,\n PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,\n PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS), axis=-1)\n', (25138, 25295), False, 'import numpy\n'), ((25329, 25396), 'numpy.array', 'numpy.array', (['[[1, 3, 5], [1, 3, 5], [2, 4, 6]]'], {'dtype': 'numpy.float32'}), '([[1, 3, 5], [1, 3, 5], [2, 4, 6]], dtype=numpy.float32)\n', (25340, 25396), False, 'import numpy\n'), ((25498, 25565), 'numpy.array', 'numpy.array', (['[[3, 5, 7], [3, 5, 7], [4, 6, 8]]'], {'dtype': 'numpy.float32'}), '([[3, 5, 7], [3, 5, 7], [4, 6, 8]], dtype=numpy.float32)\n', (25509, 25565), False, 'import numpy\n'), ((25667, 25734), 'numpy.array', 'numpy.array', (['[[1, 1, 3], [2, 2, 4], [2, 2, 4]]'], {'dtype': 'numpy.float32'}), '([[1, 1, 3], [2, 2, 4], [2, 2, 4]], dtype=numpy.float32)\n', (25678, 25734), False, 'import numpy\n'), ((25836, 25903), 'numpy.array', 'numpy.array', (['[[5, 7, 7], [6, 8, 8], [6, 8, 8]]'], {'dtype': 'numpy.float32'}), '([[5, 7, 7], [6, 8, 8], [6, 8, 8]], dtype=numpy.float32)\n', (25847, 25903), False, 'import numpy\n'), ((26025, 26077), 'numpy.array', 'numpy.array', (['[[0, 0, 1, 
1], [2, 2, 0, 0]]'], {'dtype': 'int'}), '([[0, 0, 1, 1], [2, 2, 0, 0]], dtype=int)\n', (26036, 26077), False, 'import numpy\n'), ((26235, 26300), 'numpy.stack', 'numpy.stack', (['(TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,)'], {'axis': '(0)'}), '((TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS,), axis=0)\n', (26246, 26300), False, 'import numpy\n'), ((26639, 26760), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_R1_C3, DOWNSIZED_MATRIX_R1_C2, DOWNSIZED_MATRIX_R2_C4,\n DOWNSIZED_MATRIX_R2_C1)'], {'axis': '(0)'}), '((DOWNSIZED_MATRIX_R1_C3, DOWNSIZED_MATRIX_R1_C2,\n DOWNSIZED_MATRIX_R2_C4, DOWNSIZED_MATRIX_R2_C1), axis=0)\n', (26650, 26760), False, 'import numpy\n'), ((26804, 26945), 'numpy.stack', 'numpy.stack', (['(DOWNSIZED_MATRIX_AT_SELECTED_POINTS, DOWNSIZED_MATRIX_AT_SELECTED_POINTS,\n DOWNSIZED_MATRIX_AT_SELECTED_POINTS)'], {'axis': '(-1)'}), '((DOWNSIZED_MATRIX_AT_SELECTED_POINTS,\n DOWNSIZED_MATRIX_AT_SELECTED_POINTS,\n DOWNSIZED_MATRIX_AT_SELECTED_POINTS), axis=-1)\n', (26815, 26945), False, 'import numpy\n'), ((26989, 27025), 'numpy.array', 'numpy.array', (['[1, 0, 0, 2]'], {'dtype': 'int'}), '([1, 0, 0, 2], dtype=int)\n', (27000, 27025), False, 'import numpy\n'), ((27063, 27099), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0]'], {'dtype': 'int'}), '([0, 0, 0, 0], dtype=int)\n', (27074, 27099), False, 'import numpy\n'), ((27133, 27169), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1]'], {'dtype': 'int'}), '([0, 0, 1, 1], dtype=int)\n', (27144, 27169), False, 'import numpy\n'), ((27206, 27242), 'numpy.array', 'numpy.array', (['[2, 1, 3, 0]'], {'dtype': 'int'}), '([2, 1, 3, 0], dtype=int)\n', (27217, 27242), False, 'import numpy\n'), ((11557, 11608), 'numpy.array', 'numpy.array', (['[0, 0, 0, 1, 1, 1, 1, 1, 1]'], {'dtype': 'int'}), '([0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int)\n', (11568, 11608), False, 'import numpy\n'), ((11637, 11688), 'numpy.array', 'numpy.array', (['[1, 2, 7, 2, 3, 4, 5, 6, 7]'], {'dtype': 'int'}), '([1, 2, 7, 2, 3, 4, 5, 6, 7], dtype=int)\n', 
(11648, 11688), False, 'import numpy\n'), ((11714, 11756), 'numpy.array', 'numpy.array', (['[1, 2, 3, 4, 4, 5]'], {'dtype': 'int'}), '([1, 2, 3, 4, 4, 5], dtype=int)\n', (11725, 11756), False, 'import numpy\n'), ((11785, 11827), 'numpy.array', 'numpy.array', (['[1, 1, 1, 0, 1, 0]'], {'dtype': 'int'}), '([1, 1, 1, 0, 1, 0], dtype=int)\n', (11796, 11827), False, 'import numpy\n'), ((12218, 12260), 'numpy.array', 'numpy.array', (['[1, 1, 2, 2, 3, 3]'], {'dtype': 'int'}), '([1, 1, 2, 2, 3, 3], dtype=int)\n', (12229, 12260), False, 'import numpy\n'), ((12289, 12331), 'numpy.array', 'numpy.array', (['[4, 5, 5, 6, 6, 7]'], {'dtype': 'int'}), '([4, 5, 5, 6, 6, 7], dtype=int)\n', (12300, 12331), False, 'import numpy\n'), ((12362, 12419), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5]'], {'dtype': 'int'}), '([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=int)\n', (12373, 12419), False, 'import numpy\n'), ((12453, 12510), 'numpy.array', 'numpy.array', (['[2, 3, 1, 2, 0, 1, 0, 1, 0, 1, 1]'], {'dtype': 'int'}), '([2, 3, 1, 2, 0, 1, 0, 1, 0, 1, 1], dtype=int)\n', (12464, 12510), False, 'import numpy\n'), ((62705, 62720), 'unittest.main', 'unittest.main', ([], {}), '()\n', (62718, 62720), False, 'import unittest\n'), ((13736, 13796), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (13747, 13796), False, 'import numpy\n'), ((14628, 14688), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (14639, 14688), False, 'import numpy\n'), ((15731, 15808), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME1_NO_MASK, POSITIVE_ROWS_TIME1_NO_MASK)'], {}), '((NEGATIVE_ROWS_TIME1_NO_MASK, POSITIVE_ROWS_TIME1_NO_MASK))\n', (15748, 15808), False, 'import numpy\n'), ((15857, 15944), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME1_NO_MASK, 
POSITIVE_COLUMNS_TIME1_NO_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME1_NO_MASK,\n POSITIVE_COLUMNS_TIME1_NO_MASK))\n', (15874, 15944), False, 'import numpy\n'), ((16389, 16474), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME1_WITH_MASK, POSITIVE_ROWS_TIME1_WITH_MASK)'], {}), '((NEGATIVE_ROWS_TIME1_WITH_MASK,\n POSITIVE_ROWS_TIME1_WITH_MASK))\n', (16406, 16474), False, 'import numpy\n'), ((16521, 16612), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME1_WITH_MASK, POSITIVE_COLUMNS_TIME1_WITH_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME1_WITH_MASK,\n POSITIVE_COLUMNS_TIME1_WITH_MASK))\n', (16538, 16612), False, 'import numpy\n'), ((16958, 17035), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME2_NO_MASK, POSITIVE_ROWS_TIME2_NO_MASK)'], {}), '((NEGATIVE_ROWS_TIME2_NO_MASK, POSITIVE_ROWS_TIME2_NO_MASK))\n', (16975, 17035), False, 'import numpy\n'), ((17084, 17171), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME2_NO_MASK, POSITIVE_COLUMNS_TIME2_NO_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME2_NO_MASK,\n POSITIVE_COLUMNS_TIME2_NO_MASK))\n', (17101, 17171), False, 'import numpy\n'), ((17546, 17631), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME2_WITH_MASK, POSITIVE_ROWS_TIME2_WITH_MASK)'], {}), '((NEGATIVE_ROWS_TIME2_WITH_MASK,\n POSITIVE_ROWS_TIME2_WITH_MASK))\n', (17563, 17631), False, 'import numpy\n'), ((17678, 17769), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME2_WITH_MASK, POSITIVE_COLUMNS_TIME2_WITH_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME2_WITH_MASK,\n POSITIVE_COLUMNS_TIME2_WITH_MASK))\n', (17695, 17769), False, 'import numpy\n'), ((19516, 19630), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME1_NO_MASK, WARM_FRONT_ROWS_TIME1_NO_MASK,\n COLD_FRONT_ROWS_TIME1_NO_MASK)'], {}), '((NEGATIVE_ROWS_TIME1_NO_MASK,\n WARM_FRONT_ROWS_TIME1_NO_MASK, COLD_FRONT_ROWS_TIME1_NO_MASK))\n', (19533, 19630), False, 'import numpy\n'), ((19680, 19803), 
'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME1_NO_MASK, WARM_FRONT_COLUMNS_TIME1_NO_MASK,\n COLD_FRONT_COLUMNS_TIME1_NO_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME1_NO_MASK,\n WARM_FRONT_COLUMNS_TIME1_NO_MASK, COLD_FRONT_COLUMNS_TIME1_NO_MASK))\n', (19697, 19803), False, 'import numpy\n'), ((19851, 19965), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME2_NO_MASK, WARM_FRONT_ROWS_TIME2_NO_MASK,\n COLD_FRONT_ROWS_TIME2_NO_MASK)'], {}), '((NEGATIVE_ROWS_TIME2_NO_MASK,\n WARM_FRONT_ROWS_TIME2_NO_MASK, COLD_FRONT_ROWS_TIME2_NO_MASK))\n', (19868, 19965), False, 'import numpy\n'), ((20015, 20138), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME2_NO_MASK, WARM_FRONT_COLUMNS_TIME2_NO_MASK,\n COLD_FRONT_COLUMNS_TIME2_NO_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME2_NO_MASK,\n WARM_FRONT_COLUMNS_TIME2_NO_MASK, COLD_FRONT_COLUMNS_TIME2_NO_MASK))\n', (20032, 20138), False, 'import numpy\n'), ((21357, 21477), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME1_WITH_MASK, WARM_FRONT_ROWS_TIME1_WITH_MASK,\n COLD_FRONT_ROWS_TIME1_WITH_MASK)'], {}), '((NEGATIVE_ROWS_TIME1_WITH_MASK,\n WARM_FRONT_ROWS_TIME1_WITH_MASK, COLD_FRONT_ROWS_TIME1_WITH_MASK))\n', (21374, 21477), False, 'import numpy\n'), ((21529, 21658), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_COLUMNS_TIME1_WITH_MASK, WARM_FRONT_COLUMNS_TIME1_WITH_MASK,\n COLD_FRONT_COLUMNS_TIME1_WITH_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME1_WITH_MASK,\n WARM_FRONT_COLUMNS_TIME1_WITH_MASK, COLD_FRONT_COLUMNS_TIME1_WITH_MASK))\n', (21546, 21658), False, 'import numpy\n'), ((21708, 21828), 'numpy.concatenate', 'numpy.concatenate', (['(NEGATIVE_ROWS_TIME2_WITH_MASK, WARM_FRONT_ROWS_TIME2_WITH_MASK,\n COLD_FRONT_ROWS_TIME2_WITH_MASK)'], {}), '((NEGATIVE_ROWS_TIME2_WITH_MASK,\n WARM_FRONT_ROWS_TIME2_WITH_MASK, COLD_FRONT_ROWS_TIME2_WITH_MASK))\n', (21725, 21828), False, 'import numpy\n'), ((21880, 22009), 'numpy.concatenate', 'numpy.concatenate', 
(['(NEGATIVE_COLUMNS_TIME2_WITH_MASK, WARM_FRONT_COLUMNS_TIME2_WITH_MASK,\n COLD_FRONT_COLUMNS_TIME2_WITH_MASK)'], {}), '((NEGATIVE_COLUMNS_TIME2_WITH_MASK,\n WARM_FRONT_COLUMNS_TIME2_WITH_MASK, COLD_FRONT_COLUMNS_TIME2_WITH_MASK))\n', (21897, 22009), False, 'import numpy\n'), ((23148, 23208), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (23159, 23208), False, 'import numpy\n'), ((23982, 24042), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX)'], {'axis': '(0)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0)\n', (23993, 24042), False, 'import numpy\n'), ((26479, 26515), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1]'], {'dtype': 'int'}), '([0, 0, 1, 1], dtype=int)\n', (26490, 26515), False, 'import numpy\n'), ((26560, 26596), 'numpy.array', 'numpy.array', (['[2, 1, 3, 0]'], {'dtype': 'int'}), '([2, 1, 3, 0], dtype=int)\n', (26571, 26596), False, 'import numpy\n'), ((29232, 29285), 'generalexam.machine_learning.machine_learning_utils._check_full_narr_matrix', 'ml_utils._check_full_narr_matrix', (['FULL_NARR_MATRIX_3D'], {}), '(FULL_NARR_MATRIX_3D)\n', (29264, 29285), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((29512, 29565), 'generalexam.machine_learning.machine_learning_utils._check_full_narr_matrix', 'ml_utils._check_full_narr_matrix', (['FULL_NARR_MATRIX_4D'], {}), '(FULL_NARR_MATRIX_4D)\n', (29544, 29565), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((29792, 29845), 'generalexam.machine_learning.machine_learning_utils._check_full_narr_matrix', 'ml_utils._check_full_narr_matrix', (['FULL_NARR_MATRIX_5D'], {}), '(FULL_NARR_MATRIX_5D)\n', (29824, 29845), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((31028, 31097), 'generalexam.machine_learning.machine_learning_utils._check_predictor_matrix', 
'ml_utils._check_predictor_matrix', (['PREDICTOR_MATRIX_3D'], {'allow_nan': '(True)'}), '(PREDICTOR_MATRIX_3D, allow_nan=True)\n', (31060, 31097), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((31639, 31708), 'generalexam.machine_learning.machine_learning_utils._check_predictor_matrix', 'ml_utils._check_predictor_matrix', (['PREDICTOR_MATRIX_4D'], {'allow_nan': '(True)'}), '(PREDICTOR_MATRIX_4D, allow_nan=True)\n', (31671, 31708), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((31892, 31961), 'generalexam.machine_learning.machine_learning_utils._check_predictor_matrix', 'ml_utils._check_predictor_matrix', (['PREDICTOR_MATRIX_5D'], {'allow_nan': '(True)'}), '(PREDICTOR_MATRIX_5D, allow_nan=True)\n', (31924, 31961), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((32165, 32261), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_BINARY_1D'], {'assert_binary': '(True)', 'num_dimensions': '(1)'}), '(TARGET_VALUES_BINARY_1D, assert_binary=True,\n num_dimensions=1)\n', (32194, 32261), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((32837, 32933), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_BINARY_3D'], {'assert_binary': '(True)', 'num_dimensions': '(3)'}), '(TARGET_VALUES_BINARY_3D, assert_binary=True,\n num_dimensions=3)\n', (32866, 32933), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((33831, 33928), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_BINARY_1D'], {'assert_binary': '(False)', 'num_dimensions': '(1)'}), '(TARGET_VALUES_BINARY_1D, assert_binary=False,\n num_dimensions=1)\n', (33860, 33928), True, 'from 
generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((34590, 34896), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_3D', 'center_row': 'DOWNSIZING_CENTER_ROW_TOP_LEFT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_TOP_LEFT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_3D, center_row=DOWNSIZING_CENTER_ROW_TOP_LEFT,\n center_column=DOWNSIZING_CENTER_COLUMN_TOP_LEFT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (34625, 34896), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((35327, 35633), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_4D', 'center_row': 'DOWNSIZING_CENTER_ROW_TOP_LEFT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_TOP_LEFT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_4D, center_row=DOWNSIZING_CENTER_ROW_TOP_LEFT,\n center_column=DOWNSIZING_CENTER_COLUMN_TOP_LEFT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (35362, 35633), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((36064, 36370), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_5D', 'center_row': 'DOWNSIZING_CENTER_ROW_TOP_LEFT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_TOP_LEFT', 'num_rows_in_half_window': 
'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_5D, center_row=DOWNSIZING_CENTER_ROW_TOP_LEFT,\n center_column=DOWNSIZING_CENTER_COLUMN_TOP_LEFT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (36099, 36370), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((36807, 37119), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_3D', 'center_row': 'DOWNSIZING_CENTER_ROW_BOTTOM_LEFT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_3D, center_row=DOWNSIZING_CENTER_ROW_BOTTOM_LEFT,\n center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (36842, 37119), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((37559, 37871), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_4D', 'center_row': 'DOWNSIZING_CENTER_ROW_BOTTOM_LEFT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_4D, center_row=DOWNSIZING_CENTER_ROW_BOTTOM_LEFT,\n center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', 
(37594, 37871), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((38311, 38623), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_5D', 'center_row': 'DOWNSIZING_CENTER_ROW_BOTTOM_LEFT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_5D, center_row=DOWNSIZING_CENTER_ROW_BOTTOM_LEFT,\n center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_LEFT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (38346, 38623), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((39059, 39367), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_3D', 'center_row': 'DOWNSIZING_CENTER_ROW_TOP_RIGHT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_TOP_RIGHT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_3D, center_row=DOWNSIZING_CENTER_ROW_TOP_RIGHT,\n center_column=DOWNSIZING_CENTER_COLUMN_TOP_RIGHT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (39094, 39367), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((39801, 40109), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_4D', 'center_row': 'DOWNSIZING_CENTER_ROW_TOP_RIGHT', 'center_column': 
'DOWNSIZING_CENTER_COLUMN_TOP_RIGHT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_4D, center_row=DOWNSIZING_CENTER_ROW_TOP_RIGHT,\n center_column=DOWNSIZING_CENTER_COLUMN_TOP_RIGHT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (39836, 40109), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((40543, 40851), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_5D', 'center_row': 'DOWNSIZING_CENTER_ROW_TOP_RIGHT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_TOP_RIGHT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_5D, center_row=DOWNSIZING_CENTER_ROW_TOP_RIGHT,\n center_column=DOWNSIZING_CENTER_COLUMN_TOP_RIGHT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (40578, 40851), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((41291, 41605), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_3D', 'center_row': 'DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_3D, center_row=DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT,\n center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n 
num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (41326, 41605), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((42048, 42362), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_4D', 'center_row': 'DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_4D, center_row=DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT,\n center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (42083, 42362), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((42805, 43119), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_5D', 'center_row': 'DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT', 'center_column': 'DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_5D, center_row=DOWNSIZING_CENTER_ROW_BOTTOM_RIGHT,\n center_column=DOWNSIZING_CENTER_COLUMN_BOTTOM_RIGHT,\n num_rows_in_half_window=NUM_ROWS_IN_DOWNSIZED_HALF_GRID,\n num_columns_in_half_window=NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (42840, 43119), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((43550, 43854), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_3D', 
'center_row': 'DOWNSIZING_CENTER_ROW_MIDDLE', 'center_column': 'DOWNSIZING_CENTER_COLUMN_MIDDLE', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_3D, center_row=DOWNSIZING_CENTER_ROW_MIDDLE,\n center_column=DOWNSIZING_CENTER_COLUMN_MIDDLE, num_rows_in_half_window=\n NUM_ROWS_IN_DOWNSIZED_HALF_GRID, num_columns_in_half_window=\n NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (43585, 43854), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((44277, 44581), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_4D', 'center_row': 'DOWNSIZING_CENTER_ROW_MIDDLE', 'center_column': 'DOWNSIZING_CENTER_COLUMN_MIDDLE', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_4D, center_row=DOWNSIZING_CENTER_ROW_MIDDLE,\n center_column=DOWNSIZING_CENTER_COLUMN_MIDDLE, num_rows_in_half_window=\n NUM_ROWS_IN_DOWNSIZED_HALF_GRID, num_columns_in_half_window=\n NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (44312, 44581), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((45004, 45308), 'generalexam.machine_learning.machine_learning_utils._downsize_predictor_images', 'ml_utils._downsize_predictor_images', ([], {'predictor_matrix': 'PRE_DOWNSIZED_MATRIX_5D', 'center_row': 'DOWNSIZING_CENTER_ROW_MIDDLE', 'center_column': 'DOWNSIZING_CENTER_COLUMN_MIDDLE', 'num_rows_in_half_window': 'NUM_ROWS_IN_DOWNSIZED_HALF_GRID', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID'}), '(predictor_matrix=\n PRE_DOWNSIZED_MATRIX_5D, center_row=DOWNSIZING_CENTER_ROW_MIDDLE,\n center_column=DOWNSIZING_CENTER_COLUMN_MIDDLE, num_rows_in_half_window=\n 
NUM_ROWS_IN_DOWNSIZED_HALF_GRID, num_columns_in_half_window=\n NUM_COLUMNS_IN_DOWNSIZED_HALF_GRID)\n', (45039, 45308), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((45737, 45866), 'generalexam.machine_learning.machine_learning_utils._class_fractions_to_num_points', 'ml_utils._class_fractions_to_num_points', ([], {'class_fractions': 'CLASS_FRACTIONS_BINARY', 'num_points_total': 'NUM_POINTS_AVAILABLE_LARGE'}), '(class_fractions=\n CLASS_FRACTIONS_BINARY, num_points_total=NUM_POINTS_AVAILABLE_LARGE)\n', (45776, 45866), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((46279, 46409), 'generalexam.machine_learning.machine_learning_utils._class_fractions_to_num_points', 'ml_utils._class_fractions_to_num_points', ([], {'class_fractions': 'CLASS_FRACTIONS_TERNARY', 'num_points_total': 'NUM_POINTS_AVAILABLE_LARGE'}), '(class_fractions=\n CLASS_FRACTIONS_TERNARY, num_points_total=NUM_POINTS_AVAILABLE_LARGE)\n', (46318, 46409), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((46822, 46951), 'generalexam.machine_learning.machine_learning_utils._class_fractions_to_num_points', 'ml_utils._class_fractions_to_num_points', ([], {'class_fractions': 'CLASS_FRACTIONS_BINARY', 'num_points_total': 'NUM_POINTS_AVAILABLE_SMALL'}), '(class_fractions=\n CLASS_FRACTIONS_BINARY, num_points_total=NUM_POINTS_AVAILABLE_SMALL)\n', (46861, 46951), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((47364, 47494), 'generalexam.machine_learning.machine_learning_utils._class_fractions_to_num_points', 'ml_utils._class_fractions_to_num_points', ([], {'class_fractions': 'CLASS_FRACTIONS_TERNARY', 'num_points_total': 'NUM_POINTS_AVAILABLE_SMALL'}), '(class_fractions=\n CLASS_FRACTIONS_TERNARY, num_points_total=NUM_POINTS_AVAILABLE_SMALL)\n', (47403, 47494), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), 
((47839, 47893), 'generalexam.machine_learning.machine_learning_utils.get_class_weight_dict', 'ml_utils.get_class_weight_dict', (['CLASS_FRACTIONS_BINARY'], {}), '(CLASS_FRACTIONS_BINARY)\n', (47869, 47893), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((48491, 48546), 'generalexam.machine_learning.machine_learning_utils.get_class_weight_dict', 'ml_utils.get_class_weight_dict', (['CLASS_FRACTIONS_TERNARY'], {}), '(CLASS_FRACTIONS_TERNARY)\n', (48521, 48546), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((49211, 49401), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_4D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.MINMAX_STRING', 'percentile_offset': 'PRCTILE_OFFSET_FOR_NORMALIZATION'}), '(predictor_matrix=PREDICTOR_MATRIX_4D_DENORM +\n 0.0, normalization_type_string=ml_utils.MINMAX_STRING,\n percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)\n', (49240, 49401), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((49867, 50057), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_5D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.MINMAX_STRING', 'percentile_offset': 'PRCTILE_OFFSET_FOR_NORMALIZATION'}), '(predictor_matrix=PREDICTOR_MATRIX_5D_DENORM +\n 0.0, normalization_type_string=ml_utils.MINMAX_STRING,\n percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)\n', (49896, 50057), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((50517, 50652), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_4D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.Z_SCORE_STRING'}), 
'(predictor_matrix=PREDICTOR_MATRIX_4D_DENORM +\n 0.0, normalization_type_string=ml_utils.Z_SCORE_STRING)\n', (50546, 50652), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((51100, 51235), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_5D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.Z_SCORE_STRING'}), '(predictor_matrix=PREDICTOR_MATRIX_5D_DENORM +\n 0.0, normalization_type_string=ml_utils.Z_SCORE_STRING)\n', (51129, 51235), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((51727, 51917), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_4D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.MINMAX_STRING', 'percentile_offset': 'PRCTILE_OFFSET_FOR_NORMALIZATION'}), '(predictor_matrix=PREDICTOR_MATRIX_4D_DENORM +\n 0.0, normalization_type_string=ml_utils.MINMAX_STRING,\n percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)\n', (51756, 51917), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((52001, 52120), 'generalexam.machine_learning.machine_learning_utils.denormalize_predictors', 'ml_utils.denormalize_predictors', ([], {'predictor_matrix': 'this_predictor_matrix', 'normalization_dict': 'this_normalization_dict'}), '(predictor_matrix=this_predictor_matrix,\n normalization_dict=this_normalization_dict)\n', (52032, 52120), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((52613, 52803), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_5D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.MINMAX_STRING', 'percentile_offset': 'PRCTILE_OFFSET_FOR_NORMALIZATION'}), 
'(predictor_matrix=PREDICTOR_MATRIX_5D_DENORM +\n 0.0, normalization_type_string=ml_utils.MINMAX_STRING,\n percentile_offset=PRCTILE_OFFSET_FOR_NORMALIZATION)\n', (52642, 52803), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((52887, 53006), 'generalexam.machine_learning.machine_learning_utils.denormalize_predictors', 'ml_utils.denormalize_predictors', ([], {'predictor_matrix': 'this_predictor_matrix', 'normalization_dict': 'this_normalization_dict'}), '(predictor_matrix=this_predictor_matrix,\n normalization_dict=this_normalization_dict)\n', (52918, 53006), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((53494, 53629), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_4D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.Z_SCORE_STRING'}), '(predictor_matrix=PREDICTOR_MATRIX_4D_DENORM +\n 0.0, normalization_type_string=ml_utils.Z_SCORE_STRING)\n', (53523, 53629), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((53701, 53820), 'generalexam.machine_learning.machine_learning_utils.denormalize_predictors', 'ml_utils.denormalize_predictors', ([], {'predictor_matrix': 'this_predictor_matrix', 'normalization_dict': 'this_normalization_dict'}), '(predictor_matrix=this_predictor_matrix,\n normalization_dict=this_normalization_dict)\n', (53732, 53820), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((54308, 54443), 'generalexam.machine_learning.machine_learning_utils.normalize_predictors', 'ml_utils.normalize_predictors', ([], {'predictor_matrix': '(PREDICTOR_MATRIX_5D_DENORM + 0.0)', 'normalization_type_string': 'ml_utils.Z_SCORE_STRING'}), '(predictor_matrix=PREDICTOR_MATRIX_5D_DENORM +\n 0.0, normalization_type_string=ml_utils.Z_SCORE_STRING)\n', (54337, 54443), True, 'from generalexam.machine_learning 
import machine_learning_utils as ml_utils\n'), ((54515, 54634), 'generalexam.machine_learning.machine_learning_utils.denormalize_predictors', 'ml_utils.denormalize_predictors', ([], {'predictor_matrix': 'this_predictor_matrix', 'normalization_dict': 'this_normalization_dict'}), '(predictor_matrix=this_predictor_matrix,\n normalization_dict=this_normalization_dict)\n', (54546, 54634), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((55034, 55248), 'generalexam.machine_learning.machine_learning_utils.sample_target_points', 'ml_utils.sample_target_points', ([], {'target_matrix': 'FRONTAL_GRID_MATRIX_BINARY', 'class_fractions': 'CLASS_FRACTIONS_FOR_BINARY_SAMPLING', 'num_points_to_sample': 'NUM_POINTS_TO_SAMPLE', 'mask_matrix': 'None', 'test_mode': '(True)'}), '(target_matrix=FRONTAL_GRID_MATRIX_BINARY,\n class_fractions=CLASS_FRACTIONS_FOR_BINARY_SAMPLING,\n num_points_to_sample=NUM_POINTS_TO_SAMPLE, mask_matrix=None, test_mode=True\n )\n', (55063, 55248), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((55632, 55852), 'generalexam.machine_learning.machine_learning_utils.sample_target_points', 'ml_utils.sample_target_points', ([], {'target_matrix': 'FRONTAL_GRID_MATRIX_BINARY', 'class_fractions': 'CLASS_FRACTIONS_FOR_BINARY_SAMPLING', 'num_points_to_sample': 'NUM_POINTS_TO_SAMPLE', 'mask_matrix': 'MASK_MATRIX', 'test_mode': '(True)'}), '(target_matrix=FRONTAL_GRID_MATRIX_BINARY,\n class_fractions=CLASS_FRACTIONS_FOR_BINARY_SAMPLING,\n num_points_to_sample=NUM_POINTS_TO_SAMPLE, mask_matrix=MASK_MATRIX,\n test_mode=True)\n', (55661, 55852), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((56238, 56431), 'generalexam.machine_learning.machine_learning_utils.sample_target_points', 'ml_utils.sample_target_points', ([], {'target_matrix': 'FRONTAL_GRID_MATRIX_TERNARY', 'class_fractions': 'CLASS_FRACTIONS_FOR_TERNARY_SAMPLING', 'num_points_to_sample': 
'NUM_POINTS_TO_SAMPLE', 'test_mode': '(True)'}), '(target_matrix=FRONTAL_GRID_MATRIX_TERNARY,\n class_fractions=CLASS_FRACTIONS_FOR_TERNARY_SAMPLING,\n num_points_to_sample=NUM_POINTS_TO_SAMPLE, test_mode=True)\n', (56267, 56431), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((56810, 57032), 'generalexam.machine_learning.machine_learning_utils.sample_target_points', 'ml_utils.sample_target_points', ([], {'target_matrix': 'FRONTAL_GRID_MATRIX_TERNARY', 'class_fractions': 'CLASS_FRACTIONS_FOR_TERNARY_SAMPLING', 'num_points_to_sample': 'NUM_POINTS_TO_SAMPLE', 'mask_matrix': 'MASK_MATRIX', 'test_mode': '(True)'}), '(target_matrix=FRONTAL_GRID_MATRIX_TERNARY,\n class_fractions=CLASS_FRACTIONS_FOR_TERNARY_SAMPLING,\n num_points_to_sample=NUM_POINTS_TO_SAMPLE, mask_matrix=MASK_MATRIX,\n test_mode=True)\n', (56839, 57032), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((57342, 57489), 'generalexam.machine_learning.machine_learning_utils.front_table_to_images', 'ml_utils.front_table_to_images', ([], {'frontal_grid_table': 'FRONTAL_GRID_TABLE', 'num_rows_per_image': 'NUM_GRID_ROWS', 'num_columns_per_image': 'NUM_GRID_COLUMNS'}), '(frontal_grid_table=FRONTAL_GRID_TABLE,\n num_rows_per_image=NUM_GRID_ROWS, num_columns_per_image=NUM_GRID_COLUMNS)\n', (57372, 57489), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((57772, 57814), 'copy.deepcopy', 'copy.deepcopy', (['FRONTAL_GRID_MATRIX_TERNARY'], {}), '(FRONTAL_GRID_MATRIX_TERNARY)\n', (57785, 57814), False, 'import copy\n'), ((57844, 57893), 'generalexam.machine_learning.machine_learning_utils.binarize_front_images', 'ml_utils.binarize_front_images', (['this_input_matrix'], {}), '(this_input_matrix)\n', (57874, 57893), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((58148, 58189), 'copy.deepcopy', 'copy.deepcopy', (['FRONTAL_GRID_MATRIX_BINARY'], {}), 
'(FRONTAL_GRID_MATRIX_BINARY)\n', (58161, 58189), False, 'import copy\n'), ((58220, 58344), 'generalexam.machine_learning.machine_learning_utils.dilate_binary_target_images', 'ml_utils.dilate_binary_target_images', ([], {'target_matrix': 'this_input_matrix', 'dilation_distance_metres': 'DILATION_DISTANCE_METRES'}), '(target_matrix=this_input_matrix,\n dilation_distance_metres=DILATION_DISTANCE_METRES)\n', (58256, 58344), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((58631, 58673), 'copy.deepcopy', 'copy.deepcopy', (['FRONTAL_GRID_MATRIX_TERNARY'], {}), '(FRONTAL_GRID_MATRIX_TERNARY)\n', (58644, 58673), False, 'import copy\n'), ((58704, 58829), 'generalexam.machine_learning.machine_learning_utils.dilate_ternary_target_images', 'ml_utils.dilate_ternary_target_images', ([], {'target_matrix': 'this_input_matrix', 'dilation_distance_metres': 'DILATION_DISTANCE_METRES'}), '(target_matrix=this_input_matrix,\n dilation_distance_metres=DILATION_DISTANCE_METRES)\n', (58741, 58829), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((59105, 59171), 'generalexam.machine_learning.machine_learning_utils.stack_predictor_variables', 'ml_utils.stack_predictor_variables', (['TUPLE_OF_3D_PREDICTOR_MATRICES'], {}), '(TUPLE_OF_3D_PREDICTOR_MATRICES)\n', (59139, 59171), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((59425, 59482), 'generalexam.machine_learning.machine_learning_utils.stack_time_steps', 'ml_utils.stack_time_steps', (['TUPLE_OF_4D_PREDICTOR_MATRICES'], {}), '(TUPLE_OF_4D_PREDICTOR_MATRICES)\n', (59450, 59482), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((59807, 59867), 'generalexam.machine_learning.machine_learning_utils.subset_narr_grid_for_fcn_input', 'ml_utils.subset_narr_grid_for_fcn_input', (['FULL_NARR_MATRIX_3D'], {}), '(FULL_NARR_MATRIX_3D)\n', (59846, 59867), True, 'from 
generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((60189, 60249), 'generalexam.machine_learning.machine_learning_utils.subset_narr_grid_for_fcn_input', 'ml_utils.subset_narr_grid_for_fcn_input', (['FULL_NARR_MATRIX_4D'], {}), '(FULL_NARR_MATRIX_4D)\n', (60228, 60249), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((60571, 60631), 'generalexam.machine_learning.machine_learning_utils.subset_narr_grid_for_fcn_input', 'ml_utils.subset_narr_grid_for_fcn_input', (['FULL_NARR_MATRIX_5D'], {}), '(FULL_NARR_MATRIX_5D)\n', (60610, 60631), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((60926, 60985), 'copy.deepcopy', 'copy.deepcopy', (['PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS'], {}), '(PREDICTOR_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS)\n', (60939, 60985), False, 'import copy\n'), ((61160, 61541), 'generalexam.machine_learning.machine_learning_utils.downsize_grids_around_selected_points', 'ml_utils.downsize_grids_around_selected_points', ([], {'predictor_matrix': 'this_full_predictor_matrix', 'target_matrix': 'TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS', 'num_rows_in_half_window': 'NUM_ROWS_IN_HALF_GRID_AROUND_SELECTED_PTS', 'num_columns_in_half_window': 'NUM_COLUMNS_IN_HALF_GRID_AROUND_SELECTED_PTS', 'target_point_dict': 'TARGET_POINT_DICT_FOR_DOWNSIZING', 'test_mode': '(True)'}), '(predictor_matrix=\n this_full_predictor_matrix, target_matrix=\n TARGET_MATRIX_TO_DOWNSIZE_AT_SELECTED_PTS, num_rows_in_half_window=\n NUM_ROWS_IN_HALF_GRID_AROUND_SELECTED_PTS, num_columns_in_half_window=\n NUM_COLUMNS_IN_HALF_GRID_AROUND_SELECTED_PTS, target_point_dict=\n TARGET_POINT_DICT_FOR_DOWNSIZING, test_mode=True)\n', (61206, 61541), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((62338, 62569), 'generalexam.machine_learning.machine_learning_utils.find_gridded_prediction_file', 'ml_utils.find_gridded_prediction_file', ([], 
{'directory_name': 'PREDICTION_DIR_NAME', 'first_target_time_unix_sec': 'FIRST_PREDICTION_TIME_UNIX_SEC', 'last_target_time_unix_sec': 'LAST_PREDICTION_TIME_UNIX_SEC', 'raise_error_if_missing': '(False)'}), '(directory_name=PREDICTION_DIR_NAME,\n first_target_time_unix_sec=FIRST_PREDICTION_TIME_UNIX_SEC,\n last_target_time_unix_sec=LAST_PREDICTION_TIME_UNIX_SEC,\n raise_error_if_missing=False)\n', (62375, 62569), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((28952, 29005), 'generalexam.machine_learning.machine_learning_utils._check_full_narr_matrix', 'ml_utils._check_full_narr_matrix', (['FULL_NARR_MATRIX_2D'], {}), '(FULL_NARR_MATRIX_2D)\n', (28984, 29005), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((30477, 30530), 'generalexam.machine_learning.machine_learning_utils._check_predictor_matrix', 'ml_utils._check_predictor_matrix', (['PREDICTOR_MATRIX_1D'], {}), '(PREDICTOR_MATRIX_1D)\n', (30509, 30530), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((30761, 30814), 'generalexam.machine_learning.machine_learning_utils._check_predictor_matrix', 'ml_utils._check_predictor_matrix', (['PREDICTOR_MATRIX_2D'], {}), '(PREDICTOR_MATRIX_2D)\n', (30793, 30814), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((31368, 31438), 'generalexam.machine_learning.machine_learning_utils._check_predictor_matrix', 'ml_utils._check_predictor_matrix', (['PREDICTOR_MATRIX_3D'], {'allow_nan': '(False)'}), '(PREDICTOR_MATRIX_3D, allow_nan=False)\n', (31400, 31438), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((32521, 32618), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_BINARY_1D'], {'assert_binary': '(False)', 'num_dimensions': '(3)'}), '(TARGET_VALUES_BINARY_1D, assert_binary=False,\n 
num_dimensions=3)\n', (32550, 32618), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((33193, 33290), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_BINARY_3D'], {'assert_binary': '(False)', 'num_dimensions': '(1)'}), '(TARGET_VALUES_BINARY_3D, assert_binary=False,\n num_dimensions=1)\n', (33222, 33290), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((33527, 33602), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_BINARY_2D'], {'assert_binary': '(False)'}), '(TARGET_VALUES_BINARY_2D, assert_binary=False)\n', (33556, 33602), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((34206, 34303), 'generalexam.machine_learning.machine_learning_utils._check_target_matrix', 'ml_utils._check_target_matrix', (['TARGET_VALUES_TERNARY_1D'], {'assert_binary': '(True)', 'num_dimensions': '(1)'}), '(TARGET_VALUES_TERNARY_1D, assert_binary=True,\n num_dimensions=1)\n', (34235, 34303), True, 'from generalexam.machine_learning import machine_learning_utils as ml_utils\n'), ((34966, 35039), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_TOP_LEFT_3D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_TOP_LEFT_3D, atol=TOLERANCE)\n', (34980, 35039), False, 'import numpy\n'), ((35703, 35776), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_TOP_LEFT_4D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_TOP_LEFT_4D, atol=TOLERANCE)\n', (35717, 35776), False, 'import numpy\n'), ((36440, 36513), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_TOP_LEFT_5D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_TOP_LEFT_5D, atol=TOLERANCE)\n', (36454, 36513), False, 'import numpy\n'), ((37189, 37265), 'numpy.allclose', 'numpy.allclose', 
(['this_matrix', 'DOWNSIZED_MATRIX_BOTTOM_LEFT_3D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_BOTTOM_LEFT_3D, atol=TOLERANCE)\n', (37203, 37265), False, 'import numpy\n'), ((37941, 38017), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_BOTTOM_LEFT_4D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_BOTTOM_LEFT_4D, atol=TOLERANCE)\n', (37955, 38017), False, 'import numpy\n'), ((38693, 38769), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_BOTTOM_LEFT_5D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_BOTTOM_LEFT_5D, atol=TOLERANCE)\n', (38707, 38769), False, 'import numpy\n'), ((39437, 39511), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_TOP_RIGHT_3D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_TOP_RIGHT_3D, atol=TOLERANCE)\n', (39451, 39511), False, 'import numpy\n'), ((40179, 40253), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_TOP_RIGHT_4D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_TOP_RIGHT_4D, atol=TOLERANCE)\n', (40193, 40253), False, 'import numpy\n'), ((40921, 40995), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_TOP_RIGHT_5D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_TOP_RIGHT_5D, atol=TOLERANCE)\n', (40935, 40995), False, 'import numpy\n'), ((41675, 41752), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_BOTTOM_RIGHT_3D, atol=TOLERANCE)\n', (41689, 41752), False, 'import numpy\n'), ((42432, 42509), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_BOTTOM_RIGHT_4D, atol=TOLERANCE)\n', (42446, 42509), False, 'import numpy\n'), ((43189, 43266), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_BOTTOM_RIGHT_5D'], {'atol': 'TOLERANCE'}), 
'(this_matrix, DOWNSIZED_MATRIX_BOTTOM_RIGHT_5D, atol=TOLERANCE)\n', (43203, 43266), False, 'import numpy\n'), ((43922, 43993), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_MIDDLE_3D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_MIDDLE_3D, atol=TOLERANCE)\n', (43936, 43993), False, 'import numpy\n'), ((44649, 44720), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_MIDDLE_4D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_MIDDLE_4D, atol=TOLERANCE)\n', (44663, 44720), False, 'import numpy\n'), ((45376, 45447), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'DOWNSIZED_MATRIX_MIDDLE_5D'], {'atol': 'TOLERANCE'}), '(this_matrix, DOWNSIZED_MATRIX_MIDDLE_5D, atol=TOLERANCE)\n', (45390, 45447), False, 'import numpy\n'), ((45911, 45988), 'numpy.array_equal', 'numpy.array_equal', (['this_num_points_by_class', 'NUM_POINTS_BY_CLASS_BINARY_LARGE'], {}), '(this_num_points_by_class, NUM_POINTS_BY_CLASS_BINARY_LARGE)\n', (45928, 45988), False, 'import numpy\n'), ((46454, 46532), 'numpy.array_equal', 'numpy.array_equal', (['this_num_points_by_class', 'NUM_POINTS_BY_CLASS_TERNARY_LARGE'], {}), '(this_num_points_by_class, NUM_POINTS_BY_CLASS_TERNARY_LARGE)\n', (46471, 46532), False, 'import numpy\n'), ((46996, 47073), 'numpy.array_equal', 'numpy.array_equal', (['this_num_points_by_class', 'NUM_POINTS_BY_CLASS_BINARY_SMALL'], {}), '(this_num_points_by_class, NUM_POINTS_BY_CLASS_BINARY_SMALL)\n', (47013, 47073), False, 'import numpy\n'), ((47539, 47617), 'numpy.array_equal', 'numpy.array_equal', (['this_num_points_by_class', 'NUM_POINTS_BY_CLASS_TERNARY_SMALL'], {}), '(this_num_points_by_class, NUM_POINTS_BY_CLASS_TERNARY_SMALL)\n', (47556, 47617), False, 'import numpy\n'), ((49455, 49562), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_4D_MINMAX_NORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_4D_MINMAX_NORM, atol\n 
=TOLERANCE, equal_nan=True)\n', (49469, 49562), False, 'import numpy\n'), ((50111, 50218), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_5D_MINMAX_NORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_5D_MINMAX_NORM, atol\n =TOLERANCE, equal_nan=True)\n', (50125, 50218), False, 'import numpy\n'), ((50698, 50800), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_4D_Z_NORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_4D_Z_NORM, atol=\n TOLERANCE, equal_nan=True)\n', (50712, 50800), False, 'import numpy\n'), ((51281, 51383), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_5D_Z_NORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_5D_Z_NORM, atol=\n TOLERANCE, equal_nan=True)\n', (51295, 51383), False, 'import numpy\n'), ((52167, 52269), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_4D_DENORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_4D_DENORM, atol=\n TOLERANCE, equal_nan=True)\n', (52181, 52269), False, 'import numpy\n'), ((53053, 53155), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_5D_DENORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_5D_DENORM, atol=\n TOLERANCE, equal_nan=True)\n', (53067, 53155), False, 'import numpy\n'), ((53867, 53969), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_4D_DENORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_predictor_matrix, PREDICTOR_MATRIX_4D_DENORM, atol=\n TOLERANCE, equal_nan=True)\n', (53881, 53969), False, 'import numpy\n'), ((54681, 54783), 'numpy.allclose', 'numpy.allclose', (['this_predictor_matrix', 'PREDICTOR_MATRIX_5D_DENORM'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), 
'(this_predictor_matrix, PREDICTOR_MATRIX_5D_DENORM, atol=\n TOLERANCE, equal_nan=True)\n', (54695, 54783), False, 'import numpy\n'), ((57548, 57620), 'numpy.array_equal', 'numpy.array_equal', (['this_frontal_grid_matrix', 'FRONTAL_GRID_MATRIX_TERNARY'], {}), '(this_frontal_grid_matrix, FRONTAL_GRID_MATRIX_TERNARY)\n', (57565, 57620), False, 'import numpy\n'), ((57919, 57984), 'numpy.array_equal', 'numpy.array_equal', (['this_binary_matrix', 'FRONTAL_GRID_MATRIX_BINARY'], {}), '(this_binary_matrix, FRONTAL_GRID_MATRIX_BINARY)\n', (57936, 57984), False, 'import numpy\n'), ((58391, 58465), 'numpy.array_equal', 'numpy.array_equal', (['this_dilated_matrix', 'FRONTAL_GRID_MATRIX_BINARY_DILATED'], {}), '(this_dilated_matrix, FRONTAL_GRID_MATRIX_BINARY_DILATED)\n', (58408, 58465), False, 'import numpy\n'), ((58876, 58951), 'numpy.array_equal', 'numpy.array_equal', (['this_dilated_matrix', 'FRONTAL_GRID_MATRIX_TERNARY_DILATED'], {}), '(this_dilated_matrix, FRONTAL_GRID_MATRIX_TERNARY_DILATED)\n', (58893, 58951), False, 'import numpy\n'), ((59209, 59294), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'PREDICTOR_MATRIX_4D'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_matrix, PREDICTOR_MATRIX_4D, atol=TOLERANCE, equal_nan=True\n )\n', (59223, 59294), False, 'import numpy\n'), ((59507, 59592), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'PREDICTOR_MATRIX_5D'], {'atol': 'TOLERANCE', 'equal_nan': '(True)'}), '(this_matrix, PREDICTOR_MATRIX_5D, atol=TOLERANCE, equal_nan=True\n )\n', (59521, 59592), False, 'import numpy\n'), ((59905, 59969), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'FCN_INPUT_MATRIX_3D'], {'atol': 'TOLERANCE'}), '(this_matrix, FCN_INPUT_MATRIX_3D, atol=TOLERANCE)\n', (59919, 59969), False, 'import numpy\n'), ((60287, 60351), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'FCN_INPUT_MATRIX_4D'], {'atol': 'TOLERANCE'}), '(this_matrix, FCN_INPUT_MATRIX_4D, atol=TOLERANCE)\n', (60301, 60351), False, 'import numpy\n'), 
((60669, 60733), 'numpy.allclose', 'numpy.allclose', (['this_matrix', 'FCN_INPUT_MATRIX_5D'], {'atol': 'TOLERANCE'}), '(this_matrix, FCN_INPUT_MATRIX_5D, atol=TOLERANCE)\n', (60683, 60733), False, 'import numpy\n'), ((61622, 61722), 'numpy.allclose', 'numpy.allclose', (['this_small_predictor_matrix', 'DOWNSIZED_MATRIX_AT_SELECTED_POINTS'], {'atol': 'TOLERANCE'}), '(this_small_predictor_matrix,\n DOWNSIZED_MATRIX_AT_SELECTED_POINTS, atol=TOLERANCE)\n', (61636, 61722), False, 'import numpy\n'), ((61769, 61840), 'numpy.array_equal', 'numpy.array_equal', (['this_target_vector', 'TARGET_VECTOR_AT_SELECTED_POINTS'], {}), '(this_target_vector, TARGET_VECTOR_AT_SELECTED_POINTS)\n', (61786, 61840), False, 'import numpy\n'), ((61879, 61955), 'numpy.array_equal', 'numpy.array_equal', (['these_example_indices', 'EXAMPLE_INDICES_AT_SELECTED_POINTS'], {}), '(these_example_indices, EXAMPLE_INDICES_AT_SELECTED_POINTS)\n', (61896, 61955), False, 'import numpy\n'), ((61994, 62062), 'numpy.array_equal', 'numpy.array_equal', (['these_center_rows', 'CENTER_ROWS_AT_SELECTED_POINTS'], {}), '(these_center_rows, CENTER_ROWS_AT_SELECTED_POINTS)\n', (62011, 62062), False, 'import numpy\n'), ((62101, 62175), 'numpy.array_equal', 'numpy.array_equal', (['these_center_columns', 'CENTER_COLUMNS_AT_SELECTED_POINTS'], {}), '(these_center_columns, CENTER_COLUMNS_AT_SELECTED_POINTS)\n', (62118, 62175), False, 'import numpy\n'), ((28421, 28519), 'numpy.array_equal', 'numpy.array_equal', (['first_target_point_dict[this_key][i]', 'second_target_point_dict[this_key][i]'], {}), '(first_target_point_dict[this_key][i],\n second_target_point_dict[this_key][i])\n', (28438, 28519), False, 'import numpy\n'), ((30209, 30245), 'numpy.transpose', 'numpy.transpose', (['FULL_NARR_MATRIX_3D'], {}), '(FULL_NARR_MATRIX_3D)\n', (30224, 30245), False, 'import numpy\n'), ((48116, 48237), 'numpy.isclose', 'numpy.isclose', (['this_class_weight_dict[this_key]', 'CLASS_WEIGHT_DICT_BINARY[this_key]'], {'atol': 
'TOLERANCE_FOR_CLASS_WEIGHT'}), '(this_class_weight_dict[this_key], CLASS_WEIGHT_DICT_BINARY[\n this_key], atol=TOLERANCE_FOR_CLASS_WEIGHT)\n', (48129, 48237), False, 'import numpy\n'), ((48770, 48892), 'numpy.isclose', 'numpy.isclose', (['this_class_weight_dict[this_key]', 'CLASS_WEIGHT_DICT_TERNARY[this_key]'], {'atol': 'TOLERANCE_FOR_CLASS_WEIGHT'}), '(this_class_weight_dict[this_key], CLASS_WEIGHT_DICT_TERNARY[\n this_key], atol=TOLERANCE_FOR_CLASS_WEIGHT)\n', (48783, 48892), False, 'import numpy\n')] |
# @Time : 2020/11/16
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.data.dataset.dataset
##################################
"""
import numpy as np
import os
from logging import getLogger
from textbox.utils.enum_type import SpecialTokens
class Dataset(object):
def __init__(self, config):
self.config = config
self.dataset_path = config['data_path']
self.logger = getLogger()
self.padding_token = SpecialTokens.PAD
self.unknown_token = SpecialTokens.UNK
self.sos_token = SpecialTokens.SOS
self.eos_token = SpecialTokens.EOS
self.special_token_list = [self.padding_token, self.unknown_token, self.sos_token, self.eos_token]
if ('user_token_list' in config):
self.user_token_list = config['user_token_list']
self.user_token_idx = [4 + i for i, _ in enumerate(self.user_token_list)]
self.special_token_list += self.user_token_list
self.max_vocab_size = config['max_vocab_size']
self.max_seq_length = config['max_seq_length']
self.restored_exist = self.detect_restored(self.dataset_path)
if self.restored_exist:
self._load_restored()
else:
self._from_scratch()
def _from_scratch(self):
"""Load dataset from scratch.
Initialize attributes firstly, then load data from atomic files, pre-process the dataset lastly.
"""
self.logger.info('Loading data from scratch')
self._get_preset()
self._load_data(self.dataset_path)
self._data_processing()
self._dump_data(self.dataset_path)
def _load_restored(self):
"""Load dataset from restored.
Initialize attributes firstly, then load data from binary files.
"""
self.logger.info('Loading data from restored')
self._get_preset()
self.load_restored(self.dataset_path)
@staticmethod
def check_file_exist(filename):
if not os.path.isfile(filename):
return False
return True
def _get_preset(self):
"""Initialization useful inside attributes.
"""
raise NotImplementedError('Method [_get_preset] should be implemented.')
def _load_data(self, dataset_path):
r"""Load dataset with dataset split strategy.
Args:
dataset_path (str): path of dataset dir.
"""
raise NotImplementedError('Method [_load_data] should be implemented.')
def _dump_data(self, dataset_path):
r"""dump dataset with processed dataset.
Args:
dataset_path (str): path of dataset dir.
"""
raise NotImplementedError('Method [_dump_data] should be implemented.')
def _text2id(self, text_data, token2idx):
r"""transform text to id. but out of vocab word will still be saved as original word
input:
text_data: list -> list -> word, original text
token2idx: dict, map token to index
output:
text_idx_data: list -> list -> int, list of word index
"""
text_idx_data = []
for text in text_data:
text_idx = self._token2idx(text, token2idx)
text_idx_data.append(text_idx)
return text_idx_data
def _id2text(self, idx_data, idx2token):
r"""transform id to text.
input:
idx_data: list -> list -> int, list of word idx
idx2token: dict, map token to index
output:
text_data: list -> list -> word, list of word
"""
text_data = []
for text in idx_data:
text = self._idx2token(text, idx2token)
text_data.append(text)
return text_data
def _token2idx(self, inputs, token2idx):
if isinstance(inputs, list):
return [self._token2idx(x, token2idx) for x in inputs]
return token2idx.get(inputs, inputs)
def _idx2token(self, inputs, idx2token):
if isinstance(inputs, list):
return [self._idx2token(x, idx2token) for x in inputs]
return idx2token.get(inputs, inputs)
def _data_processing(self):
r"""Necessary processing steps for dataset.
"""
raise NotImplementedError('Method [_data_processing] should be implemented.')
def _build_vocab(self):
r"""Shuffle the order of data, and it will be called by :meth:`__iter__()` if self.shuffle is True.
"""
raise NotImplementedError('Method [_build_vocab] should be implemented.')
    def shuffle(self):
        r"""Shuffle the order of data, and it will be called by :meth:`__iter__()` if self.shuffle is True.

        Abstract hook; subclasses must override it.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError('Method [shuffle] should be implemented.')
    @property
    def vocab_size(self):
        r"""int: The vocabulary size (number of entries in ``token2idx``)."""
        return len(self.token2idx)
    @property
    def padding_token_id(self):
        r"""int: The index of the special token indicating the padding token."""
        return self.token2idx[self.padding_token]
    @property
    def unknown_token_id(self):
        r"""int: The index of the special token indicating the unknown token."""
        return self.token2idx[self.unknown_token]
    @property
    def sos_token_id(self):
        r"""int: The index of the special token indicating the start of sequence."""
        return self.token2idx[self.sos_token]
    @property
    def eos_token_id(self):
        r"""int: The index of the special token indicating the end of sequence."""
        return self.token2idx[self.eos_token]
@staticmethod
def _calcu_split_ids(tot, ratios):
r"""Given split ratios, and total number, calculate the number of each part after splitting.
Other than the first one, each part is rounded down.
Args:
tot (int): Total number.
ratios (list): List of split ratios. No need to be normalized.
Returns:
list: Number of each part after splitting.
"""
cnt = [int(ratios[i] * tot) for i in range(len(ratios))]
cnt[0] = tot - sum(cnt[1:])
split_ids = np.cumsum(cnt)[:-1]
return list(split_ids)
    def detect_restored(self, dataset_path):
        r"""Detect whether restored datasets exist in dataset_path.

        Abstract hook; subclasses must override it.

        Args:
            dataset_path (str): path of dataset dir.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError('Method [detect_restored] should be implemented.')
def split_by_ratio(self, ratios):
r"""Split dataset by ratios.
Args:
ratios (list): List of split ratios. No need to be normalized.
Returns:
list: List of : `list -> int`, whose interaction features has been splitted.
Note:
Other than the first one, each part is rounded down.
"""
pass
    def build(self):
        r"""Prepare splitted data elements for dataloader.

        Abstract hook; subclasses must override it.

        Returns:
            list: List of dict : provide necessary elements for dataloader.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError('Method [build] should be implemented.')
| [
"numpy.cumsum",
"os.path.isfile",
"logging.getLogger"
] | [((402, 413), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (411, 413), False, 'from logging import getLogger\n'), ((1985, 2009), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1999, 2009), False, 'import os\n'), ((6171, 6185), 'numpy.cumsum', 'np.cumsum', (['cnt'], {}), '(cnt)\n', (6180, 6185), True, 'import numpy as np\n')] |
"""
The goal here is to see if there is any relationship between the
average salinity of a specified volume and a filtered version of
the forcing by rivers or QSin.
Designed to run over three years, so that we capture the effect
of the increasing salinity from 2017 to 2019.
"""
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import tef_fun
import flux_fun
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
from datetime import datetime, timedelta
import argparse
# Command-line configuration; defaults reproduce the cas6_v3_lo8b run.
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', type=str, default='cas6')
parser.add_argument('-t', '--tag', type=str, default='v3')
parser.add_argument('-x', '--ex_name', type=str, default='lo8b')
#parser.add_argument('-v', '--volume', type=str, default='Puget Sound')
args = parser.parse_args()
#which_vol = args.volume
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
gtagex = args.gridname + '_' + args.tag + '_' + args.ex_name
# Input/output directories for the TEF post-processing products.
indir00 = Ldir['LOo'] + 'tef/'
outdir = indir00 + 'misc_figs_cas6/'
def vlines(ax, lims):
    """Draw reference vertical lines on `ax` between lims[0] and lims[1]:
    solid lines at the 2018/2019 year boundaries and fainter dashed lines
    at mid-year (July 1) of 2017-2019.
    """
    lo, hi = lims[0], lims[1]
    year_starts = [datetime(2018, 1, 1), datetime(2019, 1, 1)]
    mid_years = [datetime(yr, 7, 1) for yr in (2017, 2018, 2019)]
    ax.vlines(year_starts, lo, hi, alpha=.5)
    ax.vlines(mid_years, lo, hi, alpha=.3, ls='--')
plt.close('all')
vol_list = ['Puget Sound', 'Puget Sound Inner', 'Salish Sea', 'Hood Canal']
#vol_list = ['Puget Sound Inner']
# One four-panel figure per control volume: (a) salinities, (b) exchange
# flow, (c) salt-budget terms, (d) river flow, all over 2017-2019.
for which_vol in vol_list:
    qr_dict = {} # save annual mean river flow
    qe_dict = {} # save annual mean exchange flow
    yy = 0
    for year in [2017, 2018, 2019]:
        year_str = str(year)
        # select input/output location
        run_name = gtagex+'_'+year_str+'.01.01_'+year_str+'.12.31'
        indir0 = indir00 + run_name + '/'
        # load low passed segment volume and net salt DataFrames
        v_lp_df = pd.read_pickle(indir0 + 'flux/daily_segment_volume.p')
        sv_lp_df = pd.read_pickle(indir0 + 'flux/daily_segment_net_salt.p')
        # info specific to each volume: segment list, bounding TEF sections
        # (with the sign that makes transport positive into the volume), and
        # axis limits/ticks for the plots.
        if which_vol == 'Salish Sea':
            seg_list = list(v_lp_df.columns)
            sect_sign_dict = {'jdf1':1, 'sog5':-1}
            slim = (30,34); stick=range(slim[0],slim[1]+1,1)
            qlim = (75,275); qtick=range(qlim[0],qlim[1]+25,25)
            rlim = (0,20); rtick=range(rlim[0],rlim[1]+5,5)
            blim = (-800,800); btick=range(blim[0],blim[1]+200,200)
        elif which_vol == 'Puget Sound':
            seg_list = flux_fun.ssA + flux_fun.ssM + flux_fun.ssT + flux_fun.ssS + flux_fun.ssW + flux_fun.ssH
            sect_sign_dict = {'ai1':1, 'dp':1}
            slim = (29,33); stick=range(slim[0],slim[1]+1,1)
            qlim = (30,50); qtick=range(qlim[0],qlim[1]+10,10)
            rlim = (0,3); rtick=range(rlim[0],rlim[1]+1,1)
            blim = (-100,100); btick=range(blim[0],blim[1]+50,50)
        elif which_vol == 'Puget Sound Inner':
            seg_list = flux_fun.ssM + flux_fun.ssT + flux_fun.ssS + flux_fun.ssW
            sect_sign_dict = {'ai4':1, 'dp':1}
            slim = (27,32); stick=range(slim[0],slim[1]+1,1)
            qlim = (15,35); qtick=range(qlim[0],qlim[1]+10,10)
            rlim = (0,3); rtick=range(rlim[0],rlim[1]+1,1)
            blim = (-100,100); btick=range(blim[0],blim[1]+50,50)
        elif which_vol == 'Hood Canal':
            seg_list = flux_fun.ssH
            sect_sign_dict = {'hc1':1}
            slim = (28,32); stick=range(slim[0],slim[1]+1,1)
            qlim = (0,8); qtick=range(qlim[0],qlim[1]+2,2)
            rlim = (0,.3); rtick=list(np.arange(rlim[0],rlim[1]+.1,.1))
            blim = (-8,8); btick=range(blim[0],blim[1]+2,2)
        sv_lp_df = sv_lp_df[seg_list]
        v_lp_df = v_lp_df[seg_list]
        # collect every river draining into the selected segments
        river_list = []
        for seg_name in seg_list:
            seg = flux_fun.segs[seg_name]
            river_list = river_list + seg['R']
        riv_df = pd.read_pickle(Ldir['LOo'] + 'river/'
            + Ldir['gtag'] + '_'+year_str+'.01.01_'+year_str+'.12.31.p')
        # shift river records by half a day to align with the daily averages
        riv_df.index += timedelta(days=0.5)
        riv_df = riv_df.loc[sv_lp_df.index, river_list]
        # TEF fluxes at each bounding section
        tef_df_dict = {}
        for sn in sect_sign_dict.keys():
            in_sign = sect_sign_dict[sn]
            tef_df_dict[sn] = flux_fun.get_fluxes(indir0, sn, in_sign=in_sign)
        vol_df, salt_df, vol_rel_err, salt_rel_err, salt_rel_err_qe = flux_fun.get_budgets(
            sv_lp_df, v_lp_df, riv_df, tef_df_dict, seg_list)
        qr_dict[year] = vol_df['Qr'].mean()
        qe_dict[year] = (vol_df['Qin'].mean() + vol_df['-Qout'].mean())/2
        #print(qr_dict[year])
        # concatenate the three annual budgets into one continuous series
        if yy == 0:
            vol_df_all = vol_df.copy()
            salt_df_all = salt_df.copy()
        else:
            vol_df_all = vol_df_all.append(vol_df)
            salt_df_all = salt_df_all.append(salt_df)
        yy += 1
    # Fix a problem where .resample() would drop the Sin column
    # because it was somehow not numeric
    for cn in vol_df_all.columns:
        vol_df_all[cn] = pd.to_numeric(vol_df_all[cn])
    for cn in salt_df_all.columns:
        salt_df_all[cn] = pd.to_numeric(salt_df_all[cn])
    # NOTE: I think .to_numeric() operates on Series, so we only do one column at a time.
    # Monthly means, labeled at mid-month.
    vol_df_all = vol_df_all.resample('M', loffset='-15d').mean()
    salt_df_all = salt_df_all.resample('M', loffset='-15d').mean()
    # rescale to be 1000 m3/s
    for vn in ['QSin', '-QSout', 'Ftide', 'dSnet_dt', 'Error', 'Qe', 'Qnet', 'QeDS', '-QrSbar']:
        salt_df_all[vn] = salt_df_all[vn]/1000
    for vn in ['Qin', '-Qout', 'Qtide', 'Qr', 'V', 'dV_dt', 'Error']:
        vol_df_all[vn] = vol_df_all[vn]/1000
    # plotting
    tx = .05
    ty = .9
    ty2 = .05
    fs = 16
    lw = 3
    dt0 = datetime(2017,1,1)
    dt1 = datetime(2020,1,1)
    plt.rc('font', size=fs)
    fig = plt.figure(figsize=(18,10))
    # (a) volume-averaged, inflowing and outflowing salinities
    ax = fig.add_subplot(221)
    salt_df_all[['Smean', 'Sin','Sout']].plot(ax=ax, grid=False, color=['goldenrod','r','b'], linewidth=lw)
    ax.legend(labels=[r'$\frac{1}{V}\int{S \ dV}$',r'$S_{in}$',r'$S_{out}$'], loc='lower right')
    ax.text(tx, ty, '(a) ' + which_vol + ' Salinities $[g \ kg^{-1}]$', size=fs, transform=ax.transAxes,
        bbox=dict(facecolor='w', edgecolor='None',alpha=.5), weight='bold')
    ax.set_xticklabels([])
    ax.set_xticklabels([], minor=True)
    ax.set_xlim(dt0, dt1)
    ax.set_ylim(slim)
    ax.set_yticks(stick)
    vlines(ax,slim)
    ax.set_xticks([datetime(2017,1,1),datetime(2017,7,1),datetime(2018,1,1),
        datetime(2018,7,1),datetime(2019,1,1),datetime(2019,7,1),datetime(2019,12,31)])
    # (b) exchange flow (TEF inflow and outflow)
    ax = fig.add_subplot(222)
    vol_df_all[['Qin', '-Qout']].plot(ax=ax, grid=False, color=['r','b'], linewidth=lw)
    ax.legend(labels=[r'$Q_{in}$',r'$-Q_{out}$'], loc='lower right')
    ax.set_ylim(bottom=0)
    ax.text(tx, ty2, r'(b) Exchange Flow $[1000\ m^{3}s^{-1}]$', size=fs, transform=ax.transAxes,
        bbox=dict(facecolor='w', edgecolor='None',alpha=.5), weight='bold')
    ax.set_xticklabels([])
    ax.set_xticklabels([], minor=True)
    ax.set_xlim(dt0, dt1)
    ax.set_ylim(qlim)
    ax.set_yticks(qtick)
    vlines(ax,qlim)
    ax.set_xticks([datetime(2017,1,1),datetime(2017,7,1),datetime(2018,1,1),
        datetime(2018,7,1),datetime(2019,1,1),datetime(2019,7,1),datetime(2019,12,31)])
    #add annual means
    for year in [2017,2018,2019]:
        ax.text(datetime(year,7,1), qlim[0] + (qlim[1]-qlim[0])*2.5/3,
            'Mean:\n%0.1f $[10^{3} m^{3}s^{-1}]$' % (qe_dict[year]/1000),
            ha='center', va='center',
            color = 'purple', weight='bold')
    # (d) net river inflow
    ax = fig.add_subplot(224)
    vol_df_all['Qr'].plot(ax=ax, grid=False, legend=False, color='c', linewidth=lw)
    ax.set_ylim(bottom=0)
    ax.text(tx, ty2, r'(d) Net River Flow $[1000 \ m^{3}s^{-1}]$', size=fs, transform=ax.transAxes,
        bbox=dict(facecolor='w', edgecolor='None',alpha=.5), weight='bold')
    ax.set_xlim(dt0, dt1)
    ax.set_ylim(rlim)
    ax.set_yticks(rtick)
    vlines(ax,rlim)
    ax.set_xticks([datetime(2017,1,1),datetime(2017,7,1),datetime(2018,1,1),
        datetime(2018,7,1),datetime(2019,1,1),datetime(2019,7,1),datetime(2019,12,31)])
    ax.set_xticklabels(['','2017','','2018','','2019',''], rotation=0,
        fontdict={'horizontalalignment':'center'})
    #add annual means
    for year in [2017,2018,2019]:
        ax.text(datetime(year,7,1), rlim[1]*2.5/3,
            'Mean:\n' + str(int(qr_dict[year])) + ' $[m^{3}s^{-1}]$',
            ha='center', va='center',
            color = 'c', weight='bold')
    ax.set_xlabel('Year')
    # (c) salt-budget terms: storage, exchange, and river contributions
    ax = fig.add_subplot(223)
    salt_df_all[['dSnet_dt','QeDS', '-QrSbar']].plot(ax=ax, grid=False,
        color=['sandybrown','darkorchid','cornflowerblue'],
        linewidth=lw)
    ax.legend(labels=[r'$\frac{d}{dt}\int{S \ dV}$',r'$Q_e \Delta S$', r'$-Q_R \overline{S}$'], loc='upper right')
    ax.text(tx, ty, r'(c) Salt Budget Terms $[1000 \ g \ kg^{-1} \ m^{3}s^{-1}]$',
        size=fs, transform=ax.transAxes,
        bbox=dict(facecolor='w', edgecolor='None',alpha=.5), weight='bold')
    ax.set_xlim(dt0, dt1)
    ax.set_ylim(blim)
    ax.set_yticks(btick)
    vlines(ax,blim)
    ax.hlines(0,dt0,dt1)
    ax.set_xticks([datetime(2017,1,1),datetime(2017,7,1),datetime(2018,1,1),
        datetime(2018,7,1),datetime(2019,1,1),datetime(2019,7,1),datetime(2019,12,31)])
    ax.set_xticklabels(['','2017','','2018','','2019',''], rotation=0,
        fontdict={'horizontalalignment':'center'})
    ax.set_xlabel('Year')
    fig.tight_layout()
    fig.savefig(outdir + 'Salt_3year_'+which_vol.replace(' ','_')+'.png')
    plt.show()
    plt.rcdefaults()
| [
"os.path.abspath",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"datetime.datetime",
"matplotlib.pyplot.rcdefaults",
"matplotlib.pyplot.figure",
"datetime.timedelta",
"Lfun.Lstart",
"matplotlib.pyplot.rc",
"flux_fun.get_fluxes",
"pandas.read_pickle",
"numpy... | [((545, 570), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (568, 570), False, 'import argparse\n'), ((904, 940), 'Lfun.Lstart', 'Lfun.Lstart', (['args.gridname', 'args.tag'], {}), '(args.gridname, args.tag)\n', (915, 940), False, 'import Lfun\n'), ((1298, 1314), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1307, 1314), True, 'import matplotlib.pyplot as plt\n'), ((9640, 9656), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (9654, 9656), True, 'import matplotlib.pyplot as plt\n'), ((320, 347), 'os.path.abspath', 'os.path.abspath', (['"""../alpha"""'], {}), "('../alpha')\n", (335, 347), False, 'import os\n'), ((5761, 5781), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (5769, 5781), False, 'from datetime import datetime, timedelta\n'), ((5790, 5810), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (5798, 5810), False, 'from datetime import datetime, timedelta\n'), ((5814, 5837), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'fs'}), "('font', size=fs)\n", (5820, 5837), True, 'import matplotlib.pyplot as plt\n'), ((5848, 5876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (5858, 5876), True, 'import matplotlib.pyplot as plt\n'), ((9624, 9634), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9632, 9634), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1917), 'pandas.read_pickle', 'pd.read_pickle', (["(indir0 + 'flux/daily_segment_volume.p')"], {}), "(indir0 + 'flux/daily_segment_volume.p')\n", (1877, 1917), True, 'import pandas as pd\n'), ((1937, 1993), 'pandas.read_pickle', 'pd.read_pickle', (["(indir0 + 'flux/daily_segment_net_salt.p')"], {}), "(indir0 + 'flux/daily_segment_net_salt.p')\n", (1951, 1993), True, 'import pandas as pd\n'), ((3904, 4014), 'pandas.read_pickle', 'pd.read_pickle', (["(Ldir['LOo'] + 'river/' + Ldir['gtag'] 
+ '_' + year_str + '.01.01_' +\n year_str + '.12.31.p')"], {}), "(Ldir['LOo'] + 'river/' + Ldir['gtag'] + '_' + year_str +\n '.01.01_' + year_str + '.12.31.p')\n", (3918, 4014), True, 'import pandas as pd\n'), ((4039, 4058), 'datetime.timedelta', 'timedelta', ([], {'days': '(0.5)'}), '(days=0.5)\n', (4048, 4058), False, 'from datetime import datetime, timedelta\n'), ((4377, 4447), 'flux_fun.get_budgets', 'flux_fun.get_budgets', (['sv_lp_df', 'v_lp_df', 'riv_df', 'tef_df_dict', 'seg_list'], {}), '(sv_lp_df, v_lp_df, riv_df, tef_df_dict, seg_list)\n', (4397, 4447), False, 'import flux_fun\n'), ((5039, 5068), 'pandas.to_numeric', 'pd.to_numeric', (['vol_df_all[cn]'], {}), '(vol_df_all[cn])\n', (5052, 5068), True, 'import pandas as pd\n'), ((5130, 5160), 'pandas.to_numeric', 'pd.to_numeric', (['salt_df_all[cn]'], {}), '(salt_df_all[cn])\n', (5143, 5160), True, 'import pandas as pd\n'), ((1109, 1129), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (1117, 1129), False, 'from datetime import datetime, timedelta\n'), ((1128, 1148), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (1136, 1148), False, 'from datetime import datetime, timedelta\n'), ((1190, 1210), 'datetime.datetime', 'datetime', (['(2017)', '(7)', '(1)'], {}), '(2017, 7, 1)\n', (1198, 1210), False, 'from datetime import datetime, timedelta\n'), ((1209, 1229), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (1217, 1229), False, 'from datetime import datetime, timedelta\n'), ((1228, 1248), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(1)'], {}), '(2019, 7, 1)\n', (1236, 1248), False, 'from datetime import datetime, timedelta\n'), ((4257, 4305), 'flux_fun.get_fluxes', 'flux_fun.get_fluxes', (['indir0', 'sn'], {'in_sign': 'in_sign'}), '(indir0, sn, in_sign=in_sign)\n', (4276, 4305), False, 'import flux_fun\n'), ((6471, 6491), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 
1)\n', (6479, 6491), False, 'from datetime import datetime, timedelta\n'), ((6490, 6510), 'datetime.datetime', 'datetime', (['(2017)', '(7)', '(1)'], {}), '(2017, 7, 1)\n', (6498, 6510), False, 'from datetime import datetime, timedelta\n'), ((6509, 6529), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (6517, 6529), False, 'from datetime import datetime, timedelta\n'), ((6537, 6557), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (6545, 6557), False, 'from datetime import datetime, timedelta\n'), ((6556, 6576), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (6564, 6576), False, 'from datetime import datetime, timedelta\n'), ((6575, 6595), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(1)'], {}), '(2019, 7, 1)\n', (6583, 6595), False, 'from datetime import datetime, timedelta\n'), ((6594, 6616), 'datetime.datetime', 'datetime', (['(2019)', '(12)', '(31)'], {}), '(2019, 12, 31)\n', (6602, 6616), False, 'from datetime import datetime, timedelta\n'), ((7183, 7203), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (7191, 7203), False, 'from datetime import datetime, timedelta\n'), ((7202, 7222), 'datetime.datetime', 'datetime', (['(2017)', '(7)', '(1)'], {}), '(2017, 7, 1)\n', (7210, 7222), False, 'from datetime import datetime, timedelta\n'), ((7221, 7241), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (7229, 7241), False, 'from datetime import datetime, timedelta\n'), ((7249, 7269), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (7257, 7269), False, 'from datetime import datetime, timedelta\n'), ((7268, 7288), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (7276, 7288), False, 'from datetime import datetime, timedelta\n'), ((7287, 7307), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(1)'], {}), '(2019, 7, 
1)\n', (7295, 7307), False, 'from datetime import datetime, timedelta\n'), ((7306, 7328), 'datetime.datetime', 'datetime', (['(2019)', '(12)', '(31)'], {}), '(2019, 12, 31)\n', (7314, 7328), False, 'from datetime import datetime, timedelta\n'), ((7401, 7421), 'datetime.datetime', 'datetime', (['year', '(7)', '(1)'], {}), '(year, 7, 1)\n', (7409, 7421), False, 'from datetime import datetime, timedelta\n'), ((8042, 8062), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (8050, 8062), False, 'from datetime import datetime, timedelta\n'), ((8061, 8081), 'datetime.datetime', 'datetime', (['(2017)', '(7)', '(1)'], {}), '(2017, 7, 1)\n', (8069, 8081), False, 'from datetime import datetime, timedelta\n'), ((8080, 8100), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (8088, 8100), False, 'from datetime import datetime, timedelta\n'), ((8108, 8128), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (8116, 8128), False, 'from datetime import datetime, timedelta\n'), ((8127, 8147), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (8135, 8147), False, 'from datetime import datetime, timedelta\n'), ((8146, 8166), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(1)'], {}), '(2019, 7, 1)\n', (8154, 8166), False, 'from datetime import datetime, timedelta\n'), ((8165, 8187), 'datetime.datetime', 'datetime', (['(2019)', '(12)', '(31)'], {}), '(2019, 12, 31)\n', (8173, 8187), False, 'from datetime import datetime, timedelta\n'), ((8382, 8402), 'datetime.datetime', 'datetime', (['year', '(7)', '(1)'], {}), '(year, 7, 1)\n', (8390, 8402), False, 'from datetime import datetime, timedelta\n'), ((9228, 9248), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (9236, 9248), False, 'from datetime import datetime, timedelta\n'), ((9247, 9267), 'datetime.datetime', 'datetime', (['(2017)', '(7)', '(1)'], {}), '(2017, 7, 
1)\n', (9255, 9267), False, 'from datetime import datetime, timedelta\n'), ((9266, 9286), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (9274, 9286), False, 'from datetime import datetime, timedelta\n'), ((9294, 9314), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (9302, 9314), False, 'from datetime import datetime, timedelta\n'), ((9313, 9333), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (9321, 9333), False, 'from datetime import datetime, timedelta\n'), ((9332, 9352), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(1)'], {}), '(2019, 7, 1)\n', (9340, 9352), False, 'from datetime import datetime, timedelta\n'), ((9351, 9373), 'datetime.datetime', 'datetime', (['(2019)', '(12)', '(31)'], {}), '(2019, 12, 31)\n', (9359, 9373), False, 'from datetime import datetime, timedelta\n'), ((3566, 3604), 'numpy.arange', 'np.arange', (['rlim[0]', '(rlim[1] + 0.1)', '(0.1)'], {}), '(rlim[0], rlim[1] + 0.1, 0.1)\n', (3575, 3604), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
class PolytopeCompressor(object):
    """Quantizes each length-``c_dim`` sub-vector onto its dominant signed
    unit basis vector (a cross-polytope vertex), storing one norm and one
    codeword index per sub-vector.
    """

    def __init__(self, size, shape, c_dim=128, ks=128):
        self.Ks = ks
        self.size = size
        self.shape = shape
        self.dim = c_dim
        # Narrowest unsigned integer type able to hold a codeword index.
        if self.Ks <= 2 ** 7:
            self.code_dtype = np.uint8
        elif self.Ks <= 2 ** 15:
            self.code_dtype = np.uint16
        else:
            self.code_dtype = np.uint32
        self.M = size // self.dim
        assert size % self.dim == 0, \
            "dimension of variable should be smaller than {} or dividable by {}".format(self.dim, self.dim)
        # Codebook: the 2*dim signed unit basis vectors, +e_i rows first.
        identity = np.eye(self.dim)
        self.codewords = np.concatenate((identity, -identity))

    def compress(self, vec):
        """Quantize ``vec``; returns ``[norms, codes]``, one entry per sub-vector."""
        flattened = vec.reshape((-1, self.dim))
        norms = np.linalg.norm(flattened, axis=1)
        # Index of the dominant (largest-magnitude) coordinate per row.
        codes = np.argmax(np.abs(flattened), axis=1)
        dominant = flattened[np.arange(len(flattened)), codes]
        # Negative dominant coordinates select the -e_i half of the codebook
        # by shifting the index up by dim; sign==+1 gives a shift of 0.
        sign_flip = (1 - np.sign(dominant).astype(dtype=np.int32)) * self.dim // 2
        assert np.all(sign_flip >= 0)
        codes = codes + sign_flip
        return [norms, codes.astype(self.code_dtype)]

    def decompress(self, signature):
        """Reconstruct an array of ``self.shape`` from ``[norms, codes]``."""
        [norms, codes] = signature
        vec = np.empty((len(norms), self.dim), dtype=np.float32)
        vec[:, :] = self.codewords[codes, :]
        # Scale each codeword row by its stored norm (row-wise broadcast).
        vec[:, :] = vec * norms[:, np.newaxis]
        return vec.reshape(self.shape)
| [
"numpy.abs",
"numpy.linalg.norm",
"numpy.eye",
"numpy.all"
] | [((693, 720), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {'axis': '(1)'}), '(vec, axis=1)\n', (707, 720), True, 'import numpy as np\n'), ((889, 911), 'numpy.all', 'np.all', (['(sign_flip >= 0)'], {}), '(sign_flip >= 0)\n', (895, 911), True, 'import numpy as np\n'), ((747, 758), 'numpy.abs', 'np.abs', (['vec'], {}), '(vec)\n', (753, 758), True, 'import numpy as np\n'), ((567, 583), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (573, 583), True, 'import numpy as np\n'), ((586, 602), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (592, 602), True, 'import numpy as np\n')] |
import numpy as np
y=np.array([1,0,1,1,1,0,0,1,0,1])
y_predict=np.array([1,1,1,0,1,0,0,1,0,0])
# −(ylog(p)+(1−y)log(1−p))
ellipsis = 1e-15
def log_loss(y,y_predict):
ellipsis=1e-15
y_predict = np.array([max(i, ellipsis) for i in y_predict])
y_predict = np.array([min(i, 1 - ellipsis) for i in y_predict])
return sum(-(y*np.log(y_predict)+(1-y)*np.log(1-y_predict)))/len(y)
print(log_loss(y,y_predict))
| [
"numpy.array",
"numpy.log"
] | [((21, 61), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 1, 0, 0, 1, 0, 1]'], {}), '([1, 0, 1, 1, 1, 0, 0, 1, 0, 1])\n', (29, 61), True, 'import numpy as np\n'), ((63, 103), 'numpy.array', 'np.array', (['[1, 1, 1, 0, 1, 0, 0, 1, 0, 0]'], {}), '([1, 1, 1, 0, 1, 0, 0, 1, 0, 0])\n', (71, 103), True, 'import numpy as np\n'), ((336, 353), 'numpy.log', 'np.log', (['y_predict'], {}), '(y_predict)\n', (342, 353), True, 'import numpy as np\n'), ((360, 381), 'numpy.log', 'np.log', (['(1 - y_predict)'], {}), '(1 - y_predict)\n', (366, 381), True, 'import numpy as np\n')] |
"""
This script builds the NER/POS tagged data we trained on for TriviaQA. It should be
run with `port` indicating were a CoreNLP server can be found, we used
`stanford-corenlp-full-2018-10-05`. If the server has multiple threads, the `n_processes`
flag can be used to send multiple queries at a time to speed up tagging.
This script does not 100% precisely reproduce the cached data that is loaded by default in our
train/eval scripts. In particular, the POS and NER tags are (very occasionally) different.
I am not sure what the cause is, but my test show <1% of the tags differ.
"""
import json
import regex
from typing import List, Union, Optional, Dict
import numpy as np
import requests
import argparse
from debias.datasets.triviaqa_cp import AnnotatedTriviaQaExample, load_annotated_triviaqa
from debias.preprocessing.corenlp_client import CoreNLPClient
from debias.utils import process_par, py_utils
from triviaqa_cp.triviaqa_cp_evaluation import normalize_answer
def extract_normalized_answers(ans):
    """Get the normalized answers from a json TriviaQa question.

    Returns None for test questions (no answer key); otherwise the list of
    normalized aliases, extended with normalized human answers when present.
    """
    if ans is None:
        return None  # test question
    answers = ans['NormalizedAliases']
    human_answers = ans.get('HumanAnswers')
    if human_answers is not None:
        # These are fair game since they are used in the eval script, but be
        # careful to normalize them as well
        answers += [normalize_answer(x) for x in human_answers]
    return answers
def find_answer_spans(para: List[str], tokenized_answers: List[List[str]]):
    """Find spans that the eval script would given an EM of 1 in `para`.

    Returns a de-duplicated list of (start, end) token spans (end exclusive)
    whose normalized text matches one of `tokenized_answers`.
    """
    # Compare in normalized space, mirroring the official evaluation.
    words = [normalize_answer(w) for w in para]
    occurances = []
    for answer_ix, answer in enumerate(tokenized_answers):
        # Candidate starts: every position matching the answer's first token.
        word_starts = [i for i, w in enumerate(words) if answer[0] == w]
        n_tokens = len(answer)
        for start in word_starts:
            end = start + 1
            ans_token = 1
            while ans_token < n_tokens and end < len(words):
                next = words[end]
                if answer[ans_token] == next:
                    ans_token += 1
                    end += 1
                elif next == "":
                    # Tokens that normalize to "" (pure punctuation) are
                    # skipped instead of breaking the match.
                    end += 1
                else:
                    break
            # Matched the whole answer: record the span.
            if n_tokens == ans_token:
                occurances.append((start, end))
    return list(set(occurances))
# Character classes to split words on: dashes (Pd), other punctuation (Po),
# open/close brackets (Ps/Pe), symbols (S), connectors (Pc), plus '' quotes.
resplit = r"\p{Pd}\p{Po}\p{Ps}\p{Pe}\p{S}\p{Pc}"
resplit = "([" + resplit + "]|'')"
# Negative lookahead: '.', ',' and the apostrophe are never treated as
# split characters.
split_regex = r"(?![\.,'])" + resplit
split_regex = regex.compile(split_regex)
def extract_tokens(annotations, tags=True):
    """Extract tokens from CoreNLP output.

    Args:
        annotations: list of CoreNLP sentence dicts, each with a "tokens" list.
        tags: if True, also collect POS and NER tags per token.

    Returns:
        (words, pos, ner, sentence_lens) if `tags`, else (words, sentence_lens).
    """
    words, pos, ner = [], [], []
    sentence_lens = []
    on_len = 0  # running token count, used to compute per-sentence lengths
    for sentences in annotations:
        if len(sentences["tokens"]) == 0:
            raise RuntimeError()
        for token in sentences["tokens"]:
            w = token["originalText"]
            # CoreNLP's LaTeX-style quote tokens are kept whole.
            if w == "''" or w == '``':
                split = [w]
            else:
                # We tokenize a bit more aggresively the CoreNLP so span-based models
                # can make fine-grained choices of what text to return
                split = [x for x in split_regex.split(w) if len(x) > 0]
            if len(split) == 1:
                words.append(w)
                if tags:
                    p, n = token["pos"], token["ner"]
                    pos.append(p)
                    ner.append(n)
            else:
                words += split
                if tags:
                    # Sub-tokens inherit the NER tag; punctuation pieces created
                    # by the re-split get the sentinel POS tag 'SEP'.
                    p, n = token["pos"], token["ner"]
                    ner += [n] * len(split)
                    pos += ['SEP' if split_regex.match(x) else p for x in split]
        sentence_lens.append(len(words) - on_len)
        on_len = len(words)
    if tags:
        return words, pos, ner, sentence_lens
    else:
        return words, sentence_lens
class AnnotateTriviaqaQuestions(process_par.Processor):
    """Turns JSON TriviaQA questions into `AnnotatedTriviaQaExample`"""

    def __init__(self, port, reuse_session=False, legacy_tokenization=True):
        # Port a CoreNLP server is listening on.
        self.port = port
        # Re-use one HTTP session across queries (fewer sockets per worker).
        self.reuse_session = reuse_session
        # Reproduce the original two-pass tokenize-then-NER behavior; see the
        # comment inside `process`.
        self.legacy_tokenization = legacy_tokenization

    def process(self, data: List[Dict]) -> List[AnnotatedTriviaQaExample]:
        """Tokenize/tag a batch of raw TriviaQA-CP json questions."""
        cli = CoreNLPClient(port=self.port)
        sess = None
        if self.reuse_session:
            sess = requests.Session()
            sess.trust_env = False
        out = []
        for example in data:
            # Tokenize the question (no POS/NER needed for the question text).
            q_tok = extract_tokens(cli.query_tokenize(example['Question'], sess=sess)["sentences"], False)[0]
            answers = extract_normalized_answers(example["Answer"])
            answers_tokenized = []
            for ans in answers:
                ans_tok = extract_tokens(cli.query_tokenize(ans, sess=sess)["sentences"], False)[0]
                if len(ans_tok) > 0:  # Can happen very wonky unicode answers
                    answers_tokenized.append(ans_tok)
            # Tokenize + tag the passage, one newline-separated paragraph at a time.
            tok = []
            pos = []
            ner = []
            for para in example['Passage'].split("\n"):
                if self.legacy_tokenization:
                    # The original code did tokenization and NER separately instead of doing both
                    # in one query in order to do some additional caching. Unfortunately this can slightly
                    # change tagging output due to the respitting we do, so we preserve that behavior here.
                    _tok = extract_tokens(cli.query_tokenize(para, sess=sess)["sentences"], False)[0]
                    sentences = cli.query_ner(" ".join(_tok), sess=sess, whitespace=True)["sentences"]
                else:
                    sentences = cli.query_ner(para, sess=sess)["sentences"]
                p_tok, p_pos, p_ner, _ = extract_tokens(sentences, True)
                tok += p_tok
                pos += p_pos
                ner += p_ner
            # Spans of tokens that exactly match an answer (exclusive end).
            spans = np.array(find_answer_spans(tok, answers_tokenized))
            spans[:, 1] -= 1  # Switch to inclusive
            out.append(AnnotatedTriviaQaExample(
                example["QuestionId"], example["QuestionType"], np.array(example["QuestionTypeProbs"]),
                q_tok, tok, pos, ner, answers, spans))
        return out
def main():
    """CLI entry point: annotate a TriviaQA-CP json file and pickle the result."""
    parser = argparse.ArgumentParser("Builds annotated TriviaQA-CP data")
    parser.add_argument("source", help="Source TriviaQa-CP file")
    parser.add_argument("output", help="Output pickle file")
    parser.add_argument("--port", default=9000, type=int)
    parser.add_argument("--n_processes", default=1, type=int)
    parser.add_argument("--no_legacy_tokenization", action="store_true",
                        help="Turn off legacy tokenization, which will more closely reproduce our"
                             " results, but might make tagging worse in rare cases")
    args = parser.parse_args()
    with open(args.source, "r") as f:
        examples = json.load(f)['Data']
    annotator = AnnotateTriviaqaQuestions(
        args.port, legacy_tokenization=not args.no_legacy_tokenization)
    # Fan the examples out over n_processes workers, 10 examples per chunk.
    output = process_par.process_par(examples, annotator, args.n_processes, 10)
    # `output` is already the pickled bytes produced by process_par.
    with open(args.output, "wb") as f:
        f.write(output)
if __name__ == '__main__':
main() | [
"json.load",
"debias.utils.process_par.process_par",
"argparse.ArgumentParser",
"regex.compile",
"requests.Session",
"debias.preprocessing.corenlp_client.CoreNLPClient",
"triviaqa_cp.triviaqa_cp_evaluation.normalize_answer",
"numpy.array"
] | [((2379, 2405), 'regex.compile', 'regex.compile', (['split_regex'], {}), '(split_regex)\n', (2392, 2405), False, 'import regex\n'), ((5673, 5733), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Builds annotated TriviaQA-CP data"""'], {}), "('Builds annotated TriviaQA-CP data')\n", (5696, 5733), False, 'import argparse\n'), ((6447, 6513), 'debias.utils.process_par.process_par', 'process_par.process_par', (['examples', 'annotator', 'args.n_processes', '(10)'], {}), '(examples, annotator, args.n_processes, 10)\n', (6470, 6513), False, 'from debias.utils import process_par, py_utils\n'), ((1623, 1642), 'triviaqa_cp.triviaqa_cp_evaluation.normalize_answer', 'normalize_answer', (['w'], {}), '(w)\n', (1639, 1642), False, 'from triviaqa_cp.triviaqa_cp_evaluation import normalize_answer\n'), ((3923, 3952), 'debias.preprocessing.corenlp_client.CoreNLPClient', 'CoreNLPClient', ([], {'port': 'self.port'}), '(port=self.port)\n', (3936, 3952), False, 'from debias.preprocessing.corenlp_client import CoreNLPClient\n'), ((4009, 4027), 'requests.Session', 'requests.Session', ([], {}), '()\n', (4025, 4027), False, 'import requests\n'), ((6305, 6317), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6314, 6317), False, 'import json\n'), ((1355, 1374), 'triviaqa_cp.triviaqa_cp_evaluation.normalize_answer', 'normalize_answer', (['x'], {}), '(x)\n', (1371, 1374), False, 'from triviaqa_cp.triviaqa_cp_evaluation import normalize_answer\n'), ((5546, 5584), 'numpy.array', 'np.array', (["example['QuestionTypeProbs']"], {}), "(example['QuestionTypeProbs'])\n", (5554, 5584), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 16:25:09 2017
@author: dbasaran
"""
import numpy as np
from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU
from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D
from keras import backend as Kb, Model
import keras as K
import h5py
import os
from sklearn.preprocessing import LabelBinarizer, normalize
import csv
import pandas as pd
import mir_eval
import extract_HF0
# Parameters of the model are globally defined for the trained network
number_of_patches = 20  # patches per input segment
patch_size = 25  # frames per patch (number_of_patches * patch_size == segment_length)
segment_length = 500 # number_of_patches x patch_size
feature_size = 301  # HF0 feature bins per frame
number_of_classes = 62  # output pitch classes
step_notes = 5  # presumably quantization steps per semitone -- TODO confirm
SR = 22050  # audio sample rate in Hz
hop_size = 256  # STFT hop size in samples
RNN = 'GRU'  # recurrent cell type used by the trained network
#########################################################
## GET PATH FUNCTIONS: Functions to return paths
def get_path():
    '''
    Gets the path of the main folder (the parent of the current working dir)
    :return: path (string)
    '''
    cwd = os.getcwd()
    # Everything up to (but excluding) the last '/' is the parent directory.
    return cwd[:cwd.rfind('/')]
def get_path_to_quantized_annotations():
    """Return the directory holding the quantized pitch annotations."""
    return '{0}/quantized_annotations'.format(get_path())
def get_path_to_dataset_audio():
    """Return the directory holding the MedleyDB audio files."""
    return '{0}/medleydb_audio'.format(get_path())
def get_path_to_pitch_estimations():
    """Pitch estimations live in the model-output directory (thin wrapper)."""
    return get_model_output_save_path()
def get_model_output_save_path():
    """Return (and create if missing) the directory for C-RNN melody results.

    Uses os.makedirs(..., exist_ok=True) instead of the original
    exists()/makedirs() pair, which avoids the race where the directory is
    created between the check and the call.

    :return: model_output_save_path (string)
    """
    model_output_save_path = '{0}/medleydb_melody_results/C-RNN_results'.format(get_path())
    os.makedirs(model_output_save_path, exist_ok=True)
    return model_output_save_path
def get_dataset_splits_save_path():
    """Return (and create if missing) the directory for the dataset splits.

    Uses os.makedirs(..., exist_ok=True) instead of the original
    exists()/makedirs() pair to avoid the check-then-create race.

    :return: dataset_splits_save_path (string)
    """
    dataset_splits_save_path = '{0}/medleydb_dataset_splits'.format(get_path())
    os.makedirs(dataset_splits_save_path, exist_ok=True)
    return dataset_splits_save_path
def get_hf0_path():
    """Return the directory containing the precomputed HF0 (STFT) features."""
    return '{0}/medleydb_features/HF0s_STFT'.format(get_path())
def get_dataset_test_load_path():
    """Test data is read straight from the HF0 feature directory."""
    return get_hf0_path()
def get_dataset_load_path():
    """Training data is read from the dataset-splits directory."""
    return get_dataset_splits_save_path()
def get_trained_model_save_path(dataset_name):
    """Return (and create if missing) the directory for trained model weights.

    Bug fix: the original called get_path(dataset_name=dataset_name), but
    get_path() takes no arguments, so this function always raised TypeError.
    The dataset_name parameter is kept (unused) for backward compatibility.

    :param dataset_name: unused; retained so existing callers keep working
    :return: trained_model_save_path (string)
    """
    trained_model_save_path = '{0}/trained_models'.format(get_path())
    os.makedirs(trained_model_save_path, exist_ok=True)
    return trained_model_save_path
#######################################################
def get_labels(track_name):
    '''
    Get labels for the track
    :param track_name: String - Name of the track in the MedleyDB dataset
    :return: labels: Numpy array - quantized labels of the track with -1 for non-melody and all other target classes starting from 0
    '''
    quantized_annotation_path = get_path_to_quantized_annotations() \
                                + '/{0}_quantized_labels_Fs-22050_hop-256.h5'.format(track_name)
    # Use a context manager so the HDF5 handle is closed; the original left
    # the file open for the lifetime of the process (resource leak).
    with h5py.File(quantized_annotation_path, 'r') as labels_file:
        labels = np.array(labels_file['labels'])
    return labels
def get_pitch_estimation_from_csv(track_name):
    '''
    Gets the pitch estimation of a track from the csv file
    :param track_name: String - Name of the track in the MedleyDB dataset
    :return: pitch_estimation: Numpy array - Estimations for each frame
    '''
    csv_path = '{0}/{1}.csv'.format(get_path_to_pitch_estimations(), track_name)
    frame_table = pd.read_csv(csv_path, delimiter=',', header=None)
    # Column 0 is the frame time, column 1 the estimated frequency.
    return np.array(frame_table)[:, 1]
def construct_model(number_of_patches, patch_size, feature_size, number_of_classes, step_notes, RNN='LSTM',
                    verbose=True):
    """Build the C-RNN melody-estimation network (CNN stack + Bi-RNN + softmax).

    Input shape is (patches, patch frames, features, 1); the output is a
    per-frame distribution over `number_of_classes` pitch classes.

    :param number_of_patches: int - patches per training segment
    :param patch_size: int - frames per patch
    :param feature_size: int - HF0 feature dimension per frame
    :param number_of_classes: int - output classes (incl. non-melody)
    :param step_notes: int - frequency stride of the first convolution
    :param RNN: 'LSTM' or 'GRU' - recurrent cell type.
        NOTE(review): any other value leaves rnn1 unbound and raises
        NameError at the classifier line - confirm callers only pass these two.
    :param verbose: bool - print a model summary when True
    :return: an uncompiled keras Model
    """
    kernel_coeff = 0.00001
    number_of_channels = 1
    input_shape = (number_of_patches, patch_size, feature_size, number_of_channels)
    inputs = Input(shape=input_shape)
    # Pad the feature axis so the (1, 5) kernel of cnn1 keeps the frame count.
    zp = ZeroPadding3D(padding=(0, 0, 2))(inputs)
    #### CNN LAYERS ####
    # np.int is deprecated/removed in modern NumPy - kept byte-identical here;
    # flagged for a separate fix.
    cnn1 = TimeDistributed(Conv2D(64, (1, 5),
                            padding='valid',
                            activation='relu',
                            strides=(1, np.int(step_notes)),
                            kernel_regularizer=K.regularizers.l2(kernel_coeff),
                            data_format='channels_last', name='cnn1'))(zp)
    cnn1a = BatchNormalization()(cnn1)
    zp = ZeroPadding3D(padding=(0, 1, 2))(cnn1a)
    cnn2 = TimeDistributed(
        Conv2D(64, (3, 5), padding='valid', activation='relu', data_format='channels_last', name='cnn2'))(zp)
    cnn2a = BatchNormalization()(cnn2)
    zp = ZeroPadding3D(padding=(0, 1, 1))(cnn2a)
    cnn3 = TimeDistributed(
        Conv2D(64, (3, 3), padding='valid', activation='relu', data_format='channels_last', name='cnn3'))(zp)
    cnn3a = BatchNormalization()(cnn3)
    zp = ZeroPadding3D(padding=(0, 1, 7))(cnn3a)
    cnn4 = TimeDistributed(
        Conv2D(16, (3, 15), padding='valid', activation='relu', data_format='channels_last', name='cnn4'))(zp)
    cnn4a = BatchNormalization()(cnn4)
    # 1x1 convolution collapses the channel dimension to a single map.
    cnn5 = TimeDistributed(
        Conv2D(1, (1, 1), padding='same', activation='relu', data_format='channels_last', name='cnn5'))(cnn4a)
    #### RESHAPING LAYERS ####
    # Drop the singleton channel axis, then flatten patches x frames into one
    # time axis for the recurrent layer.
    cnn5a = Lambda(lambda x: Kb.squeeze(x, axis=4))(cnn5)
    cnn5b = Reshape((number_of_patches * patch_size, -1), name='cnn5-reshape')(cnn5a)
    #### BIDIRECTIONAL RNN LAYERS ####
    if RNN == 'LSTM':
        rnn1 = Bidirectional(LSTM(128,
                                 kernel_regularizer=K.regularizers.l1_l2(0.0001),
                                 return_sequences=True), name='rnn1')(cnn5b)
    elif RNN == 'GRU':
        rnn1 = Bidirectional(GRU(128,
                                kernel_regularizer=K.regularizers.l1_l2(0.0001),
                                return_sequences=True), name='rnn1')(cnn5b)
    #### CLASSIFICATION (DENSE) LAYER ####
    classifier = TimeDistributed(Dense(number_of_classes,
                                   activation='softmax',
                                   kernel_regularizer=K.regularizers.l2(0.00001),
                                   bias_regularizer=K.regularizers.l2()), name='output')(rnn1)
    model = Model(inputs=inputs, outputs=classifier)
    if verbose == True:
        model.summary()
        print('{0} as RNN!'.format(RNN))
    return model
def class_to_freq(input_class, minF0=55, maxF0=1760, number_of_classes=62, step_notes=1):
    '''
    Map quantized pitch-class indices to frequencies in Hz.

    Class -1 (non-melody) maps to 0 Hz; class k >= 0 maps onto a log-spaced
    grid starting at minF0 with `step_notes` steps per semitone.

    :param input_class: Numpy int array - class indices in [-1, number_of_classes - 2]
    :param minF0: frequency of class 0 in Hz
    :param maxF0: unused; kept for backward compatibility
    :param number_of_classes: total classes including the non-melody class
    :param step_notes: quantization steps per semitone
    :return: Numpy float array - frequency in Hz for every entry of input_class
    '''
    # Index 0 of F0_list is reserved for the non-melody class (-1), hence the
    # prepended 0 and the +1 shift below.  (A dead pre-allocation of
    # output_freq via np.zeros was removed - it was always overwritten.)
    F0_list = minF0 * 2 ** (np.arange(number_of_classes - 1) / (12. * step_notes))
    F0_list = np.append(0, F0_list)
    return F0_list[input_class + 1]
def get_prediction(HF0, model):
    """Predict a per-frame pitch track from an HF0 salience matrix.

    Fixes applied: removed a LabelBinarizer that was fit but never used
    (dead code), and replaced np.int / np.floor with integer division -
    np.int was removed in NumPy 1.24 and crashed this function on modern
    installs; the computed values are identical.

    :param HF0: Numpy array, shape (feature_size, n_frames) - SF-NMF activations
    :param model: model whose predict() returns per-frame class posteriors
    :return: pitch_estimates: Numpy array, shape (n_frames,) - estimated F0 per
        frame; negative values mark frames classified as non-melody
    """
    length_of_sequence = HF0.shape[1]
    number_of_segments = length_of_sequence // segment_length
    # Append the final segment_length frames so the tail of the track is
    # covered even when the length is not a multiple of segment_length.
    x_test = np.append(HF0[:, :number_of_segments * segment_length], HF0[:, -segment_length:], axis=1)
    x_test = normalize(x_test, norm='l1', axis=0)
    x_test = x_test.T
    # Arrange the input as (samples, patches, patch frames, features, 1).
    number_of_samples = x_test.shape[0] // (number_of_patches * patch_size)
    x_test = np.reshape(x_test, (number_of_samples, number_of_patches, patch_size, feature_size))
    x_test = x_test[:, :, :, :, np.newaxis]
    y_predicted = model.predict(x_test, batch_size=16)
    y_predicted = np.reshape(y_predicted, (y_predicted.shape[0] * y_predicted.shape[1], y_predicted.shape[2]))
    # Most likely melody class, excluding the non-melody class at column 0.
    y_pred = np.argmax(y_predicted[:, 1:], axis=1)
    pitch_estimates = class_to_freq(y_pred, number_of_classes=number_of_classes, step_notes=1)
    # Frames whose overall argmax is the non-melody class get a negative sign.
    y_pred = np.argmax(y_predicted, axis=1)
    signs = np.ones(y_pred.shape)
    signs[y_pred == 0] = -1
    pitch_estimates = signs * pitch_estimates
    # Undo the tail padding so the output matches the original frame count.
    pitch_estimates[length_of_sequence - segment_length:length_of_sequence] = pitch_estimates[-segment_length:]
    pitch_estimates = pitch_estimates[:length_of_sequence]
    return pitch_estimates
def save_output(pitch_estimates, output_path):
    """Save the estimated pitch track to a csv file as (time, frequency) rows.

    Fix: np.float (removed in NumPy 1.24) was replaced with the builtin
    float; the computed times are numerically identical.

    Parameters
    ----------
    pitch_estimates : np.ndarray
        array of frequency values, one per analysis frame
    output_path : str
        path to save output
    """
    # Seconds per analysis frame, derived from the module-level hop/rate.
    frame_period = float(hop_size) / SR
    times = np.arange(len(pitch_estimates)) * frame_period
    with open(output_path, 'w') as fhandle:
        csv_writer = csv.writer(fhandle, delimiter=',')
        for t, f in zip(times, pitch_estimates):
            csv_writer.writerow([t, f])
def print_evaluation_results_statistics(evaluation_results_statistics, args):
    """Pretty-print mean/std of every melody-evaluation metric for a model.

    :param evaluation_results_statistics: dict mapping each metric name to a
        [mean, std] pair
    :param args: object with a model_name attribute used in the header
    """
    metric_names = ['voicing_recall',
                    'voicing_false_alarm',
                    'raw_pitch_accuracy',
                    'raw_chroma_accuracy',
                    'overall_accuracy']
    print('\n**************************************************\n')
    print('Model {0} - Evaluation results:'.format(args.model_name))
    for metric in metric_names:
        mean_value, std_value = evaluation_results_statistics[metric]
        print('  {0}: mean={1}, std={2}'.format(metric, mean_value, std_value))
    print('\n**************************************************\n')
def get_evaluation_results_statistics(voicing_recall,
                                      voicing_false_alarm,
                                      raw_pitch_accuracy,
                                      raw_chroma_accuracy,
                                      overall_accuracy):
    """Return {metric_name: [mean, std]} for the five melody-evaluation metrics.

    Each argument is a sequence of per-track scores for that metric.
    """
    metric_values = {'voicing_recall': voicing_recall,
                     'voicing_false_alarm': voicing_false_alarm,
                     'raw_pitch_accuracy': raw_pitch_accuracy,
                     'raw_chroma_accuracy': raw_chroma_accuracy,
                     'overall_accuracy': overall_accuracy}
    return {name: [np.mean(values), np.std(values)]
            for name, values in metric_values.items()}
def evaluate_melody_prediction(track_name, pitch_estimates, verbose):
    """Evaluate a pitch estimation against the track's quantized annotations.

    :param track_name: String - MedleyDB track name; a '_corrected_pitch'
        suffix is stripped before loading the annotations
    :param pitch_estimates: Numpy array of per-frame frequencies, or None to
        load the estimation from the previously-saved csv file
    :param verbose: bool - print the individual metric values when True
    :return: evaluation_results: dict with the five mir_eval melody metrics
    """
    try:
        if 'corrected_pitch' in track_name:
            track_name_original = track_name.split('_corrected_pitch')[0]
        else:
            track_name_original = track_name
        labels = get_labels(track_name=track_name_original)
    except:
        # NOTE(review): this bare except only prints and falls through, so a
        # failed annotation load leaves `labels` unbound and the
        # class_to_freq call below raises NameError - consider re-raising.
        print('Error')
    if pitch_estimates is None:
        pitch_estimates = get_pitch_estimation_from_csv(track_name=track_name)
    # Convert quantized class labels to frequencies in Hz.
    labels = class_to_freq(labels)
    # Truncate both sequences to the shorter length before comparing.
    min_len = np.min((len(labels), len(pitch_estimates)))
    labels = labels[:min_len]
    pitch_estimation = pitch_estimates[:min_len]
    evaluation_results = {}
    # Frame indices stand in for time stamps; both sequences share the grid.
    (ref_v, ref_c, est_v, est_c) = mir_eval.melody.to_cent_voicing(np.arange(np.size(labels)),
                                                                   labels,
                                                                   np.arange(np.size(labels)),
                                                                   pitch_estimation)
    vr, vfa = mir_eval.melody.voicing_measures(ref_v, est_v)
    rpa = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c, est_v, est_c, cent_tolerance=80)
    rca = mir_eval.melody.raw_chroma_accuracy(ref_v, ref_c, est_v, est_c, cent_tolerance=80)
    oa = mir_eval.melody.overall_accuracy(ref_v, ref_c, est_v, est_c, cent_tolerance=80)
    evaluation_results['Voicing Recall'] = vr
    evaluation_results['Voicing False Alarm'] = vfa
    evaluation_results['Raw Pitch Accuracy'] = rpa
    evaluation_results['Raw Chroma Accuracy'] = rca
    evaluation_results['Overall Accuracy'] = oa
    if verbose:
        print('{0} - Evaluation results:'.format(track_name))
        print('   voicing_recall_rate = {0}'.format(vr))
        print('   voicing_false_alarm_rate = {0}'.format(vfa))
        print('   raw_pitch_accuracy = {0}'.format(rpa))
        print('   raw_chroma_accuracy = {0}'.format(rca))
        print('   overall_accuracy = {0}'.format(oa))
    return evaluation_results
def load_model(model_weights_path=None):
    """Build the C-RNN model and load trained weights into it.

    Fix: '== None' replaced with the identity comparison 'is None' (PEP 8).

    :param model_weights_path: String or None - path to an HDF5 weight file;
        when None, the bundled 'weights_C-RNN.h5' in the working directory
        is loaded
    :return: the keras model with weights loaded
    """
    model = construct_model(number_of_patches, patch_size, feature_size, number_of_classes, step_notes, RNN=RNN)
    if model_weights_path is None:
        model.load_weights('weights_C-RNN.h5')
    else:
        model.load_weights(filepath=model_weights_path)
    return model
def compute_output(HF0, save_dir, save_name):
    """Run the full pipeline on one HF0 matrix: predict, save to csv, return.

    :param HF0: Numpy array - SF-NMF activations of the track
    :param save_dir: String - directory for the output csv
    :param save_name: String - csv file name without extension
    :return: Numpy array of per-frame pitch estimates
    """
    pitch_estimates = get_prediction(HF0, load_model())
    save_output(pitch_estimates, '{0}/{1}.csv'.format(save_dir, save_name))
    return pitch_estimates
def print_evaluation_results_statistics(evaluation_results_statistics):
    """Pretty-print mean/std of every melody-evaluation metric.

    NOTE(review): this redefines (and shadows) the two-argument version
    declared earlier in the file.

    :param evaluation_results_statistics: dict mapping each metric name to a
        [mean, std] pair
    """
    metric_names = ['voicing_recall',
                    'voicing_false_alarm',
                    'raw_pitch_accuracy',
                    'raw_chroma_accuracy',
                    'overall_accuracy']
    print('\n**************************************************\n')
    print('Evaluation results:')
    for metric in metric_names:
        mean_value, std_value = evaluation_results_statistics[metric]
        print('  {0}: mean={1}, std={2}'.format(metric, mean_value, std_value))
    print('\n**************************************************\n')
def get_evaluation_results_statistics(voicing_recall,
                                      voicing_false_alarm,
                                      raw_pitch_accuracy,
                                      raw_chroma_accuracy,
                                      overall_accuracy):
    """Return {metric_name: [mean, std]} for the five melody-evaluation metrics.

    NOTE(review): this redefines an identical function declared earlier in
    the file.
    """
    metric_names = ('voicing_recall', 'voicing_false_alarm',
                    'raw_pitch_accuracy', 'raw_chroma_accuracy',
                    'overall_accuracy')
    metric_series = (voicing_recall, voicing_false_alarm,
                     raw_pitch_accuracy, raw_chroma_accuracy,
                     overall_accuracy)
    return {name: [np.mean(series), np.std(series)]
            for name, series in zip(metric_names, metric_series)}
def main_prediction(file_path, evaluate_results=False):
    '''
    Estimate the dominant melody of a track, either from its audio (.wav) or
    from precomputed SF-NMF activations (.h5).  The estimates are written to a
    csv file and, optionally, evaluated against the MedleyDB annotations.
    Note that the system here is trained with MedleyDB alone.

    Bug fixes: the .h5 branch referenced the module-global HF0_fpath instead
    of the file_path argument, and track_name was never assigned in the .wav
    branch (both caused failures when called with anything but the demo
    globals).

    :param file_path: (String) Path to a .wav audio file or a .h5 HF0 file
    :param evaluate_results: (bool) If True, evaluate against the quantized
        annotations and return the evaluation results
    :return: evaluation_results (dict) when evaluate_results is True, else None
    '''
    ## Load the file: Either an audio file or HF0 estimation file
    try:
        # Derive the track name from the file itself so both branches work.
        track_name = os.path.splitext(os.path.basename(file_path))[0]
        if '.wav' == file_path[-4:]:
            HF0 = extract_HF0.main(audio_fpath=file_path)
        elif '.h5' == file_path[-3:]:
            with h5py.File(file_path, 'r') as feats:
                HF0 = np.array(feats['HF0'])
        else:
            raise RuntimeError('Unsupported file type: expected .wav or .h5')
    except:
        raise RuntimeError('Wav file or HF0 file could not be loaded!')
    ## Load the model
    try:
        model = load_model()
    except:
        raise RuntimeError('Model could not be loaded!')
    ## Estimate the dominant melody
    try:
        pitch_estimates = get_prediction(HF0, model)
    except:
        raise RuntimeError('An error occured in the melody estimation!')
    ## Save the estimations to a csv file
    try:
        output_path = '{0}/{1}.csv'.format(get_model_output_save_path(),
                                           track_name)
        save_output(pitch_estimates, output_path)
    except:
        # Fall back to the current working directory if the results directory
        # cannot be created or written to.
        output_path = '{0}.csv'.format(track_name)
        save_output(pitch_estimates, output_path)
    ## Evaluate the results if annotations are available
    try:
        if evaluate_results:
            evaluation_results = evaluate_melody_prediction(track_name=track_name,
                                                           pitch_estimates=pitch_estimates,
                                                           verbose=True)
            return evaluation_results
    except:
        raise RuntimeError('An error occured in the evaluation!')
if __name__ == '__main__':
    # Example usage:
    track_name = 'AClassicEducation_NightOwl'
    # Paths to the precomputed HF0 features and the raw audio of the track.
    HF0_fpath = '{0}/{1}.h5'.format(get_hf0_path(),track_name)
    audio_fpath = '{0}/{1}.wav'.format(get_path_to_dataset_audio(),track_name)
    # Run the full pipeline on the audio file and evaluate the estimation.
    main_prediction(file_path=audio_fpath, evaluate_results=True)
| [
"keras.regularizers.l2",
"sklearn.preprocessing.LabelBinarizer",
"numpy.argmax",
"pandas.read_csv",
"numpy.floor",
"numpy.ones",
"keras.layers.ZeroPadding3D",
"numpy.mean",
"numpy.arange",
"keras.layers.Input",
"keras.layers.Reshape",
"keras.regularizers.l1_l2",
"extract_HF0.main",
"mir_ev... | [((977, 988), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (986, 988), False, 'import os\n'), ((3091, 3132), 'h5py.File', 'h5py.File', (['quantized_annotation_path', '"""r"""'], {}), "(quantized_annotation_path, 'r')\n", (3100, 3132), False, 'import h5py\n'), ((3147, 3178), 'numpy.array', 'np.array', (["labels_file['labels']"], {}), "(labels_file['labels'])\n", (3155, 3178), True, 'import numpy as np\n'), ((3566, 3622), 'pandas.read_csv', 'pd.read_csv', (['estimation_path'], {'delimiter': '""","""', 'header': 'None'}), "(estimation_path, delimiter=',', header=None)\n", (3577, 3622), True, 'import pandas as pd\n'), ((3993, 4017), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (3998, 4017), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((6371, 6411), 'keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'classifier'}), '(inputs=inputs, outputs=classifier)\n', (6376, 6411), False, 'from keras import backend as Kb, Model\n'), ((6649, 6676), 'numpy.zeros', 'np.zeros', (['input_class.shape'], {}), '(input_class.shape)\n', (6657, 6676), True, 'import numpy as np\n'), ((6775, 6796), 'numpy.append', 'np.append', (['(0)', 'F0_list'], {}), '(0, F0_list)\n', (6784, 6796), True, 'import numpy as np\n'), ((6907, 6923), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (6921, 6923), False, 'from sklearn.preprocessing import LabelBinarizer, normalize\n'), ((7191, 7285), 'numpy.append', 'np.append', (['HF0[:, :number_of_segments * segment_length]', 'HF0[:, -segment_length:]'], {'axis': '(1)'}), '(HF0[:, :number_of_segments * segment_length], HF0[:, -\n segment_length:], axis=1)\n', (7200, 7285), True, 'import numpy as np\n'), ((7288, 7324), 'sklearn.preprocessing.normalize', 'normalize', (['x_test'], {'norm': '"""l1"""', 'axis': '(0)'}), "(x_test, norm='l1', axis=0)\n", (7297, 7324), False, 'from sklearn.preprocessing import LabelBinarizer, 
normalize\n'), ((7423, 7481), 'numpy.int', 'np.int', (['(x_test.shape[0] / (number_of_patches * patch_size))'], {}), '(x_test.shape[0] / (number_of_patches * patch_size))\n', (7429, 7481), True, 'import numpy as np\n'), ((7495, 7583), 'numpy.reshape', 'np.reshape', (['x_test', '(number_of_samples, number_of_patches, patch_size, feature_size)'], {}), '(x_test, (number_of_samples, number_of_patches, patch_size,\n feature_size))\n', (7505, 7583), True, 'import numpy as np\n'), ((7698, 7794), 'numpy.reshape', 'np.reshape', (['y_predicted', '(y_predicted.shape[0] * y_predicted.shape[1], y_predicted.shape[2])'], {}), '(y_predicted, (y_predicted.shape[0] * y_predicted.shape[1],\n y_predicted.shape[2]))\n', (7708, 7794), True, 'import numpy as np\n'), ((7805, 7842), 'numpy.argmax', 'np.argmax', (['y_predicted[:, 1:]'], {'axis': '(1)'}), '(y_predicted[:, 1:], axis=1)\n', (7814, 7842), True, 'import numpy as np\n'), ((7951, 7981), 'numpy.argmax', 'np.argmax', (['y_predicted'], {'axis': '(1)'}), '(y_predicted, axis=1)\n', (7960, 7981), True, 'import numpy as np\n'), ((7994, 8015), 'numpy.ones', 'np.ones', (['y_pred.shape'], {}), '(y_pred.shape)\n', (8001, 8015), True, 'import numpy as np\n'), ((12276, 12322), 'mir_eval.melody.voicing_measures', 'mir_eval.melody.voicing_measures', (['ref_v', 'est_v'], {}), '(ref_v, est_v)\n', (12308, 12322), False, 'import mir_eval\n'), ((12333, 12418), 'mir_eval.melody.raw_pitch_accuracy', 'mir_eval.melody.raw_pitch_accuracy', (['ref_v', 'ref_c', 'est_v', 'est_c'], {'cent_tolerance': '(80)'}), '(ref_v, ref_c, est_v, est_c,\n cent_tolerance=80)\n', (12367, 12418), False, 'import mir_eval\n'), ((12425, 12511), 'mir_eval.melody.raw_chroma_accuracy', 'mir_eval.melody.raw_chroma_accuracy', (['ref_v', 'ref_c', 'est_v', 'est_c'], {'cent_tolerance': '(80)'}), '(ref_v, ref_c, est_v, est_c,\n cent_tolerance=80)\n', (12460, 12511), False, 'import mir_eval\n'), ((12517, 12596), 'mir_eval.melody.overall_accuracy', 'mir_eval.melody.overall_accuracy', 
(['ref_v', 'ref_c', 'est_v', 'est_c'], {'cent_tolerance': '(80)'}), '(ref_v, ref_c, est_v, est_c, cent_tolerance=80)\n', (12549, 12596), False, 'import mir_eval\n'), ((1590, 1628), 'os.path.exists', 'os.path.exists', (['model_output_save_path'], {}), '(model_output_save_path)\n', (1604, 1628), False, 'import os\n'), ((1638, 1673), 'os.makedirs', 'os.makedirs', (['model_output_save_path'], {}), '(model_output_save_path)\n', (1649, 1673), False, 'import os\n'), ((1839, 1879), 'os.path.exists', 'os.path.exists', (['dataset_splits_save_path'], {}), '(dataset_splits_save_path)\n', (1853, 1879), False, 'import os\n'), ((1889, 1926), 'os.makedirs', 'os.makedirs', (['dataset_splits_save_path'], {}), '(dataset_splits_save_path)\n', (1900, 1926), False, 'import os\n'), ((2451, 2490), 'os.path.exists', 'os.path.exists', (['trained_model_save_path'], {}), '(trained_model_save_path)\n', (2465, 2490), False, 'import os\n'), ((2500, 2536), 'os.makedirs', 'os.makedirs', (['trained_model_save_path'], {}), '(trained_model_save_path)\n', (2511, 2536), False, 'import os\n'), ((3646, 3660), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3654, 3660), True, 'import numpy as np\n'), ((4028, 4060), 'keras.layers.ZeroPadding3D', 'ZeroPadding3D', ([], {'padding': '(0, 0, 2)'}), '(padding=(0, 0, 2))\n', (4041, 4060), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((4492, 4512), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4510, 4512), False, 'from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU\n'), ((4529, 4561), 'keras.layers.ZeroPadding3D', 'ZeroPadding3D', ([], {'padding': '(0, 1, 2)'}), '(padding=(0, 1, 2))\n', (4542, 4561), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((4721, 4741), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4739, 4741), False, 'from keras.layers import Dense, Reshape, 
BatchNormalization, Bidirectional, GRU\n'), ((4758, 4790), 'keras.layers.ZeroPadding3D', 'ZeroPadding3D', ([], {'padding': '(0, 1, 1)'}), '(padding=(0, 1, 1))\n', (4771, 4790), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((4950, 4970), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4968, 4970), False, 'from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU\n'), ((4987, 5019), 'keras.layers.ZeroPadding3D', 'ZeroPadding3D', ([], {'padding': '(0, 1, 7)'}), '(padding=(0, 1, 7))\n', (5000, 5019), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((5180, 5200), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5198, 5200), False, 'from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU\n'), ((5450, 5516), 'keras.layers.Reshape', 'Reshape', (['(number_of_patches * patch_size, -1)'], {'name': '"""cnn5-reshape"""'}), "((number_of_patches * patch_size, -1), name='cnn5-reshape')\n", (5457, 5516), False, 'from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU\n'), ((6935, 6971), 'numpy.arange', 'np.arange', (['(-1)', '(number_of_classes - 1)'], {}), '(-1, number_of_classes - 1)\n', (6944, 6971), True, 'import numpy as np\n'), ((7041, 7086), 'numpy.floor', 'np.floor', (['(length_of_sequence / segment_length)'], {}), '(length_of_sequence / segment_length)\n', (7049, 7086), True, 'import numpy as np\n'), ((8729, 8763), 'csv.writer', 'csv.writer', (['fhandle'], {'delimiter': '""","""'}), "(fhandle, delimiter=',')\n", (8739, 8763), False, 'import csv\n'), ((10686, 10709), 'numpy.mean', 'np.mean', (['voicing_recall'], {}), '(voicing_recall)\n', (10693, 10709), True, 'import numpy as np\n'), ((10711, 10733), 'numpy.std', 'np.std', (['voicing_recall'], {}), '(voicing_recall)\n', (10717, 10733), True, 'import numpy as np\n'), ((10795, 10823), 'numpy.mean', 
'np.mean', (['voicing_false_alarm'], {}), '(voicing_false_alarm)\n', (10802, 10823), True, 'import numpy as np\n'), ((10825, 10852), 'numpy.std', 'np.std', (['voicing_false_alarm'], {}), '(voicing_false_alarm)\n', (10831, 10852), True, 'import numpy as np\n'), ((10913, 10940), 'numpy.mean', 'np.mean', (['raw_pitch_accuracy'], {}), '(raw_pitch_accuracy)\n', (10920, 10940), True, 'import numpy as np\n'), ((10942, 10968), 'numpy.std', 'np.std', (['raw_pitch_accuracy'], {}), '(raw_pitch_accuracy)\n', (10948, 10968), True, 'import numpy as np\n'), ((11030, 11058), 'numpy.mean', 'np.mean', (['raw_chroma_accuracy'], {}), '(raw_chroma_accuracy)\n', (11037, 11058), True, 'import numpy as np\n'), ((11060, 11087), 'numpy.std', 'np.std', (['raw_chroma_accuracy'], {}), '(raw_chroma_accuracy)\n', (11066, 11087), True, 'import numpy as np\n'), ((11146, 11171), 'numpy.mean', 'np.mean', (['overall_accuracy'], {}), '(overall_accuracy)\n', (11153, 11171), True, 'import numpy as np\n'), ((11173, 11197), 'numpy.std', 'np.std', (['overall_accuracy'], {}), '(overall_accuracy)\n', (11179, 11197), True, 'import numpy as np\n'), ((15621, 15644), 'numpy.mean', 'np.mean', (['voicing_recall'], {}), '(voicing_recall)\n', (15628, 15644), True, 'import numpy as np\n'), ((15646, 15668), 'numpy.std', 'np.std', (['voicing_recall'], {}), '(voicing_recall)\n', (15652, 15668), True, 'import numpy as np\n'), ((15730, 15758), 'numpy.mean', 'np.mean', (['voicing_false_alarm'], {}), '(voicing_false_alarm)\n', (15737, 15758), True, 'import numpy as np\n'), ((15760, 15787), 'numpy.std', 'np.std', (['voicing_false_alarm'], {}), '(voicing_false_alarm)\n', (15766, 15787), True, 'import numpy as np\n'), ((15848, 15875), 'numpy.mean', 'np.mean', (['raw_pitch_accuracy'], {}), '(raw_pitch_accuracy)\n', (15855, 15875), True, 'import numpy as np\n'), ((15877, 15903), 'numpy.std', 'np.std', (['raw_pitch_accuracy'], {}), '(raw_pitch_accuracy)\n', (15883, 15903), True, 'import numpy as np\n'), ((15965, 15993), 
'numpy.mean', 'np.mean', (['raw_chroma_accuracy'], {}), '(raw_chroma_accuracy)\n', (15972, 15993), True, 'import numpy as np\n'), ((15995, 16022), 'numpy.std', 'np.std', (['raw_chroma_accuracy'], {}), '(raw_chroma_accuracy)\n', (16001, 16022), True, 'import numpy as np\n'), ((16081, 16106), 'numpy.mean', 'np.mean', (['overall_accuracy'], {}), '(overall_accuracy)\n', (16088, 16106), True, 'import numpy as np\n'), ((16108, 16132), 'numpy.std', 'np.std', (['overall_accuracy'], {}), '(overall_accuracy)\n', (16114, 16132), True, 'import numpy as np\n'), ((4606, 4707), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 5)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'data_format': '"""channels_last"""', 'name': '"""cnn2"""'}), "(64, (3, 5), padding='valid', activation='relu', data_format=\n 'channels_last', name='cnn2')\n", (4612, 4707), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((4835, 4936), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'data_format': '"""channels_last"""', 'name': '"""cnn3"""'}), "(64, (3, 3), padding='valid', activation='relu', data_format=\n 'channels_last', name='cnn3')\n", (4841, 4936), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((5064, 5166), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 15)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'data_format': '"""channels_last"""', 'name': '"""cnn4"""'}), "(16, (3, 15), padding='valid', activation='relu', data_format=\n 'channels_last', name='cnn4')\n", (5070, 5166), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((5244, 5343), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'data_format': '"""channels_last"""', 'name': '"""cnn5"""'}), "(1, (1, 1), padding='same', activation='relu', data_format=\n 
'channels_last', name='cnn5')\n", (5250, 5343), False, 'from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D\n'), ((8642, 8660), 'numpy.float', 'np.float', (['hop_size'], {}), '(hop_size)\n', (8650, 8660), True, 'import numpy as np\n'), ((11988, 12003), 'numpy.size', 'np.size', (['labels'], {}), '(labels)\n', (11995, 12003), True, 'import numpy as np\n'), ((12158, 12173), 'numpy.size', 'np.size', (['labels'], {}), '(labels)\n', (12165, 12173), True, 'import numpy as np\n'), ((16815, 16854), 'extract_HF0.main', 'extract_HF0.main', ([], {'audio_fpath': 'file_path'}), '(audio_fpath=file_path)\n', (16831, 16854), False, 'import extract_HF0\n'), ((5408, 5429), 'keras.backend.squeeze', 'Kb.squeeze', (['x'], {'axis': '(4)'}), '(x, axis=4)\n', (5418, 5429), True, 'from keras import backend as Kb, Model\n'), ((6706, 6738), 'numpy.arange', 'np.arange', (['(number_of_classes - 1)'], {}), '(number_of_classes - 1)\n', (6715, 6738), True, 'import numpy as np\n'), ((16913, 16938), 'h5py.File', 'h5py.File', (['HF0_fpath', '"""r"""'], {}), "(HF0_fpath, 'r')\n", (16922, 16938), False, 'import h5py\n'), ((16957, 16979), 'numpy.array', 'np.array', (["feats['HF0']"], {}), "(feats['HF0'])\n", (16965, 16979), True, 'import numpy as np\n'), ((4365, 4396), 'keras.regularizers.l2', 'K.regularizers.l2', (['kernel_coeff'], {}), '(kernel_coeff)\n', (4382, 4396), True, 'import keras as K\n'), ((6231, 6255), 'keras.regularizers.l2', 'K.regularizers.l2', (['(1e-05)'], {}), '(1e-05)\n', (6248, 6255), True, 'import keras as K\n'), ((6315, 6334), 'keras.regularizers.l2', 'K.regularizers.l2', ([], {}), '()\n', (6332, 6334), True, 'import keras as K\n'), ((17002, 17029), 'os.path.basename', 'os.path.basename', (['HF0_fpath'], {}), '(HF0_fpath)\n', (17018, 17029), False, 'import os\n'), ((4291, 4309), 'numpy.int', 'np.int', (['step_notes'], {}), '(step_notes)\n', (4297, 4309), True, 'import numpy as np\n'), ((5678, 5706), 'keras.regularizers.l1_l2', 
'K.regularizers.l1_l2', (['(0.0001)'], {}), '(0.0001)\n', (5698, 5706), True, 'import keras as K\n'), ((5899, 5927), 'keras.regularizers.l1_l2', 'K.regularizers.l1_l2', (['(0.0001)'], {}), '(0.0001)\n', (5919, 5927), True, 'import keras as K\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def softmaxcrossentropy(x, target, weight=None, reduction='mean'):  # type: ignore
    """Reference softmax cross-entropy loss.

    x: (N, C) or (N, C, D) class scores; target: int class indices with
    shape (N,) or (N, D); weight: optional per-class weights;
    reduction: 'none', 'sum' or 'mean' (weighted mean when weight is given).
    """
    # Numerically stable log-softmax over the class axis.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exp_shifted = np.exp(shifted)
    log_probs = np.log(exp_shifted / np.sum(exp_shifted, axis=1, keepdims=True))
    shape = log_probs.shape
    if len(shape) == 2:
        N, C = shape
        loss = np.zeros((N, ), dtype=np.float32)
        for n in range(N):
            loss[n] = -log_probs[n][target[n]]
    if len(shape) == 3:
        N, C, D = shape
        loss = np.zeros((N, D), dtype=np.float32)
        for n in range(N):
            for d in range(D):
                loss[n][d] = -log_probs[n][target[n][d]][d]
    if weight is not None:
        gathered = np.take(weight, target)
        loss = gathered * loss
        if reduction == 'mean':
            # Weighted mean: normalize by the summed gathered weights.
            return loss.sum() / gathered.sum()
    if reduction == 'mean':
        return np.mean(loss)
    if reduction == 'sum':
        return np.sum(loss)
    return loss
class SoftmaxCrossEntropyLoss(Base):
@staticmethod
def export_softmaxcrossentropy_none(): # type: () -> None
# Define operator attributes.
reduction = 'none'
# Create operator.
node = onnx.helper.make_node('SoftmaxCrossEntropyLoss',
inputs=['x', 'y'],
outputs=['z'],
reduction=reduction)
# Define operator inputs.
np.random.seed(0)
x = np.random.rand(3, 5).astype(np.float32)
labels = np.random.randint(0, high=5, size=(3, ))
# Compute SoftmaxCrossEntropyLoss
sce = softmaxcrossentropy(x, labels, reduction='none')
# Check results
expect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_none')
@staticmethod
def export_softmaxcrossentropy_none_weights(): # type: () -> None
# Define operator attributes.
reduction = 'none'
# Create operator.
node = onnx.helper.make_node('SoftmaxCrossEntropyLoss',
inputs=['x', 'y', 'w'],
outputs=['z'],
reduction=reduction)
# Define operator inputs.
np.random.seed(0)
x = np.random.rand(3, 5).astype(np.float32)
labels = np.random.randint(0, high=5, size=(3, ))
weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
# Compute SoftmaxCrossEntropyLoss
sce = softmaxcrossentropy(x, labels, weight=weights, reduction='none')
# Check results
expect(node, inputs=[x, labels, weights], outputs=[sce], name='test_softmax_cross_entropy_none_weights')
@staticmethod
def export_softmaxcrossentropy_sum(): # type: () -> None
# Define operator attributes.
reduction = 'sum'
# Create operator.
node = onnx.helper.make_node('SoftmaxCrossEntropyLoss',
inputs=['x', 'y'],
outputs=['z'],
reduction=reduction)
# Define operator inputs.
np.random.seed(0)
x = np.random.rand(3, 5).astype(np.float32)
labels = np.random.randint(0, high=5, size=(3, ))
# Compute SoftmaxCrossEntropyLoss
sce = softmaxcrossentropy(x, labels, reduction='sum')
# Check results
expect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_sum')
@staticmethod
def export_softmaxcrossentropy_mean(): # type: () -> None
# Define operator attributes.
reduction = 'mean'
# Create operator.
node = onnx.helper.make_node('SoftmaxCrossEntropyLoss',
inputs=['x', 'y'],
outputs=['z'],
reduction=reduction)
# Define operator inputs.
np.random.seed(0)
x = np.random.rand(3, 5).astype(np.float32)
labels = np.random.randint(0, high=5, size=(3, ))
# Compute SoftmaxCrossEntropyLoss
sce = softmaxcrossentropy(x, labels)
# Check results
expect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_mean')
@staticmethod
def export_softmaxcrossentropy_mean_3d():  # type: () -> None
    """Backend test: mean-reduced loss on a rank-3 score tensor."""
    reduction = 'mean'
    # Node under test: scores 'x' and labels 'y'.
    node = onnx.helper.make_node(
        'SoftmaxCrossEntropyLoss',
        inputs=['x', 'y'],
        outputs=['z'],
        reduction=reduction,
    )
    # Deterministic random inputs: (3, 5, 2) scores, (3, 2) labels.
    np.random.seed(0)
    x = np.random.rand(3, 5, 2).astype(np.float32)
    y = np.random.randint(0, high=5, size=(3, 2))
    # Expected output from the NumPy reference implementation.
    sce = softmaxcrossentropy(x, y)
    expect(node, inputs=[x, y], outputs=[sce],
           name='test_softmax_cross_entropy_mean_3d')
@staticmethod
def export_softmaxcrossentropy_mean_weights():  # type: () -> None
    """Backend test: mean-reduced loss with class weights."""
    reduction = 'mean'
    # Node under test: scores 'x', labels 'y' and a class-weight vector 'w'.
    node = onnx.helper.make_node(
        'SoftmaxCrossEntropyLoss',
        inputs=['x', 'y', 'w'],
        outputs=['z'],
        reduction=reduction,
    )
    # Deterministic random inputs.
    np.random.seed(0)
    x = np.random.rand(3, 5).astype(np.float32)
    labels = np.random.randint(0, high=5, size=(3, ))
    weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
    # Expected output from the NumPy reference implementation.
    sce = softmaxcrossentropy(x, labels, weight=weights)
    expect(node, inputs=[x, labels, weights], outputs=[sce],
           name='test_softmax_cross_entropy_mean_weight')
| [
"onnx.helper.make_node",
"numpy.sum",
"numpy.log",
"numpy.random.seed",
"numpy.zeros",
"numpy.max",
"numpy.mean",
"numpy.take",
"numpy.exp",
"numpy.random.randint",
"numpy.array",
"numpy.random.rand"
] | [((340, 372), 'numpy.max', 'np.max', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (346, 372), True, 'import numpy as np\n'), ((385, 402), 'numpy.exp', 'np.exp', (['(x - max_x)'], {}), '(x - max_x)\n', (391, 402), True, 'import numpy as np\n'), ((466, 475), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (472, 475), True, 'import numpy as np\n'), ((419, 455), 'numpy.sum', 'np.sum', (['exp_x'], {'axis': '(1)', 'keepdims': '(True)'}), '(exp_x, axis=1, keepdims=True)\n', (425, 455), True, 'import numpy as np\n'), ((596, 628), 'numpy.zeros', 'np.zeros', (['(N,)'], {'dtype': 'np.float32'}), '((N,), dtype=np.float32)\n', (604, 628), True, 'import numpy as np\n'), ((813, 847), 'numpy.zeros', 'np.zeros', (['(N, D)'], {'dtype': 'np.float32'}), '((N, D), dtype=np.float32)\n', (821, 847), True, 'import numpy as np\n'), ((1068, 1091), 'numpy.take', 'np.take', (['weight', 'target'], {}), '(weight, target)\n', (1075, 1091), True, 'import numpy as np\n'), ((1256, 1269), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (1263, 1269), True, 'import numpy as np\n'), ((1312, 1324), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (1318, 1324), True, 'import numpy as np\n'), ((1571, 1679), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SoftmaxCrossEntropyLoss"""'], {'inputs': "['x', 'y']", 'outputs': "['z']", 'reduction': 'reduction'}), "('SoftmaxCrossEntropyLoss', inputs=['x', 'y'], outputs\n =['z'], reduction=reduction)\n", (1592, 1679), False, 'import onnx\n'), ((1829, 1846), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1843, 1846), True, 'import numpy as np\n'), ((1916, 1955), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(5)', 'size': '(3,)'}), '(0, high=5, size=(3,))\n', (1933, 1955), True, 'import numpy as np\n'), ((2382, 2494), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SoftmaxCrossEntropyLoss"""'], {'inputs': "['x', 'y', 'w']", 'outputs': "['z']", 'reduction': 'reduction'}), 
"('SoftmaxCrossEntropyLoss', inputs=['x', 'y', 'w'],\n outputs=['z'], reduction=reduction)\n", (2403, 2494), False, 'import onnx\n'), ((2645, 2662), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2659, 2662), True, 'import numpy as np\n'), ((2732, 2771), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(5)', 'size': '(3,)'}), '(0, high=5, size=(3,))\n', (2749, 2771), True, 'import numpy as np\n'), ((2791, 2844), 'numpy.array', 'np.array', (['[0.9, 0.7, 0.8, 0.9, 0.9]'], {'dtype': 'np.float32'}), '([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n', (2799, 2844), True, 'import numpy as np\n'), ((3293, 3401), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SoftmaxCrossEntropyLoss"""'], {'inputs': "['x', 'y']", 'outputs': "['z']", 'reduction': 'reduction'}), "('SoftmaxCrossEntropyLoss', inputs=['x', 'y'], outputs\n =['z'], reduction=reduction)\n", (3314, 3401), False, 'import onnx\n'), ((3551, 3568), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3565, 3568), True, 'import numpy as np\n'), ((3638, 3677), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(5)', 'size': '(3,)'}), '(0, high=5, size=(3,))\n', (3655, 3677), True, 'import numpy as np\n'), ((4094, 4202), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SoftmaxCrossEntropyLoss"""'], {'inputs': "['x', 'y']", 'outputs': "['z']", 'reduction': 'reduction'}), "('SoftmaxCrossEntropyLoss', inputs=['x', 'y'], outputs\n =['z'], reduction=reduction)\n", (4115, 4202), False, 'import onnx\n'), ((4352, 4369), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4366, 4369), True, 'import numpy as np\n'), ((4439, 4478), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(5)', 'size': '(3,)'}), '(0, high=5, size=(3,))\n', (4456, 4478), True, 'import numpy as np\n'), ((4882, 4990), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SoftmaxCrossEntropyLoss"""'], {'inputs': "['x', 'y']", 'outputs': "['z']", 'reduction': 
'reduction'}), "('SoftmaxCrossEntropyLoss', inputs=['x', 'y'], outputs\n =['z'], reduction=reduction)\n", (4903, 4990), False, 'import onnx\n'), ((5140, 5157), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5154, 5157), True, 'import numpy as np\n'), ((5225, 5266), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(5)', 'size': '(3, 2)'}), '(0, high=5, size=(3, 2))\n', (5242, 5266), True, 'import numpy as np\n'), ((5667, 5779), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SoftmaxCrossEntropyLoss"""'], {'inputs': "['x', 'y', 'w']", 'outputs': "['z']", 'reduction': 'reduction'}), "('SoftmaxCrossEntropyLoss', inputs=['x', 'y', 'w'],\n outputs=['z'], reduction=reduction)\n", (5688, 5779), False, 'import onnx\n'), ((5930, 5947), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5944, 5947), True, 'import numpy as np\n'), ((6017, 6056), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(5)', 'size': '(3,)'}), '(0, high=5, size=(3,))\n', (6034, 6056), True, 'import numpy as np\n'), ((6076, 6129), 'numpy.array', 'np.array', (['[0.9, 0.7, 0.8, 0.9, 0.9]'], {'dtype': 'np.float32'}), '([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n', (6084, 6129), True, 'import numpy as np\n'), ((1859, 1879), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)'], {}), '(3, 5)\n', (1873, 1879), True, 'import numpy as np\n'), ((2675, 2695), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)'], {}), '(3, 5)\n', (2689, 2695), True, 'import numpy as np\n'), ((3581, 3601), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)'], {}), '(3, 5)\n', (3595, 3601), True, 'import numpy as np\n'), ((4382, 4402), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)'], {}), '(3, 5)\n', (4396, 4402), True, 'import numpy as np\n'), ((5170, 5193), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)', '(2)'], {}), '(3, 5, 2)\n', (5184, 5193), True, 'import numpy as np\n'), ((5960, 5980), 'numpy.random.rand', 'np.random.rand', (['(3)', 
'(5)'], {}), '(3, 5)\n', (5974, 5980), True, 'import numpy as np\n')] |
import numpy as np
import math
__all__ = ["Atom", "Molecule"]
a0 = 5.2917720859E-11 # Bohr radius
eV = 0.000123984 # cm-1 to eV; eV = hc 10^2 / e
Eh = 27.2114 # Hartree energy
u = 1.66053892E-27 # atomic mass unit
me = 9.10938291E-31 # electron mass
atom_numbers = {'H': 1.0, 'C': 6.0, 'N': 7.0, 'O': 8.0, 'F': 9.0}
class Atom(object):
    """A single atom with element symbol, nuclear charge and coordinates.

    Keeps the equilibrium position (``coords``) together with a mutable
    working position (``current_coords``) that can be displaced and reset.
    Coordinates are presumably in Bohr (``angstrom_coords`` multiplies by
    the Bohr radius in Angstrom).
    """

    def __init__(self, symbol='H', charge=1.0, coords=(0, 0, 0)):
        """Create an atom.

        Args:
            symbol: element symbol, e.g. 'H' or 'C'.
            charge: nuclear charge.
            coords: equilibrium coordinates (length-3 sequence).
        """
        self.basis = ''
        self.symbol = symbol
        self.charge = charge
        # Store as float: with the previous int dtype (e.g. coords=[0,0,0])
        # an in-place fractional displacement in move() would fail with a
        # numpy casting error.
        self.coords = np.array(coords, dtype=float)
        self.reset()

    def __str__(self):
        return repr(self.coords)

    def angstrom_coords(self):
        """Return the equilibrium coordinates converted from Bohr to Angstrom (as repr string)."""
        return repr(self.coords * 0.52917720859)

    def move(self, disp=(0, 0, 0)):
        """Displace the working coordinates in place by ``disp``."""
        # Tuple default avoids the shared-mutable-default pitfall.
        self.current_coords += np.asarray(disp, dtype=float)

    def reset(self):
        """Reset the working coordinates back to the equilibrium position."""
        self.current_coords = np.array(self.coords)
class Molecule(object):
    """A molecule parsed from a Turbomole ``aoforce`` output file.

    Holds the atoms, normal-mode frequencies, per-atom mode displacement
    vectors and reduced masses, and offers helpers to displace the
    geometry along normal modes and to export Firefly/GAMESS coordinates.
    """
    def __init__(self, filename=None):
        # Per-atom and per-mode data; populated by read_aoforce_output().
        self.atoms = []
        self.modes_frequencies = []
        self.modes_displacement_vectors = []
        self.modes_reduced_masses = []
        if filename is not None:
            self.read_aoforce_output(filename)
    def number_of_atoms(self):
        """Return the number of atoms in the molecule."""
        return len(self.atoms)
    def number_of_modes(self):
        """Return the number of vibrational modes that were read."""
        return len(self.modes_frequencies)
    def reorder_atoms(self, order=[]):
        """Reorder atoms (and the per-atom mode displacement vectors)
        according to the 1-based index list ``order``.
        """
        if not len(order) == self.number_of_atoms():
            raise ValueError('order array does not match number of atoms')
        self.atoms = [self.atoms[i-1] for i in order]
        self.modes_displacement_vectors = [[mode[i-1] for i in order] for mode in self.modes_displacement_vectors]
    def reorder_modes(self, order=[]):
        """Reorder the normal modes according to the 1-based index list ``order``."""
        if not len(order) == self.number_of_modes():
            raise ValueError('order array does not match number of modes')
        self.modes_frequencies = [self.modes_frequencies[i-1] for i in order]
        self.modes_reduced_masses = [self.modes_reduced_masses[i-1] for i in order]
        self.modes_displacement_vectors = [self.modes_displacement_vectors[i-1] for i in order]
    def rotate(self, axis='x', degrees=90):
        """Rotate coordinates and mode displacement vectors about a
        Cartesian axis ('x', 'y' or 'z') by ``degrees``.
        """
        axes_names = {'x': [1,0,0], 'y': [0,1,0], 'z': [0,0,1]}
        axis = axes_names[axis.lower()]
        theta = degrees * np.pi / 180
        R = rotation_matrix(axis, theta)
        # Rotate both the equilibrium and the working coordinates.
        for atom in self.atoms:
            atom.coords = np.dot(R, atom.coords)
            atom.current_coords = np.dot(R, atom.current_coords)
        self.modes_displacement_vectors = [[np.dot(R, vector) for vector in mode] for mode in self.modes_displacement_vectors]
    def reset_to_equilibrium(self):
        """Reset every atom's working coordinates to its equilibrium position."""
        for atom in self.atoms:
            atom.reset()
    def add_atom_displacement(self, mode, Q):
        """Displace atoms by ``Q`` along normal ``mode`` (1-based index),
        on top of the current geometry.
        Both parameters may be equal-length lists of modes/amplitudes.
        """
        # displace atoms by Q along mode
        # parameters may be lists
        if type(mode) is not list:
            mode = [mode]
        if type(Q) is not list:
            Q = [Q]
        if not len(mode) == len(Q):
            raise ValueError('mode and Q list lengths do not match')
        for m, q in zip(mode, Q):
            for atom, disp in zip(self.atoms, self.modes_displacement_vectors[m-1]):
                atom.move(q*disp)
    def set_atom_displacement(self, mode, Q):
        """Like ``add_atom_displacement`` but starting from the equilibrium
        geometry rather than the current one.
        """
        # displace atoms by Q along mode
        # parameters may be lists
        if type(mode) is not list:
            mode = [mode]
        if type(Q) is not list:
            Q = [Q]
        if not len(mode) == len(Q):
            raise ValueError('mode and Q list lengths do not match')
        self.reset_to_equilibrium()
        self.add_atom_displacement(mode, Q)
    def firefly_coords(self, eq=False):
        """Return the coordinates formatted as Firefly/GAMESS input lines.
        Uses equilibrium coordinates when ``eq`` is True, otherwise the
        current (possibly displaced) coordinates.
        """
        inplines = []
        for atom in self.atoms:
            line = []
            line.append(atom.symbol)
            line.append('\t')
            line.append('%.1f' % atom.charge)
            line.append('\t')
            if eq == True:
                line.append('\t'.join(['%.14f'%x for x in atom.coords]))
            else:
                line.append('\t'.join(['%.14f'%x for x in atom.current_coords]))
            line.append('\n')
            inplines.append(''.join(line))
        return ''.join(inplines)
    def firefly_coords_with_basis(self, eq=False):
        """Same as ``firefly_coords`` but converts Bohr to Angstrom and
        appends each atom's basis-set string.
        """
        inplines = []
        for atom in self.atoms:
            line = []
            line.append(atom.symbol)
            line.append('\t')
            line.append('%.1f' % atom.charge)
            line.append('\t')
            # 0.52917720859 converts Bohr to Angstrom.
            if eq == True:
                line.append('\t'.join(['%.14f'%(x*0.52917720859) for x in atom.coords]))
            else:
                line.append('\t'.join(['%.14f'%(x*0.52917720859) for x in atom.current_coords]))
            line.append('\n')
            line.append(atom.basis)
            line.append('\n\n')
            inplines.append(''.join(line))
        return ''.join(inplines)
    def read_aoforce_output(self, filename='aoforce.out'):
        """Parse atoms, mode frequencies, displacement vectors and reduced
        masses from a Turbomole ``aoforce`` output file.
        Translational/rotational pseudo-modes are removed and the
        displacement vectors are converted to [amu^(1/2) * a0] units.
        NOTE: the nested loops deliberately share the file iterator ``f``
        so the inner loops consume lines for the outer loop.
        """
        with open(filename) as f:
            for line in f:
                # find and read coordinates from aoforce output
                if 'actual cartesian coordinates' in line:
                    for line in f:
                        atom_line = line.strip().split()
                        if len(atom_line) == 5:
                            # parse coordinates
                            atom_symbol = atom_line[1].upper()
                            atom_charge = atom_numbers[atom_symbol]
                            atom_coords = [float(i) for i in atom_line[2:5]]
                            atom = Atom(atom_symbol, atom_charge, atom_coords)
                            self.atoms.append(atom)
                        elif len(atom_line) == 1:
                            # ignore separator line
                            continue
                        else:
                            # we have reached the end,
                            # coordinates are followed by an empty line
                            break
                # find and read normal modes
                if line.strip().startswith('mode '):
                    # read mode numbers
                    mode_numbers = line.split()
                    del mode_numbers[0] # delete label
                    mode_numbers = [int(i) for i in mode_numbers]
                    # one displacement list per mode in this block
                    for _ in mode_numbers:
                        self.modes_displacement_vectors.append([])
                    #print mode_numbers
                    # nested loop for reading only this block of normal mode displacement vectors
                    for line in f:
                        stripped_line = line.strip()
                        # read mode frequencies
                        if stripped_line.startswith('frequency'):
                            frequencies = stripped_line.split()
                            del frequencies[0] # delete label
                            frequencies = [float(i) for i in frequencies]
                            self.modes_frequencies.extend(frequencies)
                            # continue with next line
                            continue
                        # read displacement vectors (rows starting with an atom number)
                        if stripped_line[:4].strip().isdigit():
                            # x-row carries the atom index/label; y and z rows follow
                            x_displacements = stripped_line.split()
                            y_displacements = next(f).split()
                            z_displacements = next(f).split()
                            # remove the leading label items
                            del x_displacements[:3]
                            del y_displacements[0]
                            del z_displacements[0]
                            for mode, x_disp, y_disp, z_disp in zip(mode_numbers, x_displacements, y_displacements, z_displacements):
                                disp_vector = np.array([float(i) for i in [x_disp, y_disp, z_disp]])
                                self.modes_displacement_vectors[mode-1].append(disp_vector)
                            # continue with next line
                            continue
                        # read reduced masses
                        if stripped_line.startswith('reduced mass'):
                            reduced_masses = stripped_line.split()
                            del reduced_masses[:2] # delete label
                            reduced_masses = [float(i) for i in reduced_masses]
                            self.modes_reduced_masses.extend(reduced_masses)
                            # reduced masses are last line of block
                            # quit nested loop
                            break
        # delete rotational and translational modes
        # (6 for a non-linear molecule, 5 otherwise -- NOTE(review): this
        # uses atom count > 2 as a proxy for non-linearity; confirm)
        if self.number_of_atoms() > 2:
            trans_rot_degrees_of_freedom = 6
        else:
            trans_rot_degrees_of_freedom = 5
        del self.modes_frequencies[:trans_rot_degrees_of_freedom]
        del self.modes_reduced_masses[:trans_rot_degrees_of_freedom]
        del self.modes_displacement_vectors[:trans_rot_degrees_of_freedom]
        # convert mode displacements from turbomole units to [amu^(1/2) a0]
        for idx, disp_vector in enumerate(self.modes_displacement_vectors):
            freq = self.modes_frequencies[idx]
            mred = self.modes_reduced_masses[idx]
            disp_au = disp_vector / np.sqrt(freq * eV / Eh * mred * u / me)
            self.modes_displacement_vectors[idx] = disp_au
def rotation_matrix(axis, theta):
    """Return the 3x3 matrix for a counterclockwise rotation of ``theta``
    radians about ``axis`` (Euler-Rodrigues formula).
    """
    unit = np.asarray(axis)
    unit = unit / math.sqrt(np.dot(unit, unit))
    # Quaternion components (w, x, y, z) of the half-angle rotation.
    w = math.cos(theta / 2.0)
    x, y, z = -unit * math.sin(theta / 2.0)
    return np.array([
        [w*w + x*x - y*y - z*z, 2.0 * (x*y + w*z), 2.0 * (x*z - w*y)],
        [2.0 * (x*y - w*z), w*w + y*y - x*x - z*z, 2.0 * (y*z + w*x)],
        [2.0 * (x*z + w*y), 2.0 * (y*z - w*x), w*w + z*z - x*x - y*y],
    ])
| [
"numpy.asarray",
"math.sin",
"numpy.array",
"math.cos",
"numpy.dot",
"numpy.sqrt"
] | [((9496, 9512), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (9506, 9512), True, 'import numpy as np\n'), ((9567, 9588), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (9575, 9588), False, 'import math\n'), ((9736, 9907), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (9744, 9907), True, 'import numpy as np\n'), ((538, 554), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (546, 554), True, 'import numpy as np\n'), ((780, 794), 'numpy.array', 'np.array', (['disp'], {}), '(disp)\n', (788, 794), True, 'import numpy as np\n'), ((847, 868), 'numpy.array', 'np.array', (['self.coords'], {}), '(self.coords)\n', (855, 868), True, 'import numpy as np\n'), ((9607, 9628), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (9615, 9628), False, 'import math\n'), ((2347, 2369), 'numpy.dot', 'np.dot', (['R', 'atom.coords'], {}), '(R, atom.coords)\n', (2353, 2369), True, 'import numpy as np\n'), ((2404, 2434), 'numpy.dot', 'np.dot', (['R', 'atom.current_coords'], {}), '(R, atom.current_coords)\n', (2410, 2434), True, 'import numpy as np\n'), ((9539, 9557), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (9545, 9557), True, 'import numpy as np\n'), ((2480, 2497), 'numpy.dot', 'np.dot', (['R', 'vector'], {}), '(R, vector)\n', (2486, 2497), True, 'import numpy as np\n'), ((9218, 9257), 'numpy.sqrt', 'np.sqrt', (['(freq * eV / Eh * mred * u / me)'], {}), '(freq * eV / Eh * mred * u / me)\n', (9225, 9257), True, 'import numpy as np\n')] |
import argparse
import numpy as np
from Tester.tester import Tester
from Trainer.Models.model_gnet_light import ModelGNetLight
from Trainer.Models.model_gnet_deep import ModelGNetDeep
from Trainer.Models.model_gnet_deep_v2 import ModelGNetDeepV2
from Trainer.Models.model_gnet_deep_deep import ModelGNetDeepDeep
# Command-line interface for running inference with a trained CNN model.
parser = argparse.ArgumentParser(description='cnn-number-detection')
# Name of the model class to instantiate (resolved at runtime in main()).
parser.add_argument(
    '--model_type',
    help='type of model to use',
    default='ModelGNetDeep')
# Path to the saved/trained model to load.
parser.add_argument(
    '--model_path', help='path to the saved model',
    default='CNN-gnet-deep-ultimate-data-15-epochs-128-batch-size')
# A single image file to run inference on.
parser.add_argument(
    '--test_image',
    help='path to the image for testing the model')
parser.add_argument('--test_folder', help='folder with images for inference')
# Flag: feed a randomly generated array instead of reading a file.
parser.add_argument(
    '--test_on_random',
    help='if a randomly generated image should be used for inference',
    action='store_true')
args = parser.parse_args()
def main():
    """Run inference with the selected model on an image, a random array or a folder."""
    # Resolve the model class from an explicit whitelist instead of eval():
    # eval() on a command-line string would execute arbitrary code.
    model_classes = {
        'ModelGNetLight': ModelGNetLight,
        'ModelGNetDeep': ModelGNetDeep,
        'ModelGNetDeepV2': ModelGNetDeepV2,
        'ModelGNetDeepDeep': ModelGNetDeepDeep,
    }
    try:
        model_class = model_classes[args.model_type]
    except KeyError:
        raise ValueError('unknown model type: %s' % args.model_type)
    model_obj = model_class('Tester', load_data=False)
    tester = Tester(model_obj, args.model_path)
    if args.test_image is not None:
        # test the model with a given image
        tester.test_model_with_image(args.test_image)
    elif args.test_on_random and model_obj.uses_color:
        # color model: feed a random 3-channel (H, W, 3) image.
        # (Previously this branch generated a gray (28, 28) array and the
        # gray-scale branch below generated the color one -- swapped.)
        image_random_color = np.random.randint(
            0, 255, size=(28, 28, 3), dtype=np.uint8)
        tester.test_model_with_array(image_random_color)
    elif args.test_on_random and not model_obj.uses_color:
        # gray-scale model: feed a random single-channel (H, W) image
        image_random_gray = np.random.randint(
            0, 255, size=(28, 28), dtype=np.uint8)
        tester.test_model_with_array(image_random_gray)
    elif args.test_folder is not None:
        # test the model with a folder of images
        tester.test_model_with_folder('continuous', display_all=False)


if __name__ == "__main__":
    main()
| [
"Tester.tester.Tester",
"numpy.random.randint",
"argparse.ArgumentParser"
] | [((323, 382), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""cnn-number-detection"""'}), "(description='cnn-number-detection')\n", (346, 382), False, 'import argparse\n'), ((1132, 1166), 'Tester.tester.Tester', 'Tester', (['model_obj', 'args.model_path'], {}), '(model_obj, args.model_path)\n', (1138, 1166), False, 'from Tester.tester import Tester\n'), ((1442, 1498), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(28, 28)', 'dtype': 'np.uint8'}), '(0, 255, size=(28, 28), dtype=np.uint8)\n', (1459, 1498), True, 'import numpy as np\n'), ((1708, 1767), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(28, 28, 3)', 'dtype': 'np.uint8'}), '(0, 255, size=(28, 28, 3), dtype=np.uint8)\n', (1725, 1767), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from ase import Atoms
import matid.geometry
import numpy as np
class System(Atoms):
    """ASE ``Atoms`` subclass that additionally carries symmetry metadata
    (Wyckoff letters and atom-equivalence indices) and exposes coordinate
    transformations implemented in ``matid.geometry``.
    """
    def __init__(
            self,
            symbols=None,
            positions=None,
            numbers=None,
            tags=None,
            momenta=None,
            masses=None,
            magmoms=None,
            charges=None,
            scaled_positions=None,
            cell=None,
            pbc=None,
            celldisp=None,
            constraint=None,
            calculator=None,
            info=None,
            wyckoff_letters=None,
            equivalent_atoms=None):
        # Forward the standard arguments to ase.Atoms unchanged.
        super(System, self).__init__(
            symbols,
            positions,
            numbers,
            tags,
            momenta,
            masses,
            magmoms,
            charges,
            scaled_positions,
            cell,
            pbc,
            celldisp,
            constraint,
            calculator,
            info)
        # Extra symmetry metadata carried on top of ase.Atoms.
        self.wyckoff_letters = wyckoff_letters
        self.equivalent_atoms = equivalent_atoms
    @staticmethod
    def from_atoms(atoms):
        """Creates a System object from an ASE.Atoms object.
        Only positions, symbols, cell and periodic boundary conditions
        are copied over.
        """
        system = System(
            positions=atoms.get_positions(),
            symbols=atoms.get_chemical_symbols(),
            cell=atoms.get_cell(),
            pbc=atoms.get_pbc(),
        )
        return system
    def to_scaled(self, positions, wrap=False):
        """Used to transform a set of positions to the basis defined by the
        cell of this system.
        Args:
            positions (numpy.ndarray): The positions to scale
            wrap (bool): Whether the positions should be wrapped
                inside the cell.
        Returns:
            numpy.ndarray: The scaled positions
        """
        return matid.geometry.to_scaled(
            self.get_cell(),
            positions,
            wrap,
            self.get_pbc())
    def to_cartesian(self, scaled_positions, wrap=False):
        """Used to transform a set of relative positions to the cartesian basis
        defined by the cell of this system.
        Args:
            scaled_positions (numpy.ndarray): The relative positions to
                transform.
            wrap (bool): Whether the positions should be wrapped
                inside the cell.
        Returns:
            numpy.ndarray: The cartesian positions
        """
        return matid.geometry.to_cartesian(
            self.get_cell(),
            scaled_positions,
            wrap,
            self.get_pbc())
    def translate(self, translation, relative=False):
        """Translates the positions by the given translation.
        Args:
            translation (1x3 numpy.array): The translation to apply.
            relative (bool): True if given translation is relative to cell
                vectors.
        """
        matid.geometry.translate(self, translation, relative)
    def get_wyckoff_letters(self):
        """Returns a list of Wyckoff letters for the atoms in the system. This
        information is only available if it was explicitly set.
        Returns:
            np.ndarray: Wyckoff letters as a list of strings.
        """
        return np.array(self.wyckoff_letters)
    def set_wyckoff_letters(self, wyckoff_letters):
        """Used to set the Wyckoff letters for the atoms in this system.
        Args:
            wyckoff_letters(sequence of str): The Wyckoff letters for the atoms
                in this system.
        """
        self.wyckoff_letters = np.array(wyckoff_letters)
    def get_equivalent_atoms(self):
        """Returns a list of indices marking the equivalence for the atoms in
        the system. This information is only available if it was explicitly
        set.
        Returns:
            np.ndarray: The equivalence information as a list of integers,
            where the same integer means equivalence and an integer is given
            for each atom.
        """
        return np.array(self.equivalent_atoms)
    def set_equivalent_atoms(self, equivalent_atoms):
        """Used to set the list of indices marking the equivalence for the
        atoms in this system.
        Args:
            equivalent_atoms(sequence of int): list of indices marking the
                equivalence for the atoms in this system.
        """
        self.equivalent_atoms = np.array(equivalent_atoms)
| [
"numpy.array"
] | [((3268, 3298), 'numpy.array', 'np.array', (['self.wyckoff_letters'], {}), '(self.wyckoff_letters)\n', (3276, 3298), True, 'import numpy as np\n'), ((3594, 3619), 'numpy.array', 'np.array', (['wyckoff_letters'], {}), '(wyckoff_letters)\n', (3602, 3619), True, 'import numpy as np\n'), ((4033, 4064), 'numpy.array', 'np.array', (['self.equivalent_atoms'], {}), '(self.equivalent_atoms)\n', (4041, 4064), True, 'import numpy as np\n'), ((4441, 4467), 'numpy.array', 'np.array', (['equivalent_atoms'], {}), '(equivalent_atoms)\n', (4449, 4467), True, 'import numpy as np\n')] |
import csv
import os
import h5py
import numpy as np
from CNNectome.utils import config_loader
shift = {"A": 1498, "B": 1940, "C": 10954}
def compare(filepath1, filepath2, targetfile, cleft_id_shift, contained_ids):
    """
    Rewrite the cleft-id column of a partner CSV using a reference CSV.

    :param filepath1: csv-file that has the corrected cleft associations
    :param filepath2: csv-file that should be corrected
    :param targetfile: csv-file that should be created
    :param cleft_id_shift: offset subtracted from looked-up cleft ids
    :param contained_ids: cleft ids actually present in the cleft volume;
        corrected ids not in this set are mapped to -3
    :return:
    """
    file1 = open(filepath1, "r")
    file2 = open(filepath2, "r")
    target = open(targetfile, "w")
    reader1 = csv.reader(file1)
    reader2 = csv.reader(file2)
    writer = csv.writer(target)
    lookup_by_coord = dict()
    # Copy the header row ("pre_label", ...) to the output.  The extra
    # next(reader1) also consumes the row immediately after the header.
    # NOTE(review): presumably that row is a units/separator line --
    # confirm against the actual CSV layout.
    for row in reader1:
        if row[0] == "pre_label":
            writer.writerow(row)
            next(reader1)
            break
    # Skip past the header (and the following row) of the second file too,
    # without writing it again.
    for row in reader2:
        if row[0] == "pre_label":
            next(reader2)
            break
    # Build a lookup from (pre, post) partner coordinates to the corrected
    # cleft id found in the reference file.
    for row in reader1:
        pre_coord = (float(row[2]), float(row[3]), float(row[4]))
        post_coord = (float(row[7]), float(row[8]), float(row[9]))
        lookup_by_coord[(pre_coord, post_coord)] = int(row[10])
    print(lookup_by_coord)
    for row in reader2:
        # Only rows whose cleft id is unassigned (-1) get corrected;
        # all other rows are copied through unchanged.
        if int(row[10]) == -1:
            pre_coord = (float(row[2]), float(row[3]), float(row[4]))
            post_coord = (float(row[7]), float(row[8]), float(row[9]))
            try:
                cleft = lookup_by_coord[(pre_coord, post_coord)]
                if cleft != -1:
                    # NOTE(review): ids 21827/3580 are special-cased to -2;
                    # their origin is not evident from this file.
                    if cleft == 21827 or cleft == 3580:
                        cleft_shifted = -2
                    else:
                        cleft_shifted = cleft - cleft_id_shift
                        if cleft_shifted not in contained_ids:
                            # shifted id missing from the cleft volume
                            cleft_shifted = -3
                else:
                    cleft_shifted = -1
            except KeyError:
                # partner pair not present in the reference file
                cleft_shifted = -4
                pass
            writer.writerow(row[:-2] + [cleft_shifted, ""])
        else:
            writer.writerow(row)
    file1.close()
    file2.close()
    target.close()
def all_clefts(cleftfile):
    """Return the sorted unique cleft label values present in the volume.

    :param cleftfile: path to an HDF5 file containing a
        'volumes/labels/clefts' dataset.
    :return: numpy.ndarray of the unique label values.
    """
    # Context manager so the HDF5 file handle is closed again; the
    # previous version opened the file and never closed it.
    with h5py.File(cleftfile, "r") as hf:
        return np.unique(hf["volumes/labels/clefts"][:])
if __name__ == "__main__":
    # Resolve the data-file templates from the project configuration.
    cfg = config_loader.get_config()
    cremi17_dir = cfg["synapses"]["cremi17_data_path"]
    cremi16_dir = cfg["synapses"]["cremi16_data_path"]
    corrected_template = os.path.join(cremi17_dir, "cleft-partners_{0:}_2017.csv")
    uncorrected_template = os.path.join(cremi16_dir, "cleft-partners-{0:}-20160501.aligned.csv")
    output_template = os.path.join(cremi16_dir, "cleft-partners-{0:}-20160501.aligned.corrected.csv")
    cleft_template = os.path.join(cremi16_dir, "sample_{0:}_padded_20160501.aligned.0bg.hdf")
    # Correct the partner lists of every CREMI sample.
    for sample in ["A", "B", "C"]:
        valid_ids = all_clefts(cleft_template.format(sample))
        compare(
            corrected_template.format(sample),
            uncorrected_template.format(sample),
            output_template.format(sample),
            shift[sample],
            valid_ids,
        )
| [
"h5py.File",
"csv.reader",
"csv.writer",
"os.path.join",
"CNNectome.utils.config_loader.get_config",
"numpy.unique"
] | [((572, 589), 'csv.reader', 'csv.reader', (['file1'], {}), '(file1)\n', (582, 589), False, 'import csv\n'), ((604, 621), 'csv.reader', 'csv.reader', (['file2'], {}), '(file2)\n', (614, 621), False, 'import csv\n'), ((635, 653), 'csv.writer', 'csv.writer', (['target'], {}), '(target)\n', (645, 653), False, 'import csv\n'), ((2123, 2148), 'h5py.File', 'h5py.File', (['cleftfile', '"""r"""'], {}), "(cleftfile, 'r')\n", (2132, 2148), False, 'import h5py\n'), ((2160, 2201), 'numpy.unique', 'np.unique', (["hf['volumes/labels/clefts'][:]"], {}), "(hf['volumes/labels/clefts'][:])\n", (2169, 2201), True, 'import numpy as np\n'), ((2242, 2268), 'CNNectome.utils.config_loader.get_config', 'config_loader.get_config', ([], {}), '()\n', (2266, 2268), False, 'from CNNectome.utils import config_loader\n'), ((2281, 2368), 'os.path.join', 'os.path.join', (["conf['synapses']['cremi17_data_path']", '"""cleft-partners_{0:}_2017.csv"""'], {}), "(conf['synapses']['cremi17_data_path'],\n 'cleft-partners_{0:}_2017.csv')\n", (2293, 2368), False, 'import os\n'), ((2377, 2476), 'os.path.join', 'os.path.join', (["conf['synapses']['cremi16_data_path']", '"""cleft-partners-{0:}-20160501.aligned.csv"""'], {}), "(conf['synapses']['cremi16_data_path'],\n 'cleft-partners-{0:}-20160501.aligned.csv')\n", (2389, 2476), False, 'import os\n'), ((2487, 2596), 'os.path.join', 'os.path.join', (["conf['synapses']['cremi16_data_path']", '"""cleft-partners-{0:}-20160501.aligned.corrected.csv"""'], {}), "(conf['synapses']['cremi16_data_path'],\n 'cleft-partners-{0:}-20160501.aligned.corrected.csv')\n", (2499, 2596), False, 'import os\n'), ((2606, 2708), 'os.path.join', 'os.path.join', (["conf['synapses']['cremi16_data_path']", '"""sample_{0:}_padded_20160501.aligned.0bg.hdf"""'], {}), "(conf['synapses']['cremi16_data_path'],\n 'sample_{0:}_padded_20160501.aligned.0bg.hdf')\n", (2618, 2708), False, 'import os\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import os.path
import gc
from skimage import io, color
import tensorflow as tf
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
from sklearn import svm
from sklearn.datasets._samples_generator import make_blobs
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
from sklearn.model_selection import train_test_split
import random
from sklearn import metrics
from sklearn.metrics import precision_score
import scipy.stats as stats
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
'''
Presets & Hyper-parameters
'''
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"  # per-subject metadata table (read below)
DATASET_PATH = "./data/train/"  # root directory containing the raw data files
pd.set_option('display.width', 200) # for display width
DYNAMIC_SCALEUP = False  # NOTE(review): not used in this section; meaning unclear -- confirm
INTERPOLATION_METHOD = "catrom"  # interpolation method name (presumably Catmull-Rom) -- confirm
CASE_PATH = "./catrom_static"  # case/output directory; usage not visible in this section
SAVE_FEATURE_IMAGE = True  # whether to save generated feature images (usage not visible here)
FEATURE_LENGTH = 30 # n-dimensional data feature only use
NUMBER_OF_SAMPLES = 299 # number of augmented data
mu, sigma = 0, 1 # normal distribution random parameter for data augmentation
FEATURE_MAX_LENGTH = 115 # Maximum feature length
NUMBER_OF_RANDOM_SELECTION = 5  # NOTE(review): usage not visible in this section
MAX_TRAIN_ITERATION = -1 # infinity
SVM_KERNEL_METHOD = 'linear'  # kernel used for the SVM classifier
NUMBER_OF_TESTING = 10  # number of test repetitions (usage not visible here)
IMAGE_HEIGHT = 369  # NOTE(review): usage not visible in this section
'''
1. Load configuration file
'''
# One row per recording session; the index column labels each session.
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
fsr_dataframe = {}  # raw FSR matrix data, keyed by session index
seat_dataframe = {}  # raw seat loadcell data, keyed by session index
for idx in data_config.index:
    fsr_filepath = DATASET_PATH+data_config.loc[idx, "fsr_matrix_1d_datafile"] # set FSR matrix data filepath
    seat_filepath = DATASET_PATH+data_config.loc[idx, "seat_datafile"] # set Seat data filepath
    print(idx, ") read data files : ", fsr_filepath, ",", seat_filepath)
    # only the first 162 columns of the FSR file are kept
    fsr_dataframe[idx] = pd.read_csv(fsr_filepath, header=0, index_col=False).iloc[:,0:162] # read FSR matrix data file
    seat_dataframe[idx] = pd.read_csv(seat_filepath, header=0, index_col=False) # read Seat data file
    # clear unnecessary columns
    del seat_dataframe[idx]['Measurement time'] # remove unnecessary column
    del fsr_dataframe[idx]['Measurement Time (sec)'] # remove unnecessary column
'''
2. Source data segmentation
'''
# Cut each session's time series into a "standard" window and a "relax"
# window using the mtime ranges listed in the configuration table
# (presumably two sitting postures -- confirm with the data protocol).
fsr_dataframe_standard_segment = {}
fsr_dataframe_relax_segment = {}
seat_loadcell_dataframe_standard_segment = {}
seat_loadcell_dataframe_relax_segment = {}
for idx in data_config.index:
    # start/end measurement times for both windows of this session
    mtime = data_config.loc[idx, ['standard_s_mtime', "standard_e_mtime", "relax_s_mtime", "relax_e_mtime"]]
    # seat loadcell segmentation
    seat_loadcell_dataframe_standard_segment[idx] = seat_dataframe[idx][(seat_dataframe[idx]['mtime']>=mtime.standard_s_mtime) & (seat_dataframe[idx]['mtime']<=mtime.standard_e_mtime)]
    seat_loadcell_dataframe_relax_segment[idx] = seat_dataframe[idx][(seat_dataframe[idx]['mtime']>=mtime.relax_s_mtime) & (seat_dataframe[idx]['mtime']<=mtime.relax_e_mtime)]
    # fsr matrix segmentation
    fsr_dataframe_standard_segment[idx] = fsr_dataframe[idx][(fsr_dataframe[idx]['mtime']>=mtime.standard_s_mtime) & (fsr_dataframe[idx]['mtime']<=mtime.standard_e_mtime)]
    fsr_dataframe_relax_segment[idx] = fsr_dataframe[idx][(fsr_dataframe[idx]['mtime']>=mtime.relax_s_mtime) & (fsr_dataframe[idx]['mtime']<=mtime.relax_e_mtime)]
    print("FSR Segments@Standard size : ", len(fsr_dataframe_standard_segment[idx]), ", FSR Segments@Relax size : ", len(fsr_dataframe_relax_segment[idx]))
    print("Seat Segments@Standard size : ", len(seat_loadcell_dataframe_standard_segment[idx]), ", Seat Segments@Relax size : ", len(seat_loadcell_dataframe_relax_segment[idx]))
# Pearson correlation of each candidate predictor with the best-fit seat
# angle, for both postures.  (Fix: the printed label was previously
# misspelled as "Psearson".)
# height
source = data_config.loc[:, ['user_height', 'bestfit_angle_standard']]
corr = source.corr(method='pearson')
print('Pearson Correlation Coeff. (standard)\n', corr)
print('Pearson Correlation :', stats.pearsonr(source['user_height'], source['bestfit_angle_standard']))
source = data_config.loc[:, ['user_height', 'bestfit_angle_relax']]
corr = source.corr(method='pearson')
print('Pearson Correlation Coeff. (relax)\n', corr)
print('Pearson Correlation :', stats.pearsonr(source['user_height'], source['bestfit_angle_relax']))
# weight
source = data_config.loc[:, ['user_weight', 'bestfit_angle_standard']]
corr = source.corr(method='pearson')
print('Pearson Correlation Coeff. (standard)\n', corr)
print('Pearson Correlation :', stats.pearsonr(source['user_weight'], source['bestfit_angle_standard']))
source = data_config.loc[:, ['user_weight', 'bestfit_angle_relax']]
corr = source.corr(method='pearson')
print('Pearson Correlation Coeff. (relax)\n', corr)
print('Pearson Correlation :', stats.pearsonr(source['user_weight'], source['bestfit_angle_relax']))
# age
source = data_config.loc[:, ['user_age', 'bestfit_angle_standard']]
corr = source.corr(method='pearson')
print('Pearson Correlation Coeff. (standard)\n', corr)
print('Pearson Correlation :', stats.pearsonr(source['user_age'], source['bestfit_angle_standard']))
source = data_config.loc[:, ['user_age', 'bestfit_angle_relax']]
corr = source.corr(method='pearson')
print('Pearson Correlation Coeff. (relax)\n', corr)
print('Pearson Correlation :', stats.pearsonr(source['user_age'], source['bestfit_angle_relax']))
# bmi (body-mass index: weight / height[m]^2)
source = data_config.loc[:, ['user_weight','user_height']]
bmi = source['user_weight']/(source['user_height']/100*source['user_height']/100)
target = data_config.loc[:, ['bestfit_angle_standard']]
target["bmi"] = bmi
corr = target.corr(method='pearson')
print('Pearson Correlation Coeff. (standard)\n', corr)
print('Pearson Correlation :', stats.pearsonr(target['bmi'], target['bestfit_angle_standard']))
target = data_config.loc[:, ['bestfit_angle_relax']]
target["bmi"] = bmi
corr = target.corr(method='pearson')
print('Pearson Correlation Coeff. (relax)\n', corr)
print('Pearson Correlation :', stats.pearsonr(target['bmi'], target['bestfit_angle_relax']))
# bmr (basal metabolic rate, Harris-Benedict-style formula)
source = data_config.loc[:, ['user_weight','user_height', 'user_age']]
bmr = 66.47+(13.75*source['user_weight'])+(5*source['user_height'])-(6.76*source['user_age'])
target = data_config.loc[:, ['bestfit_angle_standard']]
target["bmr"] = bmr
corr = target.corr(method='pearson')
print('Pearson Correlation Coeff. (standard)\n', corr)
print('Pearson Correlation :', stats.pearsonr(target['bmr'], target['bestfit_angle_standard']))
target = data_config.loc[:, ['bestfit_angle_relax']]
target["bmr"] = bmr
corr = target.corr(method='pearson')
print('Pearson Correlation Coeff. (relax)\n', corr)
print('Pearson Correlation :', stats.pearsonr(target['bmr'], target['bestfit_angle_relax']))
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
X = data_config.loc[:, ['user_height', 'user_weight', 'user_age']]
bmr = 66.47+(13.75*X['user_weight'])+(5*X['user_height'])-(6.76*X['user_age'])
bmi = X['user_weight']/(X['user_height']/100*X['user_height']/100)
X['bmr'] = bmr
X['bmi'] = bmi
y = data_config.loc[:, ['bestfit_angle_standard']]
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(y), test_size=0.2, random_state=42)
pipelines = []
pipelines.append(('ScaledLR', Pipeline([('Scaler', StandardScaler()),('LR',LinearRegression())])))
pipelines.append(('ScaledLASSO', Pipeline([('Scaler', StandardScaler()),('LASSO', Lasso())])))
pipelines.append(('ScaledEN', Pipeline([('Scaler', StandardScaler()),('EN', ElasticNet())])))
pipelines.append(('ScaledKNN', Pipeline([('Scaler', StandardScaler()),('KNN', KNeighborsRegressor())])))
pipelines.append(('ScaledCART', Pipeline([('Scaler', StandardScaler()),('CART', DecisionTreeRegressor())])))
pipelines.append(('ScaledGBM', Pipeline([('Scaler', StandardScaler()),('GBM', GradientBoostingRegressor())])))
results = []
names = []
for name, model in pipelines:
kfold = KFold(n_splits=10, random_state=21, shuffle=True)
cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring='neg_mean_squared_error')
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(cv_results)
print(msg) | [
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.preprocessing.StandardScaler",
"sklearn.tree.DecisionTreeRegressor",
"numpy.ravel",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"sklearn.linear_model.ElasticNet",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.model_selectio... | [((751, 786), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(200)'], {}), "('display.width', 200)\n", (764, 786), True, 'import pandas as pd\n'), ((1346, 1405), 'pandas.read_csv', 'pd.read_csv', (['CONFIGURATION_FILE_PATH'], {'header': '(0)', 'index_col': '(0)'}), '(CONFIGURATION_FILE_PATH, header=0, index_col=0)\n', (1357, 1405), True, 'import pandas as pd\n'), ((1903, 1956), 'pandas.read_csv', 'pd.read_csv', (['seat_filepath'], {'header': '(0)', 'index_col': '(False)'}), '(seat_filepath, header=0, index_col=False)\n', (1914, 1956), True, 'import pandas as pd\n'), ((3808, 3879), 'scipy.stats.pearsonr', 'stats.pearsonr', (["source['user_height']", "source['bestfit_angle_standard']"], {}), "(source['user_height'], source['bestfit_angle_standard'])\n", (3822, 3879), True, 'import scipy.stats as stats\n'), ((4071, 4139), 'scipy.stats.pearsonr', 'stats.pearsonr', (["source['user_height']", "source['bestfit_angle_relax']"], {}), "(source['user_height'], source['bestfit_angle_relax'])\n", (4085, 4139), True, 'import scipy.stats as stats\n'), ((4346, 4417), 'scipy.stats.pearsonr', 'stats.pearsonr', (["source['user_weight']", "source['bestfit_angle_standard']"], {}), "(source['user_weight'], source['bestfit_angle_standard'])\n", (4360, 4417), True, 'import scipy.stats as stats\n'), ((4609, 4677), 'scipy.stats.pearsonr', 'stats.pearsonr', (["source['user_weight']", "source['bestfit_angle_relax']"], {}), "(source['user_weight'], source['bestfit_angle_relax'])\n", (4623, 4677), True, 'import scipy.stats as stats\n'), ((4879, 4947), 'scipy.stats.pearsonr', 'stats.pearsonr', (["source['user_age']", "source['bestfit_angle_standard']"], {}), "(source['user_age'], source['bestfit_angle_standard'])\n", (4893, 4947), True, 'import scipy.stats as stats\n'), ((5136, 5201), 'scipy.stats.pearsonr', 'stats.pearsonr', (["source['user_age']", "source['bestfit_angle_relax']"], {}), "(source['user_age'], source['bestfit_angle_relax'])\n", (5150, 
5201), True, 'import scipy.stats as stats\n'), ((5553, 5616), 'scipy.stats.pearsonr', 'stats.pearsonr', (["target['bmi']", "target['bestfit_angle_standard']"], {}), "(target['bmi'], target['bestfit_angle_standard'])\n", (5567, 5616), True, 'import scipy.stats as stats\n'), ((5814, 5874), 'scipy.stats.pearsonr', 'stats.pearsonr', (["target['bmi']", "target['bestfit_angle_relax']"], {}), "(target['bmi'], target['bestfit_angle_relax'])\n", (5828, 5874), True, 'import scipy.stats as stats\n'), ((6250, 6313), 'scipy.stats.pearsonr', 'stats.pearsonr', (["target['bmr']", "target['bestfit_angle_standard']"], {}), "(target['bmr'], target['bestfit_angle_standard'])\n", (6264, 6313), True, 'import scipy.stats as stats\n'), ((6511, 6571), 'scipy.stats.pearsonr', 'stats.pearsonr', (["target['bmr']", "target['bestfit_angle_relax']"], {}), "(target['bmr'], target['bestfit_angle_relax'])\n", (6525, 6571), True, 'import scipy.stats as stats\n'), ((7489, 7500), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (7497, 7500), True, 'import numpy as np\n'), ((8230, 8279), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'random_state': '(21)', 'shuffle': '(True)'}), '(n_splits=10, random_state=21, shuffle=True)\n', (8235, 8279), False, 'from sklearn.model_selection import KFold\n'), ((8297, 8386), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': 'kfold', 'scoring': '"""neg_mean_squared_error"""'}), "(model, X_train, y_train, cv=kfold, scoring=\n 'neg_mean_squared_error')\n", (8312, 8386), False, 'from sklearn.model_selection import cross_val_score\n'), ((1782, 1834), 'pandas.read_csv', 'pd.read_csv', (['fsr_filepath'], {'header': '(0)', 'index_col': '(False)'}), '(fsr_filepath, header=0, index_col=False)\n', (1793, 1834), True, 'import pandas as pd\n'), ((7601, 7617), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7615, 7617), False, 'from sklearn.preprocessing import 
StandardScaler\n'), ((7625, 7643), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7641, 7643), False, 'from sklearn.linear_model import LinearRegression\n'), ((7703, 7719), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7717, 7719), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7731, 7738), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (7736, 7738), False, 'from sklearn.linear_model import Lasso\n'), ((7795, 7811), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7809, 7811), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7820, 7832), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (7830, 7832), False, 'from sklearn.linear_model import ElasticNet\n'), ((7890, 7906), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7904, 7906), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7916, 7937), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (7935, 7937), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((7996, 8012), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8010, 8012), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8023, 8046), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (8044, 8046), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((8104, 8120), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8118, 8120), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8130, 8157), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {}), '()\n', (8155, 8157), False, 'from sklearn.ensemble import GradientBoostingRegressor\n')] |
import numpy as np
from compas_slicer.slicers import BaseSlicer
import logging
import progressbar
from compas_slicer.parameters import get_param
from compas_slicer.pre_processing import assign_interpolation_distance_to_mesh_vertices
from compas_slicer.slicers.slice_utilities import ScalarFieldContours
from compas_slicer.geometry import VerticalLayersManager
logger = logging.getLogger('logger')
__all__ = ['InterpolationSlicer']
class InterpolationSlicer(BaseSlicer):
"""
Generates non-planar contours that interpolate user-defined boundaries.
Attributes
----------
mesh: :class: 'compas.datastructures.Mesh'
Input mesh, it must be a triangular mesh (i.e. no quads or n-gons allowed)
Note that the topology of the mesh matters, irregular tesselation can lead to undesired results.
We recommend to 1)re-topologize, 2) triangulate, and 3) weld your mesh in advance.
preprocessor: :class: 'compas_slicer.pre_processing.InterpolationSlicingPreprocessor'
parameters: dict
"""
def __init__(self, mesh, preprocessor=None, parameters=None):
logger.info('InterpolationSlicer')
BaseSlicer.__init__(self, mesh)
if preprocessor: # make sure the mesh of the preprocessor and the mesh of the slicer match
assert len(list(mesh.vertices())) == len(list(preprocessor.mesh.vertices()))
self.parameters = parameters if parameters else {}
self.preprocessor = preprocessor
self.n_multiplier = 1.0
def generate_paths(self):
""" Generates curved paths. """
assert self.preprocessor, 'You need to provide a pre-processor in order to generate paths.'
avg_layer_height = get_param(self.parameters, key='avg_layer_height', defaults_type='layers')
n = find_no_of_isocurves(self.preprocessor.target_LOW, self.preprocessor.target_HIGH, avg_layer_height)
params_list = get_interpolation_parameters_list(n)
logger.info('%d paths will be generated' % n)
max_dist = get_param(self.parameters, key='vertical_layers_max_centroid_dist', defaults_type='layers')
vertical_layers_manager = VerticalLayersManager(max_dist)
# create paths + layers
with progressbar.ProgressBar(max_value=len(params_list)) as bar:
for i, param in enumerate(params_list):
assign_interpolation_distance_to_mesh_vertices(self.mesh, param, self.preprocessor.target_LOW,
self.preprocessor.target_HIGH)
contours = ScalarFieldContours(self.mesh)
contours.compute()
contours.add_to_vertical_layers_manager(vertical_layers_manager)
bar.update(i) # advance progress bar
self.layers = vertical_layers_manager.layers
def find_no_of_isocurves(target_0, target_1, avg_layer_height=1.1):
""" Returns the average number of isocurves that can cover the get_distance from target_0 to target_1. """
avg_ds0 = target_0.get_avg_distances_from_other_target(target_1)
avg_ds1 = target_1.get_avg_distances_from_other_target(target_0)
number_of_curves = ((avg_ds0 + avg_ds1) * 0.5) / avg_layer_height
return max(1, int(number_of_curves))
def get_interpolation_parameters_list(number_of_curves):
""" Returns a list of #number_of_curves floats from 0.001 to 0.997. """
# t_list = [0.001]
t_list = []
a = list(np.arange(number_of_curves + 1) / (number_of_curves + 1))
a.pop(0)
t_list.extend(a)
t_list.append(0.997)
return t_list
if __name__ == "__main__":
pass
| [
"compas_slicer.parameters.get_param",
"compas_slicer.slicers.BaseSlicer.__init__",
"numpy.arange",
"compas_slicer.geometry.VerticalLayersManager",
"compas_slicer.pre_processing.assign_interpolation_distance_to_mesh_vertices",
"compas_slicer.slicers.slice_utilities.ScalarFieldContours",
"logging.getLogge... | [((370, 397), 'logging.getLogger', 'logging.getLogger', (['"""logger"""'], {}), "('logger')\n", (387, 397), False, 'import logging\n'), ((1152, 1183), 'compas_slicer.slicers.BaseSlicer.__init__', 'BaseSlicer.__init__', (['self', 'mesh'], {}), '(self, mesh)\n', (1171, 1183), False, 'from compas_slicer.slicers import BaseSlicer\n'), ((1706, 1780), 'compas_slicer.parameters.get_param', 'get_param', (['self.parameters'], {'key': '"""avg_layer_height"""', 'defaults_type': '"""layers"""'}), "(self.parameters, key='avg_layer_height', defaults_type='layers')\n", (1715, 1780), False, 'from compas_slicer.parameters import get_param\n'), ((2026, 2121), 'compas_slicer.parameters.get_param', 'get_param', (['self.parameters'], {'key': '"""vertical_layers_max_centroid_dist"""', 'defaults_type': '"""layers"""'}), "(self.parameters, key='vertical_layers_max_centroid_dist',\n defaults_type='layers')\n", (2035, 2121), False, 'from compas_slicer.parameters import get_param\n'), ((2152, 2183), 'compas_slicer.geometry.VerticalLayersManager', 'VerticalLayersManager', (['max_dist'], {}), '(max_dist)\n', (2173, 2183), False, 'from compas_slicer.geometry import VerticalLayersManager\n'), ((3447, 3478), 'numpy.arange', 'np.arange', (['(number_of_curves + 1)'], {}), '(number_of_curves + 1)\n', (3456, 3478), True, 'import numpy as np\n'), ((2358, 2488), 'compas_slicer.pre_processing.assign_interpolation_distance_to_mesh_vertices', 'assign_interpolation_distance_to_mesh_vertices', (['self.mesh', 'param', 'self.preprocessor.target_LOW', 'self.preprocessor.target_HIGH'], {}), '(self.mesh, param, self.\n preprocessor.target_LOW, self.preprocessor.target_HIGH)\n', (2404, 2488), False, 'from compas_slicer.pre_processing import assign_interpolation_distance_to_mesh_vertices\n'), ((2574, 2604), 'compas_slicer.slicers.slice_utilities.ScalarFieldContours', 'ScalarFieldContours', (['self.mesh'], {}), '(self.mesh)\n', (2593, 2604), False, 'from compas_slicer.slicers.slice_utilities 
import ScalarFieldContours\n')] |
import unittest
from pkg_resources import resource_filename
from collections import Counter
import numpy as np
from scipy import ndimage
from scipy.spatial import cKDTree as KDTree
from astropy.table import Table
from desimeter import detectspots
class TestDetectSpots(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''
Create test image based upon input spots
'''
cls.spotfile = resource_filename('desimeter', 'test/data/test-spots.csv')
cls.spots = spots = Table.read(cls.spotfile)
np.random.seed(1)
cls.img = np.random.normal(loc=1000.0, scale=35, size=(6000, 6000))
spot = np.zeros((15,15))
spot[7,7] = 1.0
spot = detectspots.gaussian_convolve(spot)
for x, y, counts in zip(spots['XPIX'], spots['YPIX'], spots['COUNTS']):
dx = x % 1
dy = y % 1
x = int(x)
y = int(y)
cls.img[y-7:y+8, x-7:x+8] += counts*ndimage.shift(spot, (dy,dx))
def setUp(self):
pass
@classmethod
def tearDownClass(cls):
pass
def test_detect(self):
spots = detectspots.detectspots(self.img)
self.assertTrue(len(spots) == len(self.spots))
tree = KDTree(np.array((self.spots['XPIX'], self.spots['YPIX'])).T)
distances, indices = tree.query(np.array((spots['XPIX'], spots['YPIX'])).T)
#- all spots were matched
self.assertEqual(len(set(indices)), len(indices))
#- loose check on distances because original image wasn't constructed
#- very exactly either; just check if we matched the right spot
self.assertLess(np.max(distances), 0.02)
def test_uint(self):
#- Should also work with uint data (from raw FVC images)
spots = detectspots.detectspots(self.img.astype(np.uint16))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"desimeter.detectspots.gaussian_convolve",
"desimeter.detectspots.detectspots",
"scipy.ndimage.shift",
"numpy.zeros",
"pkg_resources.resource_filename",
"numpy.max",
"numpy.array",
"numpy.random.normal",
"astropy.table.Table.read"
] | [((1875, 1890), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1888, 1890), False, 'import unittest\n'), ((431, 489), 'pkg_resources.resource_filename', 'resource_filename', (['"""desimeter"""', '"""test/data/test-spots.csv"""'], {}), "('desimeter', 'test/data/test-spots.csv')\n", (448, 489), False, 'from pkg_resources import resource_filename\n'), ((518, 542), 'astropy.table.Table.read', 'Table.read', (['cls.spotfile'], {}), '(cls.spotfile)\n', (528, 542), False, 'from astropy.table import Table\n'), ((551, 568), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (565, 568), True, 'import numpy as np\n'), ((587, 644), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(1000.0)', 'scale': '(35)', 'size': '(6000, 6000)'}), '(loc=1000.0, scale=35, size=(6000, 6000))\n', (603, 644), True, 'import numpy as np\n'), ((661, 679), 'numpy.zeros', 'np.zeros', (['(15, 15)'], {}), '((15, 15))\n', (669, 679), True, 'import numpy as np\n'), ((718, 753), 'desimeter.detectspots.gaussian_convolve', 'detectspots.gaussian_convolve', (['spot'], {}), '(spot)\n', (747, 753), False, 'from desimeter import detectspots\n'), ((1141, 1174), 'desimeter.detectspots.detectspots', 'detectspots.detectspots', (['self.img'], {}), '(self.img)\n', (1164, 1174), False, 'from desimeter import detectspots\n'), ((1659, 1676), 'numpy.max', 'np.max', (['distances'], {}), '(distances)\n', (1665, 1676), True, 'import numpy as np\n'), ((974, 1003), 'scipy.ndimage.shift', 'ndimage.shift', (['spot', '(dy, dx)'], {}), '(spot, (dy, dx))\n', (987, 1003), False, 'from scipy import ndimage\n'), ((1253, 1303), 'numpy.array', 'np.array', (["(self.spots['XPIX'], self.spots['YPIX'])"], {}), "((self.spots['XPIX'], self.spots['YPIX']))\n", (1261, 1303), True, 'import numpy as np\n'), ((1347, 1387), 'numpy.array', 'np.array', (["(spots['XPIX'], spots['YPIX'])"], {}), "((spots['XPIX'], spots['YPIX']))\n", (1355, 1387), True, 'import numpy as np\n')] |
import numpy as np
import copy
import os
import json
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Circle, FancyArrowPatch
from matplotlib.text import Text
def start(iter_num, horizon, parallel, iter):
dataPath = os.path.join(os.getcwd(), 'data', 'result')
if parallel:
filename = os.path.join(dataPath, "ibr_iter_parallel_" + str (iter_num) + "_horizon_" + \
str(horizon) + "_" + str(iter) + ".json")
else:
filename = os.path.join(dataPath, "ibr_iter_seq_" + str (iter_num) + "_horizon_" + \
str(horizon) + ".json")
with open(filename) as json_file:
record = json.load(json_file)
time = len(record.keys()) - 1
gridmap = np.matrix(record["gridmap"])
global ax, fig
fig = plt.figure()
#fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = fig.add_subplot(111, aspect='equal')
trj1, = ax.plot([], [], 'ko', ms=2)
def init():
trj1.set_data([], [])
return trj1,
def animate(t):
for obj in ax.findobj(match = FancyArrowPatch):
obj.remove()
for obj in ax.findobj(match = Circle):
obj.remove()
for obj in ax.findobj(match = Text):
obj.set_visible(False)
ax.matshow(gridmap, cmap=plt.cm.Blues)
t = str(t)
position = record[t]["position"]
decision = record[t]["decision"]
ax.text(2.5, -1.5, 'time step ' + t)
for i in range(gridmap.shape[1]):
for j in range(gridmap.shape[0]):
c = round(gridmap[j,i], 3)
ax.text(i, j, str(c), va='center', ha='center')
for i in range(len(position)):
pos = position[i]
action = decision[i][0]
circle = Circle((pos[1], pos[0]), 0.2, color = 'r', fc=None)
ax.add_artist(circle)
new_pos = [pos[0] + action[0], pos[1] + action[1]]
e = FancyArrowPatch((pos[1], pos[0]), (new_pos[1], new_pos[0]),
arrowstyle='<-',
linewidth=2,
color='k')
ax.add_artist(e)
# dynamics of the map
gridmap[new_pos[0], new_pos[1]] -= record[t]["gain"][i]
return trj1,
ani = animation.FuncAnimation(fig, animate, frames=time-1,
interval=10, blit=True, init_func=init, repeat = False)
path = os.getcwd()
videopath = os.path.join(path, 'video')
if parallel:
filename = os.path.join(videopath, "ibr_iter_parallel_" + str (iter_num) + "_horizon_" + \
str(horizon) + "_" + str(iter) + '.mp4')
else:
filename = os.path.join(videopath, "ibr_iter_seq_" + str (iter_num) + "_horizon_" + \
str(horizon) + '.mp4')
ani.save(filename, fps=2)
plt.close()
if __name__=="__main__":
iter_num, horizon = 4, 2
parallel = True
start(iter_num, horizon, parallel, 0)
# for horizon in range(3, 6):
# for iter_ in range(2, 5):
# for parallel in [True, False]:
# start(iter_, horizon, parallel)
| [
"numpy.matrix",
"json.load",
"os.getcwd",
"matplotlib.pyplot.close",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.animation.FuncAnimation",
"matplotlib.patches.Circle",
"matplotlib.pyplot.figure",
"matplotlib.use",
"os.path.join"
] | [((71, 94), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (85, 94), False, 'import matplotlib\n'), ((800, 828), 'numpy.matrix', 'np.matrix', (["record['gridmap']"], {}), "(record['gridmap'])\n", (809, 828), True, 'import numpy as np\n'), ((864, 876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (874, 876), True, 'import matplotlib.pyplot as plt\n'), ((2349, 2462), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'frames': '(time - 1)', 'interval': '(10)', 'blit': '(True)', 'init_func': 'init', 'repeat': '(False)'}), '(fig, animate, frames=time - 1, interval=10, blit=\n True, init_func=init, repeat=False)\n', (2372, 2462), True, 'import matplotlib.animation as animation\n'), ((2501, 2512), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2510, 2512), False, 'import os\n'), ((2530, 2557), 'os.path.join', 'os.path.join', (['path', '"""video"""'], {}), "(path, 'video')\n", (2542, 2557), False, 'import os\n'), ((2900, 2911), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2909, 2911), True, 'import matplotlib.pyplot as plt\n'), ((336, 347), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (345, 347), False, 'import os\n'), ((730, 750), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (739, 750), False, 'import json\n'), ((1885, 1934), 'matplotlib.patches.Circle', 'Circle', (['(pos[1], pos[0])', '(0.2)'], {'color': '"""r"""', 'fc': 'None'}), "((pos[1], pos[0]), 0.2, color='r', fc=None)\n", (1891, 1934), False, 'from matplotlib.patches import Circle, FancyArrowPatch\n'), ((2050, 2154), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(pos[1], pos[0])', '(new_pos[1], new_pos[0])'], {'arrowstyle': '"""<-"""', 'linewidth': '(2)', 'color': '"""k"""'}), "((pos[1], pos[0]), (new_pos[1], new_pos[0]), arrowstyle='<-',\n linewidth=2, color='k')\n", (2065, 2154), False, 'from matplotlib.patches import Circle, FancyArrowPatch\n')] |
from __future__ import division
import cPickle
import numpy as np
import math
import random
import os as os
from scipy import misc
from skimage import color
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
#import matplotlib.pyplot as plot1
#def graph_plot(x, y, xlab, ylab):
#plot1.figure(num = 1, figsize =(15,10), dpi = 72)
#plot1.subplot(321)
#plot1.scatter(CS_Score,Res_OH)
# plot1.plot(x, y, 'g^')
# plot1.xlabel(xlab)
# plot1.ylabel(ylab)
# plot1.show()
def oneHotEncoding(target):
print ("oneHotEncoding")
print (np.shape(target))
t = np.zeros((len(target),10))
print (np.shape(t))
# print "entering for:"
for i in range(len(target)):
index = target[i]
# print target[i]
# print index
t[i][index] = 1
# print t
return t
def gradientErrorFunction(x, t, y):
print ("gradientErrorFunction:")
print (np.shape(y))
print (np.shape(t))
print (np.shape(x))
temp = y - t
xMat = np.matrix(x)
tempMat = np.matrix(temp)
deltaE = np.dot(tempMat.transpose(),xMat)
print (np.shape(deltaE))
return deltaE
def SGD_w(deltaE, eta, w):
print ("SGD_w:")
print (np.shape(deltaE))
print (np.shape(w))
# print len(deltaE)
# print len(deltaE[0])
# print len(w)
#print len(w[0])
# deltaE = (eta * deltaE)
wnew = w - (eta * deltaE)
# print len(deltaE)
# print len(deltaE[0])
print (np.shape(wnew))
print (len(wnew))
print (len(wnew[0]))
return wnew
def activationfn(x, w, b):
print ("activation:")
a = np.zeros(10)
print (np.shape(w))
print (np.shape(x))
xMat = np.matrix(x)
a = np.dot(w,xMat.transpose())
print (np.shape(a))
return a
def calculate_y(a):
print ("calculate_y:")
print (len(a))
print (np.shape(a))
sum_a = 0
c=max(a)
for i in range(len(a)):
sum_a = sum_a + (math.exp(a[i]-c))
y = np.zeros(len(a))
for i in range(len(a)):
y[i] = (math.exp(a[i]-c))/sum_a
sum_y = sum(y)
print (sum_y)
return y
def hiddenLayerActivation(x, w, b):
print ("hiddenLayerActivation:")
print (np.shape(w))
print (np.shape(x))
print (len(w))
print (np.shape(w[0]))
row,col = np.shape(w)
z = np.zeros(row)
# print "**************************************************"
for i in range(row):
for j in range(col):
# print x[j]
# print w[i][j], i, j
# print "print"
z[i] = z[i] + (w[i][j] * x[j])
z[i] = z[i] + b
#print "##########################################"
return z
def hFunction(z):
hz = np.zeros(len(z))
hdashA = np.zeros(len(z))
for i in range(len(z)):
hz[i] = 1/(1 + math.exp(-z[i]))
hdashA[i] = hz[i] * (1 - hz[i])
return hz,hdashA
def gradientErrorFunctionNNLayer2(y, t, z):
print ("gradientErrorFunctionNNLayer2:")
d = y - t
dk = np.matrix(d)
print (np.shape(dk))
zmat = np.matrix(z)
print (np.shape(zmat))
error_dk = np.dot(dk.transpose(), zmat)
# row, col = np.shape(error)
# error_dk = np.zeros((row,col))
# for i in range(row):
# temp = error[i][:]
# print (temp)
# print np.shape(temp)
# print len(temp[0][:])
#temp1 = temp[0][:]
# print temp1
# for j in range(col):
# error_dk[i][j] = temp1[j]
print (np.shape(error_dk))
print (len(error_dk))
print (len(error_dk[0]))
return dk.transpose(), error_dk
def gradientErrorFunctionNNLayer1(hdashA, w, dk, x):
print ("gradientErrorFunctionNNLayer1:")
print (np.shape(hdashA))
print (np.shape(w))
print (len(w))
print (len(w[0]))
print (np.shape(dk))
dj = np.zeros(len(hdashA))
for j in range(len(dj)):
sum_w = 0
for k in range(len(w)):
sum_w = sum_w + (w[k][j] * dk[k])
dj[j] = hdashA[j] * sum_w
print (np.shape(dj))
xmat = np.matrix(x)
print (np.shape(xmat))
djmat = np.matrix(dj)
print (np.shape(djmat))
error_dj = np.dot(djmat.transpose(), xmat)
print (np.shape(error_dj))
print (len(error_dj))
print (len(error_dj[0]))
return djmat.transpose(), error_dj
def softmax(y):
print ("SoftMax:")
print (np.shape(y))
maximum = -1.0
value = -1
for i in range(len(y)):
if(maximum < y[i]):
maximum = y[i]
value = i
# print value
# print "end softmax"
return value
def logRegression(x, t, b):
print ("logregression:")
print (len(x[0]))
w = np.ones((10,len(x[0])))
eta = 0.01
count = 0
for j in range(5):
for i in range(len(x)):
a = activationfn(x[i][:], w, b)
y = calculate_y(a)
deltaE = gradientErrorFunction(x[i][:],t[i][:],y)
w = SGD_w(deltaE, eta, w)
count = count + 1
print ("count:")
print (count)
return w
def logRegressionValidate(x, t, w, b):
print ("logRegressionValidate:")
found = 0.0
y_value = np.zeros(len(x))
for i in range(len(x)):
a = activationfn(x[i][:], w, b)
y = calculate_y(a)
value = softmax(y)
# print t[i]
y_value[i] = value
if(value==t[i]):
found = found + 1.0
print ("found:")
print (found)
accuracy = (found/len(t))*100
print ("accuracy:")
print (accuracy)
return y, y_value, accuracy
def logRegressionTest(x, w, b):
print ("logRegressionTest:")
y_value = np.zeros(len(x))
for i in range(len(x)):
a = activationfn(x[i][:], w, b)
y = calculate_y(a)
value = softmax(y)
# print t[i]
y_value[i] = value
return y, y_value
def neuralnetwork(x, t, b):
print ("neuralnetwork:")
eta = 0.01
# x = np.insert(input_x,0,0,axis =1)
print (np.shape(x))
w1 = np.ones((100,len(x[0])))
print (np.shape(w1))
print (len(w1[0]))
for i in range(len(w1)):
for j in range(len(w1[0])):
w1[i][j] = random.randrange(0,100,1)
w1[i][j] = w1[i][j] / 10000
w2 = np.ones((10,100))
print (np.shape(w2))
for i in range(len(w2)):
for j in range(len(w2[0])):
w2[i][j] = random.randrange(0,100,1)
w2[i][j] = w2[i][j] / 10000
for i in range(len(x)):
z = hiddenLayerActivation(x[i][:], w1, b)
#z = np.insert(z,0,0)
hz, hdashA = hFunction(z)
a = hiddenLayerActivation(hz, w2, b)
y = calculate_y(a)
dk, error_dk = gradientErrorFunctionNNLayer2(y, t[i], z)
dj, error_dj = gradientErrorFunctionNNLayer1(hdashA, w2, dk, x[i][:])
w1 = SGD_w(error_dj, eta, w1)
w2 = SGD_w(error_dk, eta, w2)
return w1, w2
def neuralnetworkValidate(input_x, t, w1, w2, b):
print ("neuralnetworkValidate:")
# x = np.insert(input_x,0,0,axis =1)
x = np.matrix(input_x)
print (np.shape(x))
found = 0.0
y_value = np.zeros(len(x))
for i in range(len(x)):
z = hiddenLayerActivation(x[i][:], w1, b)
#z = np.insert(z,0,0)
hz, hdashA = hFunction(z)
a = hiddenLayerActivation(hz, w2, b)
y = calculate_y(a)
value = softmax(y)
y_value[i] = value
if(value==t[i]):
found = found + 1.0
accuracy = (found/len(t))*100
print ("accuracy NN:")
print (accuracy)
return y, y_value, accuracy
def neuralnetworkTest(input_x, w1, w2, b):
print ("neuralnetworkTest:")
# x = np.insert(input_x,0,0,axis =1)
x = np.matrix(input_x)
print (np.shape(x))
y_value = np.zeros(len(x))
for i in range(len(x)):
z = hiddenLayerActivation(x[i][:], w1, b)
#z = np.insert(z,0,0)
hz, hdashA = hFunction(z)
a = hiddenLayerActivation(hz, w2, b)
y = calculate_y(a)
value = softmax(y)
y_value[i] = value
return y, y_value
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def cnn():
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.matmul(x,W) + b
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
for i in range(1000):
batch = mnist.train.next_batch(100)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i%1000 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g"%accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == "__main__":
    # Driver script: trains a logistic-regression classifier on MNIST
    # (loaded from mnist.pkl), evaluates it on the USPS digit images, and
    # finally runs the TensorFlow CNN via cnn().
    print ("UBitName = jruvikam")
    print ("personNumber = 50207613")
    # Context manager guarantees the pickle file handle is closed
    # (the original open() was never closed).
    with open('mnist.pkl', 'rb') as pickleFile:
        train_set_MNIST, valid_set_MNIST, test_set_MNIST = cPickle.load(pickleFile)
    train_x_MNIST = train_set_MNIST[0]
    train_target_MNIST = train_set_MNIST[1]
    train_t_MNIST = oneHotEncoding(train_target_MNIST)
    valid_x_MNIST = valid_set_MNIST[0]
    valid_target_MNIST = valid_set_MNIST[1]
    test_x_MNIST = test_set_MNIST[0]
    test_target_MNIST = test_set_MNIST[1]
    b = 1  # bias term fed to the regression routines
    # TUNE HYPERPARAMETER ETA
    w_logRegress_MNIST = logRegression(train_x_MNIST, train_t_MNIST, b)
    yOneHot_validate_MNIST, y_value_validate_MNIST, accuracy_validate_MNIST = logRegressionValidate(valid_x_MNIST, valid_target_MNIST, w_logRegress_MNIST, b)
    yOneHot_test_MNIST, y_value_test_MNIST = logRegressionTest(test_x_MNIST, w_logRegress_MNIST, b)
    print ("accuracy MNIST validation:")
    print (accuracy_validate_MNIST)
    # ---- USPS validation set: one sub-directory per digit (0..9) ----
    path = "USPSdata/Numerals/"
    count = 0
    # Seed with one dummy row so np.insert always has a 2-D target; the
    # dummy row is removed again after the loop.
    validate_x_USPS = np.zeros((1, 784))
    target_set_USPS = np.zeros((1, 1))
    print (np.shape(validate_x_USPS))
    for i in range(10):
        new_path = path + str(i) + "/"
        for name in os.listdir(new_path):
            final_path = new_path + name
            # skip directory listings and Windows thumbnail caches
            if ".list" not in name and name != "Thumbs.db":
                img = misc.imread(final_path)
                gray_img = color.rgb2gray(img)
                resized_img = misc.imresize(gray_img, (28, 28))
                flat_img = np.ravel(resized_img)
                validate_x_USPS = np.insert(validate_x_USPS, len(validate_x_USPS), flat_img, axis=0)
                target_set_USPS = np.insert(target_set_USPS, len(target_set_USPS), int(i), axis=0)
                count = count + 1
                if (count % 1000) == 0:
                    print (count)
    print ("count:")
    print (count)
    validate_x_USPS = np.delete(validate_x_USPS, 0, axis=0)
    target_set_USPS = np.delete(target_set_USPS, 0, axis=0)
    yOneHot_validate_USPS, y_value_validate_USPS, accuracy_validate_USPS = logRegressionValidate(validate_x_USPS, target_set_USPS, w_logRegress_MNIST, b)
    # ---- USPS test set: a single flat directory of images ----
    path = "USPSdata/Test/"
    count = 0
    test_x_USPS = np.zeros((1, 784))
    # BUGFIX: the original wrapped this scan in `for i in range(10)` without
    # ever using i, re-reading the same directory ten times and duplicating
    # every test image 10x; the directory is now scanned exactly once.
    for name in os.listdir(path):
        final_path = path + name
        if ".list" not in name and name != "Thumbs.db":
            img = misc.imread(final_path)
            gray_img = color.rgb2gray(img)
            resized_img = misc.imresize(gray_img, (28, 28))
            flat_img = np.ravel(resized_img)
            # BUGFIX: the insertion index was len(validate_x_USPS) (a
            # different array), which mis-positions rows and can raise
            # IndexError; append at the end of test_x_USPS itself.
            test_x_USPS = np.insert(test_x_USPS, len(test_x_USPS), flat_img, axis=0)
            count = count + 1
            if (count % 1000) == 0:
                print (count)
    print ("count:")
    print (count)
    test_x_USPS = np.delete(test_x_USPS, 0, axis=0)
    yOneHot_test_USPS, y_value_test_USPS = logRegressionTest(test_x_USPS, w_logRegress_MNIST, b)
    cnn()
    print ("accuracy USPS validation:")
    print (accuracy_validate_USPS)
    print ("accuracy MNIST validation:")
    print (accuracy_validate_MNIST)
    print ("PROGRAM COMPLETED")
| [
"numpy.ravel",
"tensorflow.reshape",
"numpy.ones",
"cPickle.load",
"numpy.shape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"tensorflow.truncated_normal",
"skimage.color.rgb2gray",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"... | [((1026, 1038), 'numpy.matrix', 'np.matrix', (['x'], {}), '(x)\n', (1035, 1038), True, 'import numpy as np\n'), ((1053, 1068), 'numpy.matrix', 'np.matrix', (['temp'], {}), '(temp)\n', (1062, 1068), True, 'import numpy as np\n'), ((1638, 1650), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1646, 1650), True, 'import numpy as np\n'), ((1715, 1727), 'numpy.matrix', 'np.matrix', (['x'], {}), '(x)\n', (1724, 1727), True, 'import numpy as np\n'), ((2367, 2378), 'numpy.shape', 'np.shape', (['w'], {}), '(w)\n', (2375, 2378), True, 'import numpy as np\n'), ((2393, 2406), 'numpy.zeros', 'np.zeros', (['row'], {}), '(row)\n', (2401, 2406), True, 'import numpy as np\n'), ((3089, 3101), 'numpy.matrix', 'np.matrix', (['d'], {}), '(d)\n', (3098, 3101), True, 'import numpy as np\n'), ((3144, 3156), 'numpy.matrix', 'np.matrix', (['z'], {}), '(z)\n', (3153, 3156), True, 'import numpy as np\n'), ((4164, 4176), 'numpy.matrix', 'np.matrix', (['x'], {}), '(x)\n', (4173, 4176), True, 'import numpy as np\n'), ((4216, 4229), 'numpy.matrix', 'np.matrix', (['dj'], {}), '(dj)\n', (4225, 4229), True, 'import numpy as np\n'), ((6379, 6397), 'numpy.ones', 'np.ones', (['(10, 100)'], {}), '((10, 100))\n', (6386, 6397), True, 'import numpy as np\n'), ((7189, 7207), 'numpy.matrix', 'np.matrix', (['input_x'], {}), '(input_x)\n', (7198, 7207), True, 'import numpy as np\n'), ((7880, 7898), 'numpy.matrix', 'np.matrix', (['input_x'], {}), '(input_x)\n', (7889, 7898), True, 'import numpy as np\n'), ((8314, 8352), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (8333, 8352), True, 'import tensorflow as tf\n'), ((8362, 8382), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (8373, 8382), True, 'import tensorflow as tf\n'), ((8422, 8451), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (8433, 8451), True, 'import tensorflow as tf\n'), ((8461, 8481), 
'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (8472, 8481), True, 'import tensorflow as tf\n'), ((8510, 8566), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (8522, 8566), True, 'import tensorflow as tf\n'), ((8598, 8673), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (8612, 8673), True, 'import tensorflow as tf\n'), ((8722, 8775), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(True)'}), "('MNIST_data', one_hot=True)\n", (8747, 8775), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((8787, 8810), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (8808, 8810), True, 'import tensorflow as tf\n'), ((8819, 8864), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 784]'}), '(tf.float32, shape=[None, 784])\n', (8833, 8864), True, 'import tensorflow as tf\n'), ((8874, 8918), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 10]'}), '(tf.float32, shape=[None, 10])\n', (8888, 8918), True, 'import tensorflow as tf\n'), ((9694, 9724), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 28, 28, 1]'], {}), '(x, [-1, 28, 28, 1])\n', (9704, 9724), True, 'import tensorflow as tf\n'), ((10114, 10151), 'tensorflow.reshape', 'tf.reshape', (['h_pool2', '[-1, 7 * 7 * 64]'], {}), '(h_pool2, [-1, 7 * 7 * 64])\n', (10124, 10151), True, 'import tensorflow as tf\n'), ((10232, 10258), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (10246, 10258), True, 'import tensorflow as tf\n'), ((10276, 10307), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h_fc1', 'keep_prob'], {}), '(h_fc1, 
keep_prob)\n', (10289, 10307), True, 'import tensorflow as tf\n'), ((11460, 11484), 'cPickle.load', 'cPickle.load', (['pickleFile'], {}), '(pickleFile)\n', (11472, 11484), False, 'import cPickle\n'), ((12320, 12338), 'numpy.zeros', 'np.zeros', (['(1, 784)'], {}), '((1, 784))\n', (12328, 12338), True, 'import numpy as np\n'), ((12360, 12376), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (12368, 12376), True, 'import numpy as np\n'), ((13764, 13801), 'numpy.delete', 'np.delete', (['validate_x_USPS', '(0)'], {'axis': '(0)'}), '(validate_x_USPS, 0, axis=0)\n', (13773, 13801), True, 'import numpy as np\n'), ((13822, 13859), 'numpy.delete', 'np.delete', (['target_set_USPS', '(0)'], {'axis': '(0)'}), '(target_set_USPS, 0, axis=0)\n', (13831, 13859), True, 'import numpy as np\n'), ((14088, 14106), 'numpy.zeros', 'np.zeros', (['(1, 784)'], {}), '((1, 784))\n', (14096, 14106), True, 'import numpy as np\n'), ((15321, 15354), 'numpy.delete', 'np.delete', (['test_x_USPS', '(0)'], {'axis': '(0)'}), '(test_x_USPS, 0, axis=0)\n', (15330, 15354), True, 'import numpy as np\n'), ((589, 605), 'numpy.shape', 'np.shape', (['target'], {}), '(target)\n', (597, 605), True, 'import numpy as np\n'), ((653, 664), 'numpy.shape', 'np.shape', (['t'], {}), '(t)\n', (661, 664), True, 'import numpy as np\n'), ((937, 948), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (945, 948), True, 'import numpy as np\n'), ((961, 972), 'numpy.shape', 'np.shape', (['t'], {}), '(t)\n', (969, 972), True, 'import numpy as np\n'), ((985, 996), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (993, 996), True, 'import numpy as np\n'), ((1131, 1147), 'numpy.shape', 'np.shape', (['deltaE'], {}), '(deltaE)\n', (1139, 1147), True, 'import numpy as np\n'), ((1227, 1243), 'numpy.shape', 'np.shape', (['deltaE'], {}), '(deltaE)\n', (1235, 1243), True, 'import numpy as np\n'), ((1256, 1267), 'numpy.shape', 'np.shape', (['w'], {}), '(w)\n', (1264, 1267), True, 'import numpy as np\n'), ((1493, 1507), 
'numpy.shape', 'np.shape', (['wnew'], {}), '(wnew)\n', (1501, 1507), True, 'import numpy as np\n'), ((1667, 1678), 'numpy.shape', 'np.shape', (['w'], {}), '(w)\n', (1675, 1678), True, 'import numpy as np\n'), ((1691, 1702), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1699, 1702), True, 'import numpy as np\n'), ((1774, 1785), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1782, 1785), True, 'import numpy as np\n'), ((1882, 1893), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1890, 1893), True, 'import numpy as np\n'), ((2260, 2271), 'numpy.shape', 'np.shape', (['w'], {}), '(w)\n', (2268, 2271), True, 'import numpy as np\n'), ((2284, 2295), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2292, 2295), True, 'import numpy as np\n'), ((2327, 2341), 'numpy.shape', 'np.shape', (['w[0]'], {}), '(w[0])\n', (2335, 2341), True, 'import numpy as np\n'), ((3116, 3128), 'numpy.shape', 'np.shape', (['dk'], {}), '(dk)\n', (3124, 3128), True, 'import numpy as np\n'), ((3173, 3187), 'numpy.shape', 'np.shape', (['zmat'], {}), '(zmat)\n', (3181, 3187), True, 'import numpy as np\n'), ((3592, 3610), 'numpy.shape', 'np.shape', (['error_dk'], {}), '(error_dk)\n', (3600, 3610), True, 'import numpy as np\n'), ((3825, 3841), 'numpy.shape', 'np.shape', (['hdashA'], {}), '(hdashA)\n', (3833, 3841), True, 'import numpy as np\n'), ((3854, 3865), 'numpy.shape', 'np.shape', (['w'], {}), '(w)\n', (3862, 3865), True, 'import numpy as np\n'), ((3919, 3931), 'numpy.shape', 'np.shape', (['dk'], {}), '(dk)\n', (3927, 3931), True, 'import numpy as np\n'), ((4139, 4151), 'numpy.shape', 'np.shape', (['dj'], {}), '(dj)\n', (4147, 4151), True, 'import numpy as np\n'), ((4188, 4202), 'numpy.shape', 'np.shape', (['xmat'], {}), '(xmat)\n', (4196, 4202), True, 'import numpy as np\n'), ((4241, 4256), 'numpy.shape', 'np.shape', (['djmat'], {}), '(djmat)\n', (4249, 4256), True, 'import numpy as np\n'), ((4316, 4334), 'numpy.shape', 'np.shape', (['error_dj'], {}), '(error_dj)\n', (4324, 4334), True, 
'import numpy as np\n'), ((4485, 4496), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4493, 4496), True, 'import numpy as np\n'), ((6114, 6125), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (6122, 6125), True, 'import numpy as np\n'), ((6178, 6190), 'numpy.shape', 'np.shape', (['w1'], {}), '(w1)\n', (6186, 6190), True, 'import numpy as np\n'), ((6408, 6420), 'numpy.shape', 'np.shape', (['w2'], {}), '(w2)\n', (6416, 6420), True, 'import numpy as np\n'), ((7219, 7230), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (7227, 7230), True, 'import numpy as np\n'), ((7910, 7921), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (7918, 7921), True, 'import numpy as np\n'), ((8939, 8958), 'tensorflow.zeros', 'tf.zeros', (['[784, 10]'], {}), '([784, 10])\n', (8947, 8958), True, 'import tensorflow as tf\n'), ((8979, 8993), 'tensorflow.zeros', 'tf.zeros', (['[10]'], {}), '([10])\n', (8987, 8993), True, 'import tensorflow as tf\n'), ((9008, 9041), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9039, 9041), True, 'import tensorflow as tf\n'), ((9051, 9066), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (9060, 9066), True, 'import tensorflow as tf\n'), ((9105, 9151), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['y', 'y_'], {}), '(y, y_)\n', (9144, 9151), True, 'import tensorflow as tf\n'), ((9405, 9420), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (9414, 9420), True, 'import tensorflow as tf\n'), ((9421, 9437), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (9430, 9437), True, 'import tensorflow as tf\n'), ((9468, 9507), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (9475, 9507), True, 'import tensorflow as tf\n'), ((10399, 10427), 'tensorflow.matmul', 'tf.matmul', (['h_fc1_drop', 'W_fc2'], {}), '(h_fc1_drop, W_fc2)\n', (10408, 10427), True, 
'import tensorflow as tf\n'), ((10476, 10527), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['y_conv', 'y_'], {}), '(y_conv, y_)\n', (10515, 10527), True, 'import tensorflow as tf\n'), ((10633, 10653), 'tensorflow.argmax', 'tf.argmax', (['y_conv', '(1)'], {}), '(y_conv, 1)\n', (10642, 10653), True, 'import tensorflow as tf\n'), ((10654, 10670), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (10663, 10670), True, 'import tensorflow as tf\n'), ((10701, 10740), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (10708, 10740), True, 'import tensorflow as tf\n'), ((10755, 10788), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10786, 10788), True, 'import tensorflow as tf\n'), ((12387, 12412), 'numpy.shape', 'np.shape', (['validate_x_USPS'], {}), '(validate_x_USPS)\n', (12395, 12412), True, 'import numpy as np\n'), ((12533, 12553), 'os.listdir', 'os.listdir', (['new_path'], {}), '(new_path)\n', (12543, 12553), True, 'import os as os\n'), ((14183, 14203), 'os.listdir', 'os.listdir', (['new_path'], {}), '(new_path)\n', (14193, 14203), True, 'import os as os\n'), ((1981, 1999), 'math.exp', 'math.exp', (['(a[i] - c)'], {}), '(a[i] - c)\n', (1989, 1999), False, 'import math\n'), ((2082, 2100), 'math.exp', 'math.exp', (['(a[i] - c)'], {}), '(a[i] - c)\n', (2090, 2100), False, 'import math\n'), ((6304, 6331), 'random.randrange', 'random.randrange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (6320, 6331), False, 'import random\n'), ((6511, 6538), 'random.randrange', 'random.randrange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (6527, 6538), False, 'import random\n'), ((9170, 9208), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.5)'], {}), '(0.5)\n', (9203, 9208), True, 'import tensorflow as tf\n'), ((10171, 10201), 'tensorflow.matmul', 'tf.matmul', 
(['h_pool2_flat', 'W_fc1'], {}), '(h_pool2_flat, W_fc1)\n', (10180, 10201), True, 'import tensorflow as tf\n'), ((10546, 10576), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (10568, 10576), True, 'import tensorflow as tf\n'), ((2894, 2909), 'math.exp', 'math.exp', (['(-z[i])'], {}), '(-z[i])\n', (2902, 2909), False, 'import math\n'), ((12826, 12849), 'scipy.misc.imread', 'misc.imread', (['final_path'], {}), '(final_path)\n', (12837, 12849), False, 'from scipy import misc\n'), ((12881, 12900), 'skimage.color.rgb2gray', 'color.rgb2gray', (['img'], {}), '(img)\n', (12895, 12900), False, 'from skimage import color\n'), ((12935, 12968), 'scipy.misc.imresize', 'misc.imresize', (['gray_img', '(28, 28)'], {}), '(gray_img, (28, 28))\n', (12948, 12968), False, 'from scipy import misc\n'), ((13133, 13154), 'numpy.ravel', 'np.ravel', (['resized_img'], {}), '(resized_img)\n', (13141, 13154), True, 'import numpy as np\n'), ((14476, 14499), 'scipy.misc.imread', 'misc.imread', (['final_path'], {}), '(final_path)\n', (14487, 14499), False, 'from scipy import misc\n'), ((14531, 14550), 'skimage.color.rgb2gray', 'color.rgb2gray', (['img'], {}), '(img)\n', (14545, 14550), False, 'from skimage import color\n'), ((14585, 14618), 'scipy.misc.imresize', 'misc.imresize', (['gray_img', '(28, 28)'], {}), '(gray_img, (28, 28))\n', (14598, 14618), False, 'from scipy import misc\n'), ((14783, 14804), 'numpy.ravel', 'np.ravel', (['resized_img'], {}), '(resized_img)\n', (14791, 14804), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as st
from scipy.sparse.linalg import eigs
from scipy.spatial.distance import cdist
import sklearn as sk
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import cross_val_predict
from os.path import basename
from .util import is_pos_def
class SubspaceAlignedClassifier(object):
    """
    Class of classifiers based on Subspace Alignment.

    Source data is projected onto its principal components, those components
    are aligned with the target principal components, and a standard linear
    classifier is trained in the aligned subspace.

    Examples
    --------
    | >>>> X = np.random.randn(10, 2)
    | >>>> y = np.vstack((-np.ones((5,)), np.ones((5,))))
    | >>>> Z = np.random.randn(10, 2)
    | >>>> clf = SubspaceAlignedClassifier()
    | >>>> clf.fit(X, y, Z)
    | >>>> preds = clf.predict(Z)
    """

    def __init__(self, loss='logistic', l2=1.0, num_components=1):
        """
        Select a particular type of subspace aligned classifier.

        Parameters
        ----------
        loss : str
            loss function for weighted classifier, options: 'logistic',
            'quadratic', 'hinge' (def: 'logistic')
        l2 : float
            l2-regularization parameter value (def:0.01)
        num_components : int
            number of transfer components to maintain (def: 1)

        Returns
        -------
        None
        """
        self.loss = loss
        # NOTE(review): l2 is stored but not currently forwarded to the
        # underlying scikit-learn estimator.
        self.l2 = l2
        self.num_components = num_components

        # Initialize untrained classifiers
        if self.loss == 'logistic':
            # Logistic regression model
            self.clf = LogisticRegression()
        elif self.loss == 'quadratic':
            # Least-squares model
            self.clf = LinearRegression()
        elif self.loss == 'hinge':
            # Linear support vector machine
            self.clf = LinearSVC()
        else:
            # Other loss functions are not implemented
            raise NotImplementedError('Loss function not implemented.')

        # Whether model has been trained.  BUGFIX: this boolean attribute
        # shadowed a same-named method, which was therefore unreachable on
        # every instance (calling clf.is_trained() raised TypeError); the
        # dead method has been removed.
        self.is_trained = False

        # Dimensionality of training data ('' until fit() records it)
        self.train_data_dim = ''

    def subspace_alignment(self, X, Z, num_components=1):
        """
        Compute subspace and alignment matrix.

        Parameters
        ----------
        X : array
            source data set (N samples by D features)
        Z : array
            target data set (M samples by D features)
        num_components : int
            number of components (def: 1)

        Returns
        -------
        V : array
            transformation matrix (num_components by num_components)
        CX : array
            source principal component coefficients
        CZ : array
            target principal component coefficients
        """
        # Data shapes
        N, DX = X.shape
        M, DZ = Z.shape

        # Assert equivalent dimensionalities
        if not DX == DZ:
            raise ValueError('Dimensionalities of X and Z should be equal.')

        # Principal components of source and target (columns = components)
        CX = PCA(n_components=num_components, whiten=True).fit(X).components_.T
        CZ = PCA(n_components=num_components, whiten=True).fit(Z).components_.T

        # Alignment matrix mapping source components onto target components
        V = np.dot(CX.T, CZ)

        # Return transformation matrix and principal component coefficients
        return V, CX, CZ

    def fit(self, X, y, Z):
        """
        Fit/train a classifier on data mapped onto transfer components.

        Parameters
        ----------
        X : array
            source data (N samples by D features)
        y : array
            source labels (N samples by 1)
        Z : array
            target data (M samples by D features)

        Returns
        -------
        None
        """
        # Data shapes
        N, DX = X.shape
        M, DZ = Z.shape

        # Assert equivalent dimensionalities
        if not DX == DZ:
            raise ValueError('Dimensionalities of X and Z should be equal.')

        # Subspace alignment between source and target
        V, CX, CZ = self.subspace_alignment(X, Z,
                                            num_components=self.num_components)

        # Store target subspace for use at prediction time
        self.target_subspace = CZ

        # Map source data onto source principal components
        X = np.dot(X, CX)

        # Align source data to target subspace
        X = np.dot(X, V)

        # Train the underlying classifier on the aligned data (the fit call
        # is identical for every supported loss; the branch mirrors __init__)
        if self.loss == 'logistic':
            self.clf.fit(X, y)
        elif self.loss == 'quadratic':
            self.clf.fit(X, y)
        elif self.loss == 'hinge':
            self.clf.fit(X, y)
        else:
            raise NotImplementedError

        # Mark classifier as trained
        self.is_trained = True

        # Store training data dimensionality
        self.train_data_dim = DX

    def predict(self, Z, whiten=False):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)
        whiten : boolean
            whether to whiten new data (def: false)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)
        """
        # Data shape
        M, D = Z.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        # Check for need to whiten data beforehand
        if whiten:
            Z = st.zscore(Z)

        # Map new target data onto target subspace
        Z = np.dot(Z, self.target_subspace)

        # Call scikit's predict function
        preds = self.clf.predict(Z)

        # For quadratic loss, map raw regression output back to {0, 1} labels
        if self.loss == 'quadratic':
            preds = (np.sign(preds)+1)/2.

        # Return predictions array
        return preds

    def get_params(self):
        """Get parameters of the underlying scikit-learn classifier."""
        return self.clf.get_params()
| [
"scipy.stats.zscore",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"sklearn.decomposition.PCA",
"numpy.sign",
"sklearn.svm.LinearSVC",
"numpy.dot"
] | [((3450, 3466), 'numpy.dot', 'np.dot', (['CX.T', 'CZ'], {}), '(CX.T, CZ)\n', (3456, 3466), True, 'import numpy as np\n'), ((4542, 4555), 'numpy.dot', 'np.dot', (['X', 'CX'], {}), '(X, CX)\n', (4548, 4555), True, 'import numpy as np\n'), ((4619, 4631), 'numpy.dot', 'np.dot', (['X', 'V'], {}), '(X, V)\n', (4625, 4631), True, 'import numpy as np\n'), ((6236, 6267), 'numpy.dot', 'np.dot', (['Z', 'self.target_subspace'], {}), '(Z, self.target_subspace)\n', (6242, 6267), True, 'import numpy as np\n'), ((1750, 1770), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1768, 1770), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((6156, 6168), 'scipy.stats.zscore', 'st.zscore', (['Z'], {}), '(Z)\n', (6165, 6168), True, 'import scipy.stats as st\n'), ((1870, 1888), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1886, 1888), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((1994, 2005), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (2003, 2005), False, 'from sklearn.svm import LinearSVC\n'), ((6471, 6485), 'numpy.sign', 'np.sign', (['preds'], {}), '(preds)\n', (6478, 6485), True, 'import numpy as np\n'), ((3250, 3295), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_components', 'whiten': '(True)'}), '(n_components=num_components, whiten=True)\n', (3253, 3295), False, 'from sklearn.decomposition import PCA\n'), ((3331, 3376), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_components', 'whiten': '(True)'}), '(n_components=num_components, whiten=True)\n', (3334, 3376), False, 'from sklearn.decomposition import PCA\n')] |
import numpy as np
from scipy.ndimage import interpolation
from ocear.preprocess.utils import clip_borders
MAX_SKEW = 3
SKEW_STEPS = 32
def _skew_angle(image):
    """
    Return the rotation angle (within +/-MAX_SKEW degrees, sampled at
    SKEW_STEPS + 1 points) that maximises the variance of the row means.
    The higher that variance, the more clearly the text lines separate,
    i.e. the straighter the letters stand.
    """
    best = None
    for candidate in np.linspace(-MAX_SKEW, MAX_SKEW, SKEW_STEPS + 1):
        rotated = interpolation.rotate(image, candidate, order=0, mode='constant')
        variance = np.mean(rotated, axis=1).var()
        if best is None or (variance, candidate) > best:
            best = (variance, candidate)
    return best[1]
def skew(image):
    """
    Deskew *image*: estimate its skew angle and rotate it back upright.
    """
    # Invert and shift to zero so the letters are bright on a dark
    # background, which sharpens the variance-based angle estimate.
    inverted = np.amax(image) - image
    inverted = inverted - np.amin(inverted)
    # Estimate the skew on the border-clipped image, then rotate the
    # full (inverted) image by that angle without changing its shape.
    angle = _skew_angle(clip_borders(inverted))
    rotated = interpolation.rotate(inverted, angle, reshape=False)
    # Undo the inversion before returning.
    return np.amax(rotated) - rotated
| [
"numpy.amin",
"ocear.preprocess.utils.clip_borders",
"scipy.ndimage.interpolation.rotate",
"numpy.amax",
"numpy.linspace"
] | [((380, 428), 'numpy.linspace', 'np.linspace', (['(-MAX_SKEW)', 'MAX_SKEW', '(SKEW_STEPS + 1)'], {}), '(-MAX_SKEW, MAX_SKEW, SKEW_STEPS + 1)\n', (391, 428), True, 'import numpy as np\n'), ((906, 953), 'scipy.ndimage.interpolation.rotate', 'interpolation.rotate', (['img', 'angle'], {'reshape': '(False)'}), '(img, angle, reshape=False)\n', (926, 953), False, 'from scipy.ndimage import interpolation\n'), ((775, 789), 'numpy.amax', 'np.amax', (['image'], {}), '(image)\n', (782, 789), True, 'import numpy as np\n'), ((814, 826), 'numpy.amin', 'np.amin', (['img'], {}), '(img)\n', (821, 826), True, 'import numpy as np\n'), ((877, 894), 'ocear.preprocess.utils.clip_borders', 'clip_borders', (['img'], {}), '(img)\n', (889, 894), False, 'from ocear.preprocess.utils import clip_borders\n'), ((965, 977), 'numpy.amax', 'np.amax', (['img'], {}), '(img)\n', (972, 977), True, 'import numpy as np\n'), ((470, 530), 'scipy.ndimage.interpolation.rotate', 'interpolation.rotate', (['image', 'angle'], {'order': '(0)', 'mode': '"""constant"""'}), "(image, angle, order=0, mode='constant')\n", (490, 530), False, 'from scipy.ndimage import interpolation\n')] |
import numpy as np
import torch
from sklearn.metrics import confusion_matrix, roc_auc_score
from argus.metrics.metric import Metric
class MultiAUC(Metric):
    """Mean ROC-AUC over ``num_classes`` independent binary targets.

    Predictions and targets are accumulated batch-by-batch in ``update()``;
    ``compute()`` reports the mean AUC together with the per-class list.
    """
    name = 'multi_auc'
    better = 'max'

    def __init__(self, num_classes=11):
        self.num_classes = num_classes
        # Initialise the accumulators here so compute() is safe even if
        # reset() has not been called externally yet.
        self.reset()

    def reset(self):
        """Clear the accumulated predictions and targets."""
        self.y_pred = []
        self.y_true = []

    def update(self, step_output: dict):
        """Append one batch of predictions/targets (moved to CPU numpy)."""
        pred = step_output['prediction'].cpu().numpy()
        trg = step_output['target'].cpu().numpy()
        self.y_pred.append(pred)
        self.y_true.append(trg)

    def compute(self):
        """Return ``(mean_auc, per_class_aucs)``.

        BUGFIX: concatenation now goes into locals instead of overwriting
        ``self.y_pred``/``self.y_true``, so compute() can be called more
        than once without corrupting the accumulators (the old code turned
        the list of batch arrays into a single ndarray in place, breaking
        any subsequent compute() or update()).
        """
        y_pred = np.concatenate(self.y_pred)
        y_true = np.concatenate(self.y_true)
        aucs = [roc_auc_score(y_true[:, i], y_pred[:, i])
                for i in range(self.num_classes)]
        return np.mean(aucs), aucs

    def epoch_complete(self, state):
        """Compute the epoch score, publish it in state.metrics and log it."""
        with torch.no_grad():
            score, aucs = self.compute()
        name_prefix = f"{state.phase}_" if state.phase else ''
        state.metrics[name_prefix + self.name] = score
        state.logger.info(f'AUC: {aucs}')
| [
"numpy.mean",
"torch.no_grad",
"numpy.concatenate",
"sklearn.metrics.roc_auc_score"
] | [((612, 639), 'numpy.concatenate', 'np.concatenate', (['self.y_pred'], {}), '(self.y_pred)\n', (626, 639), True, 'import numpy as np\n'), ((662, 689), 'numpy.concatenate', 'np.concatenate', (['self.y_true'], {}), '(self.y_true)\n', (676, 689), True, 'import numpy as np\n'), ((843, 856), 'numpy.mean', 'np.mean', (['aucs'], {}), '(aucs)\n', (850, 856), True, 'import numpy as np\n'), ((914, 929), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (927, 929), False, 'import torch\n'), ((775, 826), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['self.y_true[:, i]', 'self.y_pred[:, i]'], {}), '(self.y_true[:, i], self.y_pred[:, i])\n', (788, 826), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n')] |
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
def read_file(filename):
    """
    Read *filename* with the csv module and return every row as a list.
    """
    with open(filename) as csvfile:
        return list(csv.reader(csvfile))
def crop_images(X, y):
    """
    Crop a fixed top fraction (40%) and bottom fraction (15%) off every
    image, keeping the full width.  For a 160x320x3 input the result is
    (72, 320, 3).  No. of Output Images = No. of Input Images.
    """
    top_frac = 0.4
    bottom_frac = 0.15
    cropped = []
    angles = []
    for img, angle in zip(X, y):
        height = img.shape[0]
        top = int(np.ceil(height * top_frac))
        bottom = height - int(np.ceil(height * bottom_frac))
        cropped.append(img[top:bottom, :])
        angles.append(angle)
    return cropped, angles
#Without resizing gave better results, hence don't use this
def resize_images(X, y):
    """
    Resize every image to width=200, height=66; steering angles pass
    through unchanged.  No. of Output Images = No. of Input Images.
    """
    resized = [cv2.resize(img, (200, 66)) for img in X]
    return resized, list(y)
def apply_gamma(X, y):
    """
    Gamma augmentation: for every input image keep the original and add a
    gamma-corrected copy (random gamma drawn from [0.7, 1.7)).
    No. of Output Images = 2 * (No. of Input Images)
    """
    images = []
    steering_angles = []
    for i in range(len(X)):
        gamma = np.random.uniform(0.7, 1.7)
        inv_gamma = 1 / gamma
        # BUGFIX: the lookup table must be uint8 -- cv2.LUT emits an image of
        # the table's dtype, so a float table silently promoted augmented
        # images to float64 (inconsistent with the uint8 originals and with
        # later cv2.cvtColor calls).  Also renamed the comprehension
        # variable: it previously reused `i` and, under Python-2 list
        # comprehension scoping, clobbered the outer loop index.
        map_table = np.array([((v / 255.0) ** inv_gamma) * 255
                              for v in np.arange(0, 256)]).astype("uint8")
        transformed_img = cv2.LUT(X[i], map_table)
        images.append(X[i])
        steering_angles.append(y[i])
        images.append(transformed_img)
        steering_angles.append(y[i])
    return images, steering_angles
def vary_brightness(X, y):
    """
    Randomly scale the brightness of every image by a factor in [0.7, 1.3).
    Uses HSV colour space, where the V channel is brightness.
    No. of Output Images = No. of Input Images
    """
    images = []
    steering_angles = []
    for i in range(len(X)):
        # HSV (Hue, Saturation, Value) - Value is brightness
        hsv_img = cv2.cvtColor(X[i], cv2.COLOR_RGB2HSV)
        random_value = 1.0 + 0.6 * (np.random.rand() - 0.5)
        # BUGFIX: writing the raw float product straight back into the uint8
        # V channel wraps values above 255, producing dark speckles in
        # bright regions; clip to the valid range before the cast.
        hsv_img[:, :, 2] = np.clip(hsv_img[:, :, 2] * random_value, 0, 255)
        transformed_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)
        images.append(transformed_img)
        steering_angles.append(y[i])
    return images, steering_angles
def flip_images_and_add(X, y):
    """
    Keep every input image and additionally append a horizontally flipped
    copy with a negated steering angle -- but only for images whose steering
    angle lies outside (-0.1, +0.1).  Straight or near-straight frames are
    not flipped, since their mirror image adds no new information.
    No. of Output Images >= No. of Input Images.
    """
    images = []
    steering_angles = []
    for img, angle in zip(X, y):
        images.append(img)
        steering_angles.append(angle)
        # Flip only the frames showing a real curve.
        if angle < -0.1 or angle > 0.1:
            images.append(cv2.flip(img, 1))
            steering_angles.append(angle * -1.0)
    return images, steering_angles
def translate(X, y, range_x, range_y):
    """
    For every input image keep the original and append a randomly shifted
    copy (uniform shift in [-range_x/2, range_x/2] x [-range_y/2, range_y/2]).
    The shifted copy's steering angle is adjusted by 0.002 per pixel of
    horizontal shift.  No. of Output Images = 2 * (No. of Input Images).
    """
    images = []
    steering_angles = []
    for img, angle in zip(X, y):
        shift_x = range_x * (np.random.rand() - 0.5)
        shift_y = range_y * (np.random.rand() - 0.5)
        shifted_angle = angle + shift_x * 0.002
        matrix = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
        rows, cols = img.shape[:2]
        shifted = cv2.warpAffine(img, matrix, (cols, rows))
        images.append(img)
        steering_angles.append(angle)
        images.append(shifted)
        steering_angles.append(shifted_angle)
    return images, steering_angles
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
def data_generator(rows, validation_flag, batch_size):
    """
    Infinite Python generator yielding augmented (X_train, y_train) batches
    from the driving-log rows, so training fits in modest memory/CPU.

    Each log row contributes three camera frames (center/left/right); the
    left/right steering labels are offset by +/- correction_factor.  Every
    batch is then augmented (translate, selective flip, brightness jitter),
    which multiplies the effective batch size.

    NOTE(review): `validation_flag` is accepted but never used in this
    body -- presumably augmentation was meant to be skipped for validation;
    confirm against the caller.
    """
    # steering offset applied to the left/right camera frames
    correction_factor = 0.20
    path = 'trainingdata/IMG/'
    len_rows = len(rows)
    # NOTE(review): rows are shuffled once here, not once per epoch, so
    # every pass over the data sees the same batch composition.
    rows = shuffle(rows)
    while 1:
        # loop forever: Keras-style generators are drained by steps-per-epoch
        for offset in range(0, len_rows, batch_size):
            batch_rows = rows[offset:offset+batch_size]
            images = []
            steering_values = []
            #print('rows in batch', len(batch_rows))
            for line in batch_rows:
                # columns 0-2 are the center/left/right image paths,
                # column 3 is the steering value recorded by the simulator
                center_image_path = line[0]
                left_image_path = line[1]
                right_image_path = line[2]
                center_image_name = center_image_path.split('/')[-1] #Last token [-1] is the image
                left_image_name = left_image_path.split('/')[-1]
                right_image_name = right_image_path.split('/')[-1]
                center_image_bgr = cv2.imread(path+center_image_name)
                left_image_bgr = cv2.imread(path+left_image_name)
                right_image_bgr = cv2.imread(path+right_image_name)
                #Converting from BGR to RGB space as simulator reads RGB space
                center_image = cv2.cvtColor(center_image_bgr, cv2.COLOR_BGR2RGB)
                left_image = cv2.cvtColor(left_image_bgr, cv2.COLOR_BGR2RGB)
                right_image = cv2.cvtColor(right_image_bgr, cv2.COLOR_BGR2RGB)
                steering_value = float(line[3])
                # side cameras: nudge the label toward the road center
                left_steering_value = steering_value + correction_factor
                right_steering_value = steering_value - correction_factor
                # light Gaussian blur to reduce sensor noise before training
                images.append(cv2.GaussianBlur(center_image, (3, 3), 0))
               # images.append(center_image)
                steering_values.append(steering_value)
                images.append(cv2.GaussianBlur(left_image, (3, 3), 0))
               # images.append(left_image)
                steering_values.append(left_steering_value)
                images.append(cv2.GaussianBlur(right_image, (3, 3), 0))
               # images.append(right_image)
                steering_values.append(right_steering_value)
            X_train, y_train = images, steering_values
            X_train, y_train = shuffle(X_train, y_train)
            #Augmenting & Pre-processing
            #X_train, y_train = crop_images(X_train, y_train)
            #X_train, y_train = resize_images(X_train, y_train)
            # augmentation grows the batch: translate and flip_images_and_add
            # append extra images, vary_brightness keeps the count
            X_train, y_train = translate(X_train, y_train, 100, 10)
            X_train, y_train = flip_images_and_add(X_train, y_train)
            X_train, y_train = vary_brightness(X_train, y_train)
            X_train, y_train = shuffle(X_train, y_train)
            X_train = np.array(X_train)
            y_train = np.array(y_train)
            yield X_train, y_train
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
#Architecture based on NVIDIA
def train_model(train_generator, valid_generator, len_train, len_valid):
    """Define the NVIDIA-inspired convolutional network, train it on the
    generator output and save the result to 'model.h5'.

    Parameters:
        train_generator, valid_generator: generators yielding (X, y) batches.
        len_train, len_valid: samples per epoch for training / validation.
    """
    print('Training started...')
    # Normalisation, cropping and the conv/dense stack, assembled as a
    # list so the architecture reads top-to-bottom.
    architecture = [
        Lambda(lambda x: (x / 255) - 0.5, input_shape=(160, 320, 3)),
        Cropping2D(cropping=((70, 25), (0, 0))),
        Convolution2D(24, 5, 5, activation='elu', subsample=(2, 2)),
        Convolution2D(36, 5, 5, activation='elu', subsample=(2, 2)),
        Convolution2D(48, 5, 5, activation='elu', subsample=(2, 2)),
        Dropout(0.5),
        Convolution2D(64, 3, 3, activation='elu'),
        Convolution2D(64, 3, 3, activation='elu'),
        Dropout(0.5),
        Flatten(),
        Dense(512, activation='elu'),
        Dense(64, activation='elu'),
        Dropout(0.3),
        Dense(10, activation='elu'),
        Dense(1),
    ]
    model = Sequential()
    for layer in architecture:
        model.add(layer)
    model.summary()
    start = time.time()
    model.compile(loss='mse', optimizer='adam')
    # Keras 1.x generator API (samples_per_epoch / nb_val_samples / nb_epoch)
    model.fit_generator(train_generator, samples_per_epoch=len_train,
                        validation_data=valid_generator,
                        nb_val_samples=len_valid, nb_epoch=10)
    print('Training complete!')
    print('Total time for training {:.3f}'.format(time.time() - start))
    model.save('model.h5')
def mainfn():
    """Entry point: load the driving log, split it into training and
    validation sets, create the batch generators and train the model.
    """
    log_rows = read_file('./trainingdata/driving_log.csv')
    print('Length of the csv file {}'.format(len(log_rows)))
    # hold out 20% of the samples for validation
    train_rows, valid_rows = train_test_split(log_rows, test_size=0.2)
    gen_train = data_generator(train_rows, False, batch_size=32)
    gen_valid = data_generator(valid_rows, True, batch_size=32)
    train_model(gen_train, gen_valid, len(train_rows), len(valid_rows))
#Calling the mainfn() to kick-off the process
mainfn() | [
"cv2.GaussianBlur",
"csv.reader",
"keras.layers.Cropping2D",
"sklearn.model_selection.train_test_split",
"cv2.warpAffine",
"numpy.arange",
"cv2.cvtColor",
"keras.layers.Flatten",
"cv2.LUT",
"cv2.resize",
"numpy.ceil",
"keras.layers.Dropout",
"cv2.flip",
"numpy.random.uniform",
"numpy.flo... | [((4725, 4738), 'sklearn.utils.shuffle', 'shuffle', (['rows'], {}), '(rows)\n', (4732, 4738), False, 'from sklearn.utils import shuffle\n'), ((7881, 7893), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7891, 7893), False, 'from keras.models import Sequential\n'), ((8785, 8796), 'time.time', 'time.time', ([], {}), '()\n', (8794, 8796), False, 'import time\n'), ((9374, 9416), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_rows'], {'test_size': '(0.2)'}), '(data_rows, test_size=0.2)\n', (9390, 9416), False, 'from sklearn.model_selection import train_test_split\n'), ((264, 283), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (274, 283), False, 'import csv\n'), ((1297, 1324), 'cv2.resize', 'cv2.resize', (['X[i]', '(200, 66)'], {}), '(X[i], (200, 66))\n', (1307, 1324), False, 'import cv2\n'), ((1732, 1759), 'numpy.random.uniform', 'np.random.uniform', (['(0.7)', '(1.7)'], {}), '(0.7, 1.7)\n', (1749, 1759), True, 'import numpy as np\n'), ((1901, 1925), 'cv2.LUT', 'cv2.LUT', (['X[i]', 'map_table'], {}), '(X[i], map_table)\n', (1908, 1925), False, 'import cv2\n'), ((2462, 2499), 'cv2.cvtColor', 'cv2.cvtColor', (['X[i]', 'cv2.COLOR_RGB2HSV'], {}), '(X[i], cv2.COLOR_RGB2HSV)\n', (2474, 2499), False, 'import cv2\n'), ((2643, 2683), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv_img', 'cv2.COLOR_HSV2RGB'], {}), '(hsv_img, cv2.COLOR_HSV2RGB)\n', (2655, 2683), False, 'import cv2\n'), ((4012, 4058), 'numpy.float32', 'np.float32', (['[[1, 0, trans_x], [0, 1, trans_y]]'], {}), '([[1, 0, trans_x], [0, 1, trans_y]])\n', (4022, 4058), True, 'import numpy as np\n'), ((4124, 4170), 'cv2.warpAffine', 'cv2.warpAffine', (['X[i]', 'trans_m', '(width, height)'], {}), '(X[i], trans_m, (width, height))\n', (4138, 4170), False, 'import cv2\n'), ((7984, 8042), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255 - 0.5, input_shape=(160, 320, 3))\n', (7990, 8042), False, 
'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8060, 8099), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (8070, 8099), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8150, 8209), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'activation': '"""elu"""', 'subsample': '(2, 2)'}), "(24, 5, 5, activation='elu', subsample=(2, 2))\n", (8163, 8209), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8225, 8284), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5)', '(5)'], {'activation': '"""elu"""', 'subsample': '(2, 2)'}), "(36, 5, 5, activation='elu', subsample=(2, 2))\n", (8238, 8284), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8300, 8359), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5)', '(5)'], {'activation': '"""elu"""', 'subsample': '(2, 2)'}), "(48, 5, 5, activation='elu', subsample=(2, 2))\n", (8313, 8359), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8375, 8387), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8382, 8387), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8403, 8444), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'activation': '"""elu"""'}), "(64, 3, 3, activation='elu')\n", (8416, 8444), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8460, 8501), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'activation': '"""elu"""'}), "(64, 3, 3, activation='elu')\n", (8473, 8501), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8517, 8529), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8524, 8529), False, 'from keras.layers import Flatten, Dense, 
Lambda, Cropping2D, Dropout, Reshape\n'), ((8545, 8554), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8552, 8554), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8570, 8598), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""elu"""'}), "(512, activation='elu')\n", (8575, 8598), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8614, 8641), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""elu"""'}), "(64, activation='elu')\n", (8619, 8641), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8657, 8669), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (8664, 8669), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8685, 8712), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""elu"""'}), "(10, activation='elu')\n", (8690, 8712), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((8728, 8736), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (8733, 8736), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Reshape\n'), ((725, 764), 'numpy.ceil', 'np.ceil', (['(ind_img.shape[0] * top_percent)'], {}), '(ind_img.shape[0] * top_percent)\n', (732, 764), True, 'import numpy as np\n'), ((6836, 6861), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (6843, 6861), False, 'from sklearn.utils import shuffle\n'), ((7263, 7288), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (7270, 7288), False, 'from sklearn.utils import shuffle\n'), ((7312, 7329), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (7320, 7329), True, 'import numpy as np\n'), ((7352, 7369), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (7360, 7369), True, 'import numpy as np\n'), ((806, 848), 'numpy.ceil', 
'np.ceil', (['(ind_img.shape[0] * bottom_percent)'], {}), '(ind_img.shape[0] * bottom_percent)\n', (813, 848), True, 'import numpy as np\n'), ((3480, 3497), 'cv2.flip', 'cv2.flip', (['X[i]', '(1)'], {}), '(X[i], 1)\n', (3488, 3497), False, 'import cv2\n'), ((3866, 3882), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3880, 3882), True, 'import numpy as np\n'), ((3919, 3935), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3933, 3935), True, 'import numpy as np\n'), ((5422, 5458), 'cv2.imread', 'cv2.imread', (['(path + center_image_name)'], {}), '(path + center_image_name)\n', (5432, 5458), False, 'import cv2\n'), ((5492, 5526), 'cv2.imread', 'cv2.imread', (['(path + left_image_name)'], {}), '(path + left_image_name)\n', (5502, 5526), False, 'import cv2\n'), ((5559, 5594), 'cv2.imread', 'cv2.imread', (['(path + right_image_name)'], {}), '(path + right_image_name)\n', (5569, 5594), False, 'import cv2\n'), ((5720, 5769), 'cv2.cvtColor', 'cv2.cvtColor', (['center_image_bgr', 'cv2.COLOR_BGR2RGB'], {}), '(center_image_bgr, cv2.COLOR_BGR2RGB)\n', (5732, 5769), False, 'import cv2\n'), ((5801, 5848), 'cv2.cvtColor', 'cv2.cvtColor', (['left_image_bgr', 'cv2.COLOR_BGR2RGB'], {}), '(left_image_bgr, cv2.COLOR_BGR2RGB)\n', (5813, 5848), False, 'import cv2\n'), ((5879, 5927), 'cv2.cvtColor', 'cv2.cvtColor', (['right_image_bgr', 'cv2.COLOR_BGR2RGB'], {}), '(right_image_bgr, cv2.COLOR_BGR2RGB)\n', (5891, 5927), False, 'import cv2\n'), ((9070, 9081), 'time.time', 'time.time', ([], {}), '()\n', (9079, 9081), False, 'import time\n'), ((1856, 1873), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (1865, 1873), True, 'import numpy as np\n'), ((2536, 2552), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2550, 2552), True, 'import numpy as np\n'), ((6205, 6246), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['center_image', '(3, 3)', '(0)'], {}), '(center_image, (3, 3), 0)\n', (6221, 6246), False, 'import cv2\n'), ((6380, 6419), 
'cv2.GaussianBlur', 'cv2.GaussianBlur', (['left_image', '(3, 3)', '(0)'], {}), '(left_image, (3, 3), 0)\n', (6396, 6419), False, 'import cv2\n'), ((6572, 6612), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['right_image', '(3, 3)', '(0)'], {}), '(right_image, (3, 3), 0)\n', (6588, 6612), False, 'import cv2\n')] |
import numpy as np
from abc import ABC
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver
from scipy.sparse import csc_matrix, csr_matrix
from bayesian_decision_tree.base import BaseTree
from bayesian_decision_tree.hyperplane_optimization import HyperplaneOptimizationFunction, ScipyOptimizer
class BaseHyperplaneTree(BaseTree, ABC):
    """
    Abstract base class of all Bayesian decision tree models using arbitrarily-oriented hyperplane splits
    (classification and regression). Performs medium-level fitting and prediction tasks and outsources
    the low-level work to subclasses.
    """
    def __init__(self, partition_prior, prior, delta, prune, child_type, is_regression, optimizer, split_precision, level):
        # Delegate the shared state to BaseTree; this subclass only adds the
        # optimizer used to search for the best hyperplane split.
        BaseTree.__init__(self, partition_prior, prior, delta, prune, child_type, is_regression, split_precision, level)
        self.optimizer = optimizer
    def _fit(self, X, y, verbose, feature_names, side_name):
        """Recursively fit this node: search for the best hyperplane split of
        (X, y) and, if a split was found, create and fit the two child nodes.
        """
        n_data = X.shape[0]
        n_dim = X.shape[1]
        prior = self._get_prior(n_data, n_dim)
        if verbose:
            name = 'level {} {}'.format(self.level, side_name)
            print('Training {} with {:10} data points'.format(name, n_data))
        dense = isinstance(X, np.ndarray)
        if not dense and isinstance(X, csr_matrix):
            # column accesses coming up, so convert to CSC sparse matrix format
            X = csc_matrix(X)
        log_p_data_no_split = self._compute_log_p_data_no_split(y, prior)
        optimizer = self.optimizer
        if optimizer is None:
            # default to 'Differential Evolution' which works well and is reasonably fast
            optimizer = ScipyOptimizer(DifferentialEvolutionSolver, 666)
        # the function to optimize (depends on X and y, hence we need to instantiate it for every data set anew)
        optimization_function = HyperplaneOptimizationFunction(
            X,
            y,
            prior,
            self._compute_log_p_data_split,
            log_p_data_no_split,
            optimizer.search_space_is_unit_hypercube,
            self.split_precision)
        # create and run optimizer
        optimizer.solve(optimization_function)
        self.optimization_function = optimization_function
        # retrieve best hyperplane split from optimization function
        self._erase_split_info_base()
        self._erase_split_info()
        if optimization_function.best_hyperplane_normal is not None:
            # split data and target to recursively train children
            # signed distance of every sample to the hyperplane (negative = 'back' side)
            projections = X @ optimization_function.best_hyperplane_normal \
                - np.dot(optimization_function.best_hyperplane_normal, optimization_function.best_hyperplane_origin)
            indices1 = np.where(projections < 0)[0]
            indices2 = np.where(projections >= 0)[0]
            if len(indices1) > 0 and len(indices2) > 0:
                """
                Note: The reason why indices1 or indices2 could be empty is that the optimizer might find a
                'split' that puts all data one one side and nothing on the other side, and that 'split' has
                a higher log probability than 'log_p_data_no_split' because of the partition prior
                overwhelming the data likelihoods (which are of course identical between the 'all data' and
                the 'everything on one side split' scenarios)s.
                """
                X1 = X[indices1]
                X2 = X[indices2]
                y1 = y[indices1]
                y2 = y[indices2]
                n_data1 = X1.shape[0]
                n_data2 = X2.shape[0]
                # compute posteriors of children and priors for further splitting
                prior_child1 = self._compute_posterior(y1, prior, delta=0)
                prior_child2 = self._compute_posterior(y2, prior, delta=0)
                # store split info, create children and continue training them if there's data left to split
                self.best_hyperplane_normal_ = optimization_function.best_hyperplane_normal
                self.best_hyperplane_origin_ = optimization_function.best_hyperplane_origin
                self.log_p_data_no_split_ = optimization_function.log_p_data_no_split
                self.best_log_p_data_split_ = optimization_function.best_log_p_data_split
                self.child1_ = self.child_type(self.partition_prior, prior_child1, self.delta,
                                               self.prune, optimizer, self.split_precision, self.level+1)
                self.child2_ = self.child_type(self.partition_prior, prior_child2, self.delta,
                                               self.prune, optimizer, self.split_precision, self.level+1)
                self.child1_._erase_split_info_base()
                self.child2_._erase_split_info_base()
                self.child1_._erase_split_info()
                self.child2_._erase_split_info()
                # fit children if there is more than one data point (i.e., there is
                # something to split) and if the targets differ (no point otherwise)
                if n_data1 > 1 and len(np.unique(y1)) > 1:
                    self.child1_._fit(X1, y1, verbose, feature_names, 'back ')
                else:
                    self.child1_.posterior_ = self._compute_posterior(y1, prior)
                    self.child1_.n_data_ = n_data1
                if n_data2 > 1 and len(np.unique(y2)) > 1:
                    self.child2_._fit(X2, y2, verbose, feature_names, 'front')
                else:
                    self.child2_.posterior_ = self._compute_posterior(y2, prior)
                    self.child2_.n_data_ = n_data2
        # compute posterior
        self.n_dim_ = X.shape[1]
        self.n_data_ = n_data
        self.posterior_ = self._compute_posterior(y, prior)
    def _compute_child1_and_child2_indices(self, X, indices, dense):
        """Return the index arrays of the rows of X[indices] that fall on the
        'back' (projection < 0) and 'front' (projection >= 0) side of the split.
        """
        projections = X[indices] @ self.best_hyperplane_normal_ - np.dot(self.best_hyperplane_normal_, self.best_hyperplane_origin_)
        indices1 = np.where(projections < 0)[0]
        indices2 = np.where(projections >= 0)[0]
        return indices1, indices2
    def is_leaf(self):
        # A node is a leaf iff no hyperplane split was stored during fitting.
        self._ensure_is_fitted()
        return self.best_hyperplane_normal_ is None
    def feature_importance(self):
        """Return the normalized per-dimension importance, accumulated over all
        internal nodes of the fitted tree.
        """
        self._ensure_is_fitted()
        feature_importance = np.zeros(self.n_dim_)
        self._update_feature_importance(feature_importance)
        feature_importance /= feature_importance.sum()
        return feature_importance
    def _update_feature_importance(self, feature_importance):
        # Recursively accumulate per-dimension importance into the given array (in place).
        if self.is_leaf():
            return
        else:
            log_p_gain = self.best_log_p_data_split_ - self.log_p_data_no_split_
            hyperplane_normal = self.best_hyperplane_normal_
            # the more the normal vector is oriented along a given dimension's axis the more
            # important that dimension is, so weight log_p_gain with hyperplane_normal[i_dim]
            # (its absolute value in fact because the sign of the direction is irrelevant)
            feature_importance += log_p_gain * np.abs(hyperplane_normal)
            if self.child1_ is not None:
                self.child1_._update_feature_importance(feature_importance)
                self.child2_._update_feature_importance(feature_importance)
    def _erase_split_info(self):
        # Reset hyperplane-specific split state (base-class state is reset separately).
        self.best_hyperplane_normal_ = None
        self.best_hyperplane_origin_ = None
    def __str__(self):
        if not self.is_fitted():
            return 'Unfitted model'
        return self._str([], '\u251C', '\u2514', '\u2502', '\u2265', None)
    def _str(self, anchor, VERT_RIGHT, DOWN_RIGHT, BAR, GEQ, is_back_child):
        # Recursive pretty-printer used by __str__; 'anchor' carries the drawing
        # characters of the ancestors so the tree lines up vertically.
        anchor_str = ''.join(' ' + a for a in anchor)
        s = ''
        if is_back_child is not None:
            s += anchor_str + ' {:5s}: '.format('back' if is_back_child else 'front')
        if self.is_leaf():
            s += 'y={}, n={}'.format(self._predict_leaf(), self.n_data_)
            if not self.is_regression:
                s += ', p(y)={}'.format(self._compute_posterior_mean())
        else:
            s += 'HP(origin={}, normal={})'.format(self.best_hyperplane_origin_, self.best_hyperplane_normal_)
            # 'back' child (the child that is on the side of the hyperplane opposite to the normal vector, or projection < 0)
            s += '\n'
            anchor_child1 = [VERT_RIGHT] if len(anchor) == 0 else (anchor[:-1] + [(BAR if is_back_child else ' '), VERT_RIGHT])
            s += self.child1_._str(anchor_child1, VERT_RIGHT, DOWN_RIGHT, BAR, GEQ, True)
            # 'front' child (the child that is on same side of the hyperplane as the normal vector, or projection >= 0)
            s += '\n'
            anchor_child2 = [DOWN_RIGHT] if len(anchor) == 0 else (anchor[:-1] + [(BAR if is_back_child else ' '), DOWN_RIGHT])
            s += self.child2_._str(anchor_child2, VERT_RIGHT, DOWN_RIGHT, BAR, GEQ, False)
        return s
| [
"numpy.abs",
"bayesian_decision_tree.base.BaseTree.__init__",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"bayesian_decision_tree.hyperplane_optimization.HyperplaneOptimizationFunction",
"numpy.where",
"numpy.dot",
"bayesian_decision_tree.hyperplane_optimization.ScipyOptimizer",
"numpy.unique"
] | [((760, 876), 'bayesian_decision_tree.base.BaseTree.__init__', 'BaseTree.__init__', (['self', 'partition_prior', 'prior', 'delta', 'prune', 'child_type', 'is_regression', 'split_precision', 'level'], {}), '(self, partition_prior, prior, delta, prune, child_type,\n is_regression, split_precision, level)\n', (777, 876), False, 'from bayesian_decision_tree.base import BaseTree\n'), ((1889, 2058), 'bayesian_decision_tree.hyperplane_optimization.HyperplaneOptimizationFunction', 'HyperplaneOptimizationFunction', (['X', 'y', 'prior', 'self._compute_log_p_data_split', 'log_p_data_no_split', 'optimizer.search_space_is_unit_hypercube', 'self.split_precision'], {}), '(X, y, prior, self._compute_log_p_data_split,\n log_p_data_no_split, optimizer.search_space_is_unit_hypercube, self.\n split_precision)\n', (1919, 2058), False, 'from bayesian_decision_tree.hyperplane_optimization import HyperplaneOptimizationFunction, ScipyOptimizer\n'), ((6417, 6438), 'numpy.zeros', 'np.zeros', (['self.n_dim_'], {}), '(self.n_dim_)\n', (6425, 6438), True, 'import numpy as np\n'), ((1425, 1438), 'scipy.sparse.csc_matrix', 'csc_matrix', (['X'], {}), '(X)\n', (1435, 1438), False, 'from scipy.sparse import csc_matrix, csr_matrix\n'), ((1694, 1742), 'bayesian_decision_tree.hyperplane_optimization.ScipyOptimizer', 'ScipyOptimizer', (['DifferentialEvolutionSolver', '(666)'], {}), '(DifferentialEvolutionSolver, 666)\n', (1708, 1742), False, 'from bayesian_decision_tree.hyperplane_optimization import HyperplaneOptimizationFunction, ScipyOptimizer\n'), ((6011, 6077), 'numpy.dot', 'np.dot', (['self.best_hyperplane_normal_', 'self.best_hyperplane_origin_'], {}), '(self.best_hyperplane_normal_, self.best_hyperplane_origin_)\n', (6017, 6077), True, 'import numpy as np\n'), ((6097, 6122), 'numpy.where', 'np.where', (['(projections < 0)'], {}), '(projections < 0)\n', (6105, 6122), True, 'import numpy as np\n'), ((6145, 6171), 'numpy.where', 'np.where', (['(projections >= 0)'], {}), '(projections >= 0)\n', 
(6153, 6171), True, 'import numpy as np\n'), ((2658, 2761), 'numpy.dot', 'np.dot', (['optimization_function.best_hyperplane_normal', 'optimization_function.best_hyperplane_origin'], {}), '(optimization_function.best_hyperplane_normal, optimization_function.\n best_hyperplane_origin)\n', (2664, 2761), True, 'import numpy as np\n'), ((2780, 2805), 'numpy.where', 'np.where', (['(projections < 0)'], {}), '(projections < 0)\n', (2788, 2805), True, 'import numpy as np\n'), ((2832, 2858), 'numpy.where', 'np.where', (['(projections >= 0)'], {}), '(projections >= 0)\n', (2840, 2858), True, 'import numpy as np\n'), ((7180, 7205), 'numpy.abs', 'np.abs', (['hyperplane_normal'], {}), '(hyperplane_normal)\n', (7186, 7205), True, 'import numpy as np\n'), ((5177, 5190), 'numpy.unique', 'np.unique', (['y1'], {}), '(y1)\n', (5186, 5190), True, 'import numpy as np\n'), ((5470, 5483), 'numpy.unique', 'np.unique', (['y2'], {}), '(y2)\n', (5479, 5483), True, 'import numpy as np\n')] |
"""Creates random numbers"""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
class MyRandomTransformer(CustomTransformer):
    """Transformer that replaces its input with uniform random noise.

    The global NumPy RNG is re-seeded on every call, so the same seed and
    frame shape always yield the same values.
    """

    # declared non-reproducible for the surrounding framework
    _is_reproducible = False

    def __init__(self, seed=12345, **kwargs):
        super().__init__(**kwargs)
        self.seed = seed

    def fit_transform(self, X: dt.Frame, y: np.array = None):
        # nothing is learned from the data; behave exactly like transform()
        return self.transform(X)

    def transform(self, X: dt.Frame):
        # re-seed so identical shapes always produce identical noise
        np.random.seed(self.seed)
        shape = X.shape
        return np.random.rand(*shape)
| [
"numpy.random.rand",
"numpy.random.seed"
] | [((456, 481), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (470, 481), True, 'import numpy as np\n'), ((497, 521), 'numpy.random.rand', 'np.random.rand', (['*X.shape'], {}), '(*X.shape)\n', (511, 521), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 11:47:57 2012
@author: eendebakpt
"""
# %% Load necessary packages """
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
oadir = '/home/eendebakpt/misc/oa/oacode/'
import oapackage
def tickfontsize(fontsize=14, ax=None):
    """Set the font size of the major tick labels on both axes of *ax*
    (the current axes when *ax* is None) and redraw the figure.
    """
    axes = plt.gca() if ax is None else ax
    # x axis first, then y axis, matching the drawing order
    for axis in (axes.xaxis, axes.yaxis):
        for tick in axis.get_major_ticks():
            tick.label.set_fontsize(fontsize)
    plt.draw()
def nonregularProperties(sols):
    """Collect the D-efficiency and GWLP values A3, A4 of each design.

    Parameters:
        sols: sequence of array objects exposing Defficiency() and GWLP().
    Returns:
        (D, A3, A4) where D is a list of D-efficiencies and A3, A4 are
        numpy arrays holding entries 3 and 4 of each design's GWLP.
    """
    efficiencies = [design.Defficiency() for design in sols]
    gwlps = [design.GWLP() for design in sols]
    A3 = np.array([g[3] for g in gwlps], dtype=float)
    A4 = np.array([g[4] for g in gwlps], dtype=float)
    return efficiencies, A3, A4
# %% Load data
# Select the orthogonal-array class to analyse and build the path to the
# corresponding selection file. The 'if 1' branch is the configuration
# used for the paper; the else branch keeps earlier alternatives.
if 1:
    # for paper
    arrayclass = oapackage.arraydata_t(2, 36, 2, 7)
    basedir = '/home/eendebakpt/misc/homepage/oapage/tpages/'
    basedir = '/home/eendebakpt/misc/oapage2/tpages/'
    oafile = 'class-%s-t%dselection.oa' % (arrayclass.idstr(), arrayclass.strength)
    afile = os.path.join(basedir, 'classdata-special-%s-t%d' % (arrayclass.idstr(), arrayclass.strength), oafile)
else:
    arrayclass = oapackage.arraydata_t(2, 36, 2, 5)  # for paper
    arrayclass = oapackage.arraydata_t(2, 40, 2, 5)  # for paper
    basedir = '/home/eendebakpt/misc/homepage/oapage/tpages/'
    oafile = 'class-%s-t%dselection.oa' % (arrayclass.idstr(), arrayclass.strength)
    afile = os.path.join(basedir, 'abdata-%s-t%d' % (arrayclass.idstr(), arrayclass.strength), oafile)
sols = oapackage.readarrayfile(afile)
print('read %d arrays' % len(sols))
# %% Calculate properties
D, A3, A4 = nonregularProperties(sols)
# %%
def formatFigure(fig=None, paperfig=True):
    """Make *fig* (the current figure when None) active and, for paper
    output, erase its title.
    """
    if fig is not None:
        plt.figure(fig)
    else:
        fig = plt.gcf()
    if paperfig:
        plt.title('')
# %% Show
lstr = arrayclass.latexstr()
tstr = 'Selection of %d arrays in $%s$' % (len(sols), lstr)
# Figure 10: D-efficiency versus A3
plt.figure(10)
plt.clf()
plt.plot(A3, D, '.b', markersize=12)
plt.xlabel('$A_3$', fontsize=14)
plt.ylabel('D-efficiency', fontsize=14)
plt.title(tstr, fontsize=17)
# Figure 11: D-efficiency versus A3 + A4
plt.figure(11)
plt.clf()
plt.plot(A3 + A4, D, '.b', markersize=12)
plt.xlabel('$A_3+A_4$', fontsize=14)
plt.ylabel('D-efficiency', fontsize=14)
plt.title(tstr, fontsize=17)
# Figure 12: A4 versus A3
plt.figure(12)
plt.clf()
plt.plot(A3, A4, '.b', markersize=12)
plt.xlabel('$A_3$', fontsize=14)
plt.ylabel('$A_4$', fontsize=14)
plt.title(tstr, fontsize=17)
# Figure 20: combined plot of A3 and A3+A4 against D-efficiency
rcolor = [.8, 0, 0]
bcolor = [0, .3, 1]
fw = 'medium'
fw = 'normal'
# fw='semibold'
fig = plt.figure(20)
plt.clf()
plt.plot(D, A3, '.', color=bcolor, markersize=12, label='$A_3$')
plt.plot(D, A3 + A4, '.', color=rcolor, markersize=12, label='$A_3+A_4$')
plt.xlabel('D-efficiency', fontsize=22, fontweight=fw)
plt.ylabel('$A_3$, $A_3+A_4$', fontsize=22, fontweight=fw)
plt.title(tstr, fontsize=22)
ax = plt.gca()
tickfontsize(16)
legendh = plt.legend(numpoints=1, fontsize=18)
oapackage.niceplot(ax, fig=fig, legend=legendh)
formatFigure()
oapackage.oahelper.tilefigs([10, 11, 12, 20], [2, 2])
# %% Save figures 10, 11 and 20 as PNGs in a fresh temporary directory
plt.title('')
import tempfile
picturedir = tempfile.mkdtemp(prefix='pictures-A3-A4-D')
idstr = arrayclass.idstr().replace('.', '-d-') + '-t%d' % arrayclass.strength
plt.figure(10)
plt.savefig(os.path.join(picturedir, 'A3-D-%s.png' % idstr))
plt.figure(11)
plt.savefig(os.path.join(picturedir, 'A34-D-%s.png' % idstr))
plt.figure(20)
plt.savefig(os.path.join(picturedir, 'A3-A34-%s.png' % idstr))
print('written figures to %s' % picturedir)
# %%
| [
"matplotlib.pyplot.title",
"oapackage.niceplot",
"oapackage.oahelper.tilefigs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"tempfile.mkdtemp",
"oapackage.readarrayf... | [((1619, 1649), 'oapackage.readarrayfile', 'oapackage.readarrayfile', (['afile'], {}), '(afile)\n', (1642, 1649), False, 'import oapackage\n'), ((2024, 2038), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {}), '(10)\n', (2034, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2039, 2048), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2046, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2049, 2085), 'matplotlib.pyplot.plot', 'plt.plot', (['A3', 'D', '""".b"""'], {'markersize': '(12)'}), "(A3, D, '.b', markersize=12)\n", (2057, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2118), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$A_3$"""'], {'fontsize': '(14)'}), "('$A_3$', fontsize=14)\n", (2096, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""D-efficiency"""'], {'fontsize': '(14)'}), "('D-efficiency', fontsize=14)\n", (2129, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2187), 'matplotlib.pyplot.title', 'plt.title', (['tstr'], {'fontsize': '(17)'}), '(tstr, fontsize=17)\n', (2168, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2189, 2203), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {}), '(11)\n', (2199, 2203), True, 'import matplotlib.pyplot as plt\n'), ((2204, 2213), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2211, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2255), 'matplotlib.pyplot.plot', 'plt.plot', (['(A3 + A4)', 'D', '""".b"""'], {'markersize': '(12)'}), "(A3 + A4, D, '.b', markersize=12)\n", (2222, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2292), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$A_3+A_4$"""'], {'fontsize': '(14)'}), "('$A_3+A_4$', fontsize=14)\n", (2266, 2292), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2332), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""D-efficiency"""'], {'fontsize': '(14)'}), "('D-efficiency', fontsize=14)\n", 
(2303, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2361), 'matplotlib.pyplot.title', 'plt.title', (['tstr'], {'fontsize': '(17)'}), '(tstr, fontsize=17)\n', (2342, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2378), 'matplotlib.pyplot.figure', 'plt.figure', (['(12)'], {}), '(12)\n', (2374, 2378), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2388), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2386, 2388), True, 'import matplotlib.pyplot as plt\n'), ((2389, 2426), 'matplotlib.pyplot.plot', 'plt.plot', (['A3', 'A4', '""".b"""'], {'markersize': '(12)'}), "(A3, A4, '.b', markersize=12)\n", (2397, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2459), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$A_3$"""'], {'fontsize': '(14)'}), "('$A_3$', fontsize=14)\n", (2437, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2460, 2492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$A_4$"""'], {'fontsize': '(14)'}), "('$A_4$', fontsize=14)\n", (2470, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2493, 2521), 'matplotlib.pyplot.title', 'plt.title', (['tstr'], {'fontsize': '(17)'}), '(tstr, fontsize=17)\n', (2502, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2629), 'matplotlib.pyplot.figure', 'plt.figure', (['(20)'], {}), '(20)\n', (2625, 2629), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2639), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2637, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2640, 2704), 'matplotlib.pyplot.plot', 'plt.plot', (['D', 'A3', '"""."""'], {'color': 'bcolor', 'markersize': '(12)', 'label': '"""$A_3$"""'}), "(D, A3, '.', color=bcolor, markersize=12, label='$A_3$')\n", (2648, 2704), True, 'import matplotlib.pyplot as plt\n'), ((2705, 2778), 'matplotlib.pyplot.plot', 'plt.plot', (['D', '(A3 + A4)', '"""."""'], {'color': 'rcolor', 'markersize': '(12)', 'label': '"""$A_3+A_4$"""'}), "(D, A3 + A4, '.', color=rcolor, markersize=12, label='$A_3+A_4$')\n", 
(2713, 2778), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2833), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""D-efficiency"""'], {'fontsize': '(22)', 'fontweight': 'fw'}), "('D-efficiency', fontsize=22, fontweight=fw)\n", (2789, 2833), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$A_3$, $A_3+A_4$"""'], {'fontsize': '(22)', 'fontweight': 'fw'}), "('$A_3$, $A_3+A_4$', fontsize=22, fontweight=fw)\n", (2844, 2892), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2921), 'matplotlib.pyplot.title', 'plt.title', (['tstr'], {'fontsize': '(22)'}), '(tstr, fontsize=22)\n', (2902, 2921), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2936), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2934, 2936), True, 'import matplotlib.pyplot as plt\n'), ((2964, 3000), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'numpoints': '(1)', 'fontsize': '(18)'}), '(numpoints=1, fontsize=18)\n', (2974, 3000), True, 'import matplotlib.pyplot as plt\n'), ((3002, 3049), 'oapackage.niceplot', 'oapackage.niceplot', (['ax'], {'fig': 'fig', 'legend': 'legendh'}), '(ax, fig=fig, legend=legendh)\n', (3020, 3049), False, 'import oapackage\n'), ((3066, 3119), 'oapackage.oahelper.tilefigs', 'oapackage.oahelper.tilefigs', (['[10, 11, 12, 20]', '[2, 2]'], {}), '([10, 11, 12, 20], [2, 2])\n', (3093, 3119), False, 'import oapackage\n'), ((3128, 3141), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (3137, 3141), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3215), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""pictures-A3-A4-D"""'}), "(prefix='pictures-A3-A4-D')\n", (3188, 3215), False, 'import tempfile\n'), ((3294, 3308), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {}), '(10)\n', (3304, 3308), True, 'import matplotlib.pyplot as plt\n'), ((3370, 3384), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {}), '(11)\n', (3380, 3384), True, 'import matplotlib.pyplot as plt\n'), 
((3447, 3461), 'matplotlib.pyplot.figure', 'plt.figure', (['(20)'], {}), '(20)\n', (3457, 3461), True, 'import matplotlib.pyplot as plt\n'), ((544, 554), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (552, 554), True, 'import matplotlib.pyplot as plt\n'), ((658, 670), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (666, 670), True, 'import numpy as np\n'), ((682, 694), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (690, 694), True, 'import numpy as np\n'), ((874, 908), 'oapackage.arraydata_t', 'oapackage.arraydata_t', (['(2)', '(36)', '(2)', '(7)'], {}), '(2, 36, 2, 7)\n', (895, 908), False, 'import oapackage\n'), ((1247, 1281), 'oapackage.arraydata_t', 'oapackage.arraydata_t', (['(2)', '(36)', '(2)', '(5)'], {}), '(2, 36, 2, 5)\n', (1268, 1281), False, 'import oapackage\n'), ((1313, 1347), 'oapackage.arraydata_t', 'oapackage.arraydata_t', (['(2)', '(40)', '(2)', '(5)'], {}), '(2, 40, 2, 5)\n', (1334, 1347), False, 'import oapackage\n'), ((3321, 3368), 'os.path.join', 'os.path.join', (['picturedir', "('A3-D-%s.png' % idstr)"], {}), "(picturedir, 'A3-D-%s.png' % idstr)\n", (3333, 3368), False, 'import os\n'), ((3397, 3445), 'os.path.join', 'os.path.join', (['picturedir', "('A34-D-%s.png' % idstr)"], {}), "(picturedir, 'A34-D-%s.png' % idstr)\n", (3409, 3445), False, 'import os\n'), ((3474, 3523), 'os.path.join', 'os.path.join', (['picturedir', "('A3-A34-%s.png' % idstr)"], {}), "(picturedir, 'A3-A34-%s.png' % idstr)\n", (3486, 3523), False, 'import os\n'), ((358, 367), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (365, 367), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1848), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1846, 1848), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1882), 'matplotlib.pyplot.figure', 'plt.figure', (['fig'], {}), '(fig)\n', (1877, 1882), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1921), 'matplotlib.pyplot.title', 'plt.title', (['""""""'], {}), "('')\n", (1917, 1921), True, 
'import matplotlib.pyplot as plt\n')] |
#
# OpenPilot parsers
# from https://github.com/littlemountainman/modeld/tree/master/tools/lib
#
import numpy as np
# Constants for interpreting/de-normalising openpilot model outputs.
MAX_DISTANCE = 140.  # presumably max longitudinal range in meters -- TODO confirm (unused in this chunk)
LANE_OFFSET = 1.8    # lateral offset added/subtracted in parser() to recover left/right lane lines
MAX_REL_V = 10.      # presumably max relative-velocity clamp -- TODO confirm (unused in this chunk)
LEAD_X_SCALE = 10    # default de-normalisation scale for lead-car x predictions
LEAD_Y_SCALE = 10    # default de-normalisation scale for lead-car y predictions
# NOTE(review): parser() shadows LEAD_X_SCALE/LEAD_Y_SCALE with local values
# when its hard-coded old_scale flag is True.
def sigmoid(x):
  """Element-wise logistic function, mapping real values into (0, 1)."""
  negative_exp = np.exp(-x)
  return 1.0 / (1.0 + negative_exp)
def softplus(x):
  """Numerically stable softplus, log(1 + exp(x)).

  Evaluated as max(x, 0) + log1p(exp(-|x|)) so the exponential never
  overflows for large positive inputs.
  """
  positive_part = np.maximum(x, 0)
  stable_tail = np.log1p(np.exp(-np.abs(x)))
  return positive_part + stable_tail
def softmax(x):
  """Softmax along axis 1 for 2-D (or higher) input, axis 0 for 1-D.

  Works on a copy of the input; subtracts the per-axis maximum first for
  numerical stability. Float32/float64 inputs are exponentiated in place,
  other dtypes are promoted by np.exp.
  """
  out = np.copy(x)
  axis = 1 if out.ndim > 1 else 0
  out -= np.max(out, axis=axis, keepdims=True)
  if out.dtype in (np.float32, np.float64):
    np.exp(out, out=out)
  else:
    out = np.exp(out)
  out /= np.sum(out, axis=axis, keepdims=True)
  return out
def parser(outs):
  """Unpack the raw openpilot model output tensors into a named dict.

  `outs` is the 11-tuple (path, ll, rl, lead, long_x, long_v, long_a,
  desire_state, meta, desire_pred, pose); any entry may be None, in which
  case the corresponding keys are simply omitted from the result.
  """
  PATH_DISTANCE = 192
  out_dict = {}
  path, ll, rl, lead, long_x, long_v, long_a, desire_state, meta, desire_pred, pose = outs
  # Hard-coded switch selecting the older lead-output scaling below.
  old_scale = True
  if path is not None:
    # Three supported path layouts, distinguished by the second dimension:
    # 2*D+1 (mean, stds, valid length), 2*D (mean, stds), or a multi-
    # hypothesis MDN layout handled in the else branch.
    if path.shape[1] == PATH_DISTANCE*2 + 1:
      out_dict['path'] = path[:, :PATH_DISTANCE]
      out_dict['path_stds'] = softplus(path[:, PATH_DISTANCE:2*PATH_DISTANCE])
      # Beyond the predicted valid length, force a huge std (low confidence).
      out_dict['path_stds'][int(path[0,-1]):] = 1e3
    elif path.shape[1] == PATH_DISTANCE*2:
      out_dict['path'] = path[:, :PATH_DISTANCE]
      out_dict['path_stds'] = softplus(path[:, PATH_DISTANCE:2*PATH_DISTANCE])
    else:
      path_reshaped = path[:,:-1].reshape((path.shape[0], -1, PATH_DISTANCE*2 + 1))
      out_dict['paths'] = path_reshaped[:, :, :PATH_DISTANCE]
      out_dict['paths_stds'] = softplus(path_reshaped[:, :, PATH_DISTANCE:PATH_DISTANCE*2])
      out_dict['path_weights'] = softmax(path_reshaped[:,:,-1])
      # NOTE(review): np.argmax without an axis flattens the whole weight
      # array; this presumably assumes batch size 1 -- verify.
      lidx = np.argmax(out_dict['path_weights'])
      out_dict['path'] = path_reshaped[:, lidx, :PATH_DISTANCE]
      out_dict['path_stds'] = softplus(path_reshaped[:, lidx, PATH_DISTANCE:PATH_DISTANCE*2])
  if ll is not None:
    # Left lane line: means, existence probability (last column), stds.
    out_dict['lll'] = ll[:, :PATH_DISTANCE] + LANE_OFFSET
    out_dict['lll_prob'] = sigmoid(ll[:, -1])
    out_dict['lll_stds'] = softplus(ll[:, PATH_DISTANCE:-2])
    out_dict['lll_stds'][int(ll[0,-2]):] = 1e3
  if rl is not None:
    # Right lane line, mirrored layout of the left one.
    out_dict['rll'] = rl[:, :PATH_DISTANCE] - LANE_OFFSET
    out_dict['rll_prob'] = sigmoid(rl[:, -1])
    out_dict['rll_stds'] = softplus(rl[:, PATH_DISTANCE:-2])
    out_dict['rll_stds'][int(rl[0,-2]):] = 1e3
  if lead is not None:
    # These locals shadow the module-level LEAD_*_SCALE constants.
    if old_scale:
      LEAD_X_SCALE = 140
      LEAD_Y_SCALE = 10
      LEAD_V_SCALE = 10
    else:
      LEAD_V_SCALE = 1
      LEAD_X_SCALE = 10
      LEAD_Y_SCALE = 10
    # LEAD MDN: 5 hypotheses x 11 values; columns 8/9 are the mixture
    # weights for the "now" and "+2s" predictions.
    lead_reshaped = lead[:,:-3].reshape((-1,5,11))
    lead_weights = softmax(lead_reshaped[:,:,8])
    lidx = np.argmax(lead_weights[0])
    out_dict['lead_xyva'] = np.column_stack([lead_reshaped[:,lidx, 0] * LEAD_X_SCALE,
                                             lead_reshaped[:,lidx, 1] * LEAD_Y_SCALE,
                                             lead_reshaped[:,lidx, 2] * LEAD_V_SCALE,
                                             lead_reshaped[:,lidx, 3]])
    out_dict['lead_xyva_std'] = np.column_stack([softplus(lead_reshaped[:,lidx, 4]) * LEAD_X_SCALE,
                                                 softplus(lead_reshaped[:,lidx, 5]) * LEAD_Y_SCALE,
                                                 softplus(lead_reshaped[:,lidx, 6]) * LEAD_V_SCALE,
                                                 softplus(lead_reshaped[:,lidx, 7])])
    out_dict['lead_prob'] = sigmoid(lead[:, -3])
    # Same decoding for the 2-seconds-ahead lead prediction.
    lead_weights_2s = softmax(lead_reshaped[:,:,9])
    lidx = np.argmax(lead_weights_2s[0])
    out_dict['lead_xyva_2s'] = np.column_stack([lead_reshaped[:,lidx, 0] * LEAD_X_SCALE,
                                                lead_reshaped[:,lidx, 1] * LEAD_Y_SCALE,
                                                lead_reshaped[:,lidx, 2] * LEAD_V_SCALE,
                                                lead_reshaped[:,lidx, 3]])
    out_dict['lead_xyva_std_2s'] = np.column_stack([softplus(lead_reshaped[:,lidx, 4]) * LEAD_X_SCALE,
                                                    softplus(lead_reshaped[:,lidx, 5]) * LEAD_Y_SCALE,
                                                    softplus(lead_reshaped[:,lidx, 6]) * LEAD_V_SCALE,
                                                    softplus(lead_reshaped[:,lidx, 7])])
    out_dict['lead_prob_2s'] = sigmoid(lead[:, -2])
    out_dict['lead_all'] = lead
  """
  if speed is not None:
    out_dict['speed'] = speed
  """
  # Remaining outputs are passed through untouched.
  if meta is not None:
    out_dict['meta'] = meta
  if desire_pred is not None:
    out_dict['desire'] = desire_pred
  if desire_state is not None:
    out_dict['desire_state'] = desire_state
  if long_x is not None:
    out_dict['long_x'] = long_x
  if long_v is not None:
    out_dict['long_v'] = long_v
  if long_a is not None:
    out_dict['long_a'] = long_a
  if pose is not None:
    # Translation (first 3 cols) plus rotation in degrees -> radians.
    out_dict['trans'] = pose[:,:3]
    out_dict['trans_std'] = softplus(pose[:,6:9]) + 1e-6
    out_dict['rot'] = pose[:,3:6] * np.pi / 180.0
    out_dict['rot_std'] = (softplus(pose[:,9:12]) + 1e-6) * np.pi / 180.0
  return out_dict
| [
"numpy.sum",
"numpy.maximum",
"numpy.copy",
"numpy.argmax",
"numpy.abs",
"numpy.max",
"numpy.exp",
"numpy.column_stack"
] | [((413, 423), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (420, 423), True, 'import numpy as np\n'), ((469, 504), 'numpy.max', 'np.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (475, 504), True, 'import numpy as np\n'), ((612, 647), 'numpy.sum', 'np.sum', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (618, 647), True, 'import numpy as np\n'), ((2670, 2696), 'numpy.argmax', 'np.argmax', (['lead_weights[0]'], {}), '(lead_weights[0])\n', (2679, 2696), True, 'import numpy as np\n'), ((2725, 2903), 'numpy.column_stack', 'np.column_stack', (['[lead_reshaped[:, lidx, 0] * LEAD_X_SCALE, lead_reshaped[:, lidx, 1] *\n LEAD_Y_SCALE, lead_reshaped[:, lidx, 2] * LEAD_V_SCALE, lead_reshaped[:,\n lidx, 3]]'], {}), '([lead_reshaped[:, lidx, 0] * LEAD_X_SCALE, lead_reshaped[:,\n lidx, 1] * LEAD_Y_SCALE, lead_reshaped[:, lidx, 2] * LEAD_V_SCALE,\n lead_reshaped[:, lidx, 3]])\n', (2740, 2903), True, 'import numpy as np\n'), ((3525, 3554), 'numpy.argmax', 'np.argmax', (['lead_weights_2s[0]'], {}), '(lead_weights_2s[0])\n', (3534, 3554), True, 'import numpy as np\n'), ((3586, 3764), 'numpy.column_stack', 'np.column_stack', (['[lead_reshaped[:, lidx, 0] * LEAD_X_SCALE, lead_reshaped[:, lidx, 1] *\n LEAD_Y_SCALE, lead_reshaped[:, lidx, 2] * LEAD_V_SCALE, lead_reshaped[:,\n lidx, 3]]'], {}), '([lead_reshaped[:, lidx, 0] * LEAD_X_SCALE, lead_reshaped[:,\n lidx, 1] * LEAD_Y_SCALE, lead_reshaped[:, lidx, 2] * LEAD_V_SCALE,\n lead_reshaped[:, lidx, 3]])\n', (3601, 3764), True, 'import numpy as np\n'), ((374, 390), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (384, 390), True, 'import numpy as np\n'), ((562, 578), 'numpy.exp', 'np.exp', (['x'], {'out': 'x'}), '(x, out=x)\n', (568, 578), True, 'import numpy as np\n'), ((595, 604), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (601, 604), True, 'import numpy as np\n'), ((246, 256), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (252, 256), 
True, 'import numpy as np\n'), ((1616, 1651), 'numpy.argmax', 'np.argmax', (["out_dict['path_weights']"], {}), "(out_dict['path_weights'])\n", (1625, 1651), True, 'import numpy as np\n'), ((360, 369), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (366, 369), True, 'import numpy as np\n')] |
import taichi as ti
import taichi_three as t3
from taichi_three.mciso import MCISO, Voxelizer
import numpy as np
# --- Scene setup: extract an iso-surface from a smoke volume and render it.
ti.init(arch=ti.opengl)
# Volumetric density field to surface.
vol = np.load('assets/smoke.npy')
# Marching-cubes iso-surface extractor sized to the volume resolution.
mciso = MCISO(vol.shape[0], use_sparse=False)
scene = t3.Scene()
# Dynamic mesh buffers sized for the extractor's maximum output.
mesh = t3.DynamicMesh(n_faces=mciso.N_res, n_pos=mciso.N_res, n_nrm=mciso.N_res)
model = t3.Model(mesh)
scene.add_model(model)
camera = t3.Camera()
scene.add_camera(camera)
scene.add_light(t3.Light([0.4, -1.5, -1.8], 0.8))
scene.add_light(t3.AmbientLight(0.22))
@ti.kernel
def update_mesh():
    # Copy the marching-cubes result (face indices, vertex positions,
    # normals) into the renderable mesh buffers.
    mesh.n_faces[None] = mciso.Js_n[None]
    for i in range(mciso.Js_n[None]):
        for t in ti.static(range(3)):
            mesh.faces[i][t, 0] = mciso.Jts[i][t]
            mesh.faces[i][t, 2] = mciso.Jts[i][t]
        # Rescale vertex positions from voxel coordinates into [-1, 1].
        mesh.pos[i] = (mciso.vs[i] + 0.5) / mciso.N * 2 - 1
        mesh.nrm[i] = mciso.ns[i]
# Run the extraction once, then enter the interactive render loop.
mciso.clear()
mciso.m.from_numpy(vol * 4)  # density pre-scale; factor presumably hand-tuned -- TODO confirm
print(mciso.m.to_numpy().max())
mciso.march()
update_mesh()
gui = ti.GUI('MCISO', camera.res)
while gui.running:
    gui.get_event(None)
    camera.from_mouse(gui)
    if gui.is_pressed(gui.ESCAPE):
        gui.running = False
    scene.render()
    gui.set_image(camera.img)
    gui.show()
| [
"numpy.load",
"taichi.GUI",
"taichi_three.Camera",
"taichi.init",
"taichi_three.Model",
"taichi_three.AmbientLight",
"taichi_three.DynamicMesh",
"taichi_three.mciso.MCISO",
"taichi_three.Light",
"taichi_three.Scene"
] | [((114, 137), 'taichi.init', 'ti.init', ([], {'arch': 'ti.opengl'}), '(arch=ti.opengl)\n', (121, 137), True, 'import taichi as ti\n'), ((146, 173), 'numpy.load', 'np.load', (['"""assets/smoke.npy"""'], {}), "('assets/smoke.npy')\n", (153, 173), True, 'import numpy as np\n'), ((183, 220), 'taichi_three.mciso.MCISO', 'MCISO', (['vol.shape[0]'], {'use_sparse': '(False)'}), '(vol.shape[0], use_sparse=False)\n', (188, 220), False, 'from taichi_three.mciso import MCISO, Voxelizer\n'), ((231, 241), 'taichi_three.Scene', 't3.Scene', ([], {}), '()\n', (239, 241), True, 'import taichi_three as t3\n'), ((249, 322), 'taichi_three.DynamicMesh', 't3.DynamicMesh', ([], {'n_faces': 'mciso.N_res', 'n_pos': 'mciso.N_res', 'n_nrm': 'mciso.N_res'}), '(n_faces=mciso.N_res, n_pos=mciso.N_res, n_nrm=mciso.N_res)\n', (263, 322), True, 'import taichi_three as t3\n'), ((331, 345), 'taichi_three.Model', 't3.Model', (['mesh'], {}), '(mesh)\n', (339, 345), True, 'import taichi_three as t3\n'), ((378, 389), 'taichi_three.Camera', 't3.Camera', ([], {}), '()\n', (387, 389), True, 'import taichi_three as t3\n'), ((957, 984), 'taichi.GUI', 'ti.GUI', (['"""MCISO"""', 'camera.res'], {}), "('MCISO', camera.res)\n", (963, 984), True, 'import taichi as ti\n'), ((431, 463), 'taichi_three.Light', 't3.Light', (['[0.4, -1.5, -1.8]', '(0.8)'], {}), '([0.4, -1.5, -1.8], 0.8)\n', (439, 463), True, 'import taichi_three as t3\n'), ((481, 502), 'taichi_three.AmbientLight', 't3.AmbientLight', (['(0.22)'], {}), '(0.22)\n', (496, 502), True, 'import taichi_three as t3\n')] |
import cv2
import os
import numpy as np
import argparse
import collections
import torch
import itertools
from tqdm import tqdm
from preprocessing import transform
from reconstruction import NMFCRenderer
IMG_EXTENSIONS = ['.png']

def is_image_file(filename):
    """Return True when *filename* ends with a recognised image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def get_image_paths_dict(dir):
    """Collect image paths under *dir*, grouped by sequence name.

    Only image files living directly inside a directory named 'real' or
    'fake' are kept. The sequence name is the part of that directory name
    before the first underscore. Returns an OrderedDict sorted by sequence
    name, each value being the sorted list of image paths.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    image_files = {}
    for root, _, fnames in sorted(os.walk(dir)):
        basename = os.path.basename(root)
        if basename not in ('real', 'fake'):
            continue
        seq_name = basename.split('_')[0]
        for fname in fnames:
            if is_image_file(fname):
                image_files.setdefault(seq_name, []).append(os.path.join(root, fname))
    # Sort paths for each sequence.
    for seq_name in image_files:
        image_files[seq_name].sort()
    # Return the dictionary sorted by its keys (identity names).
    return collections.OrderedDict(sorted(image_files.items()))
def paths_exist(image_pths):
    """Return True when every path in *image_pths* exists on disk."""
    return all(os.path.exists(path) for path in image_pths)
def print_args(parser, args):
    """Pretty-print the parsed arguments, flagging non-default values.

    Each argument is shown on its own line; values that differ from the
    parser's default are annotated with that default.
    """
    message = '----------------- Arguments ---------------\n'
    for key, value in sorted(vars(args).items()):
        default = parser.get_default(key)
        annotation = '' if value == default else '\t[default: %s]' % str(default)
        message += '{:>25}: {:<30}{}\n'.format(str(key), str(value), annotation)
    message += '-------------------------------------------'
    print(message)
def l1_dist(v1, v2):
    """L1 distance (sum of absolute differences) between two arrays."""
    return np.sum(np.abs(v1 - v2))
def euler_dist(e1, e2):
    """Mean per-axis angular distance in degrees between two Euler-angle
    triplets, wrapping each difference into [0, 180]."""
    total = 0.0
    for axis in range(3):
        delta = abs(e1[axis] - e2[axis])
        if delta > 180:
            delta = 360 - delta
        total += delta
    return total / 3
def get_within_distances(lst):
    """Min, max and mean pairwise L1 distance within *lst*.

    Parameters
    ----------
    lst : sequence of array-like
        Items compared pairwise with l1_dist.

    Returns
    -------
    tuple
        (min, max, mean) over all unordered pairs. With fewer than two
        items there are no pairs, so min is inf, max is 0 and the mean is
        NaN (np.mean of an empty list).
    """
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float('inf') is the correct replacement. Also avoid
    # shadowing the builtins min/max.
    min_dst = float('inf')
    max_dst = 0
    dsts = []
    for first, second in itertools.combinations(lst, 2):
        dst = l1_dist(first, second)
        min_dst = min(min_dst, dst)
        max_dst = max(max_dst, dst)
        dsts.append(dst)
    return min_dst, max_dst, np.mean(dsts)
def compute_distance_of_average_identities(ident_list1, ident_list2):
    """L1 distance between the mean identity vectors of two sequences."""
    mean_ident1 = np.mean(ident_list1, axis=0)
    mean_ident2 = np.mean(ident_list2, axis=0)
    return l1_dist(mean_ident1, mean_ident2)
def compute_average_expesion_distance(expr_list1, expr_list2):
    """Mean per-frame L1 distance between two expression-coefficient
    sequences (paired frame by frame)."""
    distances = [l1_dist(expr1, expr2)
                 for expr1, expr2 in zip(expr_list1, expr_list2)]
    return np.mean(distances)
def compute_average_rotation_distance(cam_list1, cam_list2):
    """Mean Euler-angle distance between two camera sequences.

    Each camera entry carries its rotation matrix at index 1; matrices are
    converted to Euler angles before the per-frame comparison.
    """
    angles_list1 = [transform.matrix2angle(cam[1]) for cam in cam_list1]
    angles_list2 = [transform.matrix2angle(cam[1]) for cam in cam_list2]
    distances = [euler_dist(ang1, ang2)
                 for ang1, ang2 in zip(angles_list1, angles_list2)]
    return np.mean(distances)
def main():
    """Entry point: reconstruct 3D face coefficients for the 'real' and
    'fake' frame sequences under --results_dir and report the DAI-L1,
    AED-L1 and ARD-L1 similarity metrics between them."""
    print('Computation of L1 distance between average identity coeffs (DAI-L1)\n')
    print('Computation of average L1 distance between expression coeffs (AED-L1)\n')
    print('Computation of average L1 distance between rotation parameters (ARD-L1)\n')
    parser = argparse.ArgumentParser()
    parser.add_argument('--results_dir', type=str, default='results/head2head_obama/latest_epoch/videos_test/obama',
                        help='Path to the results directory.')
    parser.add_argument('--gpu_id', type=int, default='0', help='Negative value to use CPU, or greater equal than zero for GPU id.')
    args = parser.parse_args()
    # Figure out the device: negative id means CPU; an id past the device
    # count falls back to GPU 0; no CUDA at all aborts the run.
    args.gpu_id = int(args.gpu_id)
    if args.gpu_id < 0:
        args.gpu_id = -1
    elif torch.cuda.is_available():
        if args.gpu_id >= torch.cuda.device_count():
            args.gpu_id = 0
    else:
        print('GPU device not available. Exit.')
        exit(0)
    # Print Arguments
    print_args(parser, args)
    # Create the directory of image paths.
    images_dict = get_image_paths_dict(args.results_dir)
    # Make sure we have two folders, one with real and one with fake frames.
    assert 'real' in images_dict and 'fake' in images_dict and \
           len(images_dict.keys()) == 2, 'Results directory has wrong structure'
    # Initialize the NMFC renderer.
    renderer = NMFCRenderer(args)
    # Reconstruct 3DMM coefficients (camera, identity, expression) for each
    # of the two sequences.
    identities_dict = {}
    expressions_dict = {}
    camera_dict = {}
    for name, image_pths in images_dict.items():
        if paths_exist(image_pths):
            success, reconstruction_output = renderer.reconstruct(image_pths)
            if success:
                identities_dict[name] = reconstruction_output[1]
                expressions_dict[name] = reconstruction_output[2]
                camera_dict[name] = reconstruction_output[0]
            else:
                print('Reconstruction on %s failed.' % name)
                break
    # If the two expression sequences have been computed, find average L1 dist.
    if len(identities_dict.keys()) == 2:
        # Identity
        dai_L1 = compute_distance_of_average_identities(identities_dict['real'],
                                                        identities_dict['fake'])
        # Distance Between Average Identities (DAI-L1)
        print('(L1) distance between average identities from real and fake sequences (DAI-L1): %0.4f' % (dai_L1))
        #dsts_real = get_within_distances(identities_dict['real'])
        #print('Within real sequence min %0.4f, max %0.4f, mean %0.4f' % dsts_real)
        #dsts_fake = get_within_distances(identities_dict['fake'])
        #print('Within fake sequence min %0.4f, max %0.4f, mean %0.4f' % dsts_fake)
        # Expression
        aed_L1 = compute_average_expesion_distance(expressions_dict['real'],
                                                   expressions_dict['fake'])
        # Average Expression Distance (AED-L1)
        print('Average expression (L1) distance between real and fake sequences (AED-L1): %0.4f' % (aed_L1))
        # Pose
        ard_L1 = compute_average_rotation_distance(camera_dict['real'],
                                                   camera_dict['fake'])
        # Average Rotation Parameters Distance (ARD-L1)
        print('Average rotation (L1) distance between real and fake sequences (ARD-L1): %0.4f' % (ard_L1))
    # Clean
    renderer.clear()
# Allow running this evaluation as a standalone script.
if __name__=='__main__':
    main()
| [
"numpy.abs",
"argparse.ArgumentParser",
"os.path.basename",
"os.path.isdir",
"os.walk",
"reconstruction.NMFCRenderer",
"numpy.float",
"os.path.exists",
"preprocessing.transform.matrix2angle",
"torch.cuda.device_count",
"itertools.combinations",
"numpy.mean",
"torch.cuda.is_available",
"os.... | [((454, 472), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (467, 472), False, 'import os\n'), ((2117, 2147), 'itertools.combinations', 'itertools.combinations', (['lst', '(2)'], {}), '(lst, 2)\n', (2139, 2147), False, 'import itertools\n'), ((2170, 2185), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (2178, 2185), True, 'import numpy as np\n'), ((2384, 2396), 'numpy.mean', 'np.mean', (['avg'], {}), '(avg)\n', (2391, 2396), True, 'import numpy as np\n'), ((3435, 3460), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3458, 3460), False, 'import argparse\n'), ((4536, 4554), 'reconstruction.NMFCRenderer', 'NMFCRenderer', (['args'], {}), '(args)\n', (4548, 4554), False, 'from reconstruction import NMFCRenderer\n'), ((544, 556), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (551, 556), False, 'import os\n'), ((578, 600), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (594, 600), False, 'import os\n'), ((2522, 2550), 'numpy.mean', 'np.mean', (['ident_list1'], {'axis': '(0)'}), '(ident_list1, axis=0)\n', (2529, 2550), True, 'import numpy as np\n'), ((2552, 2580), 'numpy.mean', 'np.mean', (['ident_list2'], {'axis': '(0)'}), '(ident_list2, axis=0)\n', (2559, 2580), True, 'import numpy as np\n'), ((2919, 2949), 'preprocessing.transform.matrix2angle', 'transform.matrix2angle', (['cam[1]'], {}), '(cam[1])\n', (2941, 2949), False, 'from preprocessing import transform\n'), ((2992, 3022), 'preprocessing.transform.matrix2angle', 'transform.matrix2angle', (['cam[1]'], {}), '(cam[1])\n', (3014, 3022), False, 'from preprocessing import transform\n'), ((3926, 3951), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3949, 3951), False, 'import torch\n'), ((1262, 1288), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (1276, 1288), False, 'import os\n'), ((1801, 1816), 'numpy.abs', 'np.abs', (['(v1 - v2)'], {}), '(v1 - v2)\n', (1807, 1816), True, 
'import numpy as np\n'), ((723, 748), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (735, 748), False, 'import os\n'), ((3979, 4004), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4002, 4004), False, 'import torch\n'), ((776, 798), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (792, 798), False, 'import os\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as fnn
from model.cvae_feed_info import CVAEFeedInfo
from model.model_utils import get_bi_rnn_encode, dynamic_rnn
class CVAEStaticInfo:
    """Run-level (static) components of the CVAE dialogue model.

    Bundles vocabulary bookkeeping, hyper-parameters from ``model_config``,
    the embedding tables, the utterance and context encoder GRUs, the
    recognition/prior networks for the latent variable, the bag-of-words and
    dialogue-act auxiliary heads, and the decoder GRU. Per-batch tensors
    live in ``CVAEFeedInfo``.
    """

    def __init__(self, model_config, vocab_class):
        """Build all sub-modules from the configuration and vocabulary.

        Args:
            model_config: dict of hyper-parameters (cell sizes, embedding
                sizes, dropout keep probabilities, device string, ...).
            vocab_class: object exposing ``vocab``, ``rev_vocab``,
                ``topic_vocab``, ``dialog_act_vocab`` and an optional
                pretrained ``word2vec`` weight matrix (or None).
        """
        # --- Vocabulary bookkeeping ---
        self.vocab = vocab_class.vocab
        self.rev_vocab = vocab_class.rev_vocab
        self.vocab_size = len(self.vocab)
        self.topic_vocab = vocab_class.topic_vocab
        self.topic_vocab_size = len(self.topic_vocab)
        self.da_vocab = vocab_class.dialog_act_vocab
        self.da_vocab_size = len(self.da_vocab)
        self.pad_id = self.rev_vocab['<pad>']
        self.go_id = self.rev_vocab['<s>']
        self.eos_id = self.rev_vocab['</s>']
        # --- Hyper-parameters ---
        self.max_tokenized_sent_size = model_config['max_tokenized_sent_size']
        self.ctx_cell_size = model_config['ctx_cell_size']
        self.sent_cell_size = model_config['sent_cell_size']
        self.dec_cell_size = model_config['dec_cell_size']
        self.latent_size = model_config['latent_size']
        self.embed_size = model_config['embed_size']
        self.sent_type = model_config['sent_type']
        self.keep_prob = model_config['keep_prob']
        self.num_layer = model_config['num_layer']
        self.use_hcf = model_config['use_hcf']
        self.device = torch.device(model_config['device'])
        self.dec_keep_prob = model_config['dec_keep_prob']
        self.topic_embed_size = model_config['topic_embed_size']
        self.da_size = model_config['da_size']
        self.da_embed_size = model_config['da_embed_size']
        self.da_hidden_size = model_config['da_hidden_size']
        self.meta_embed_size = model_config['meta_embed_size']
        self.bow_hidden_size = model_config['bow_hidden_size']
        self.act_hidden_size = model_config['act_hidden_size']
        # --- Embedding tables ---
        self.topic_embedding = nn.Embedding(self.topic_vocab_size, self.topic_embed_size)
        self.da_embedding = nn.Embedding(self.da_vocab_size, self.da_embed_size)
        self.word_embedding = nn.Embedding(self.vocab_size, self.embed_size, padding_idx=self.pad_id)
        if vocab_class.word2vec is not None:
            # BUG FIX: nn.Embedding.from_pretrained is a *classmethod* that
            # builds and returns a brand-new Embedding. The previous code
            # called it on the instance and discarded the result, so the
            # word2vec weights were never actually loaded. Copy them into
            # the existing (still trainable) table instead.
            with torch.no_grad():
                self.word_embedding.weight.copy_(
                    torch.FloatTensor(vocab_class.word2vec))
        # --- Utterance encoder (bidirectional GRU over word embeddings) ---
        if self.sent_type == 'bi-rnn':
            self.bi_sent_cell = nn.GRU(
                input_size=self.embed_size,
                hidden_size=self.sent_cell_size,
                num_layers=self.num_layer,
                dropout=1 - self.keep_prob,
                bidirectional=True
            )
        else:
            raise ValueError('Unknown sent_type... Only use bi-rnn type.')
        # Bidirectional output concatenates both directions.
        input_embedding_size = output_embedding_size = self.sent_cell_size * 2
        # +2 for the one-hot speaker-floor indicator appended per utterance
        # (see get_encoder_state).
        joint_embedding_size = input_embedding_size + 2
        # --- Context encoder (unidirectional GRU over utterance vectors) ---
        self.enc_cell = nn.GRU(
            input_size=joint_embedding_size,
            hidden_size=self.ctx_cell_size,
            num_layers=self.num_layer,
            dropout=0,
            bidirectional=False,
            batch_first=True
        )
        # Projects a dialogue-act embedding into the decoder's side input.
        self.attribute_fc1 = nn.Sequential(
            nn.Linear(self.da_embed_size, self.da_hidden_size),
            nn.Tanh()
        )
        # Condition = topic embedding + two meta embeddings + context state.
        cond_embedding_size = self.topic_embed_size + (2 * self.meta_embed_size) + self.ctx_cell_size
        recog_input_size = cond_embedding_size + output_embedding_size
        if self.use_hcf:
            recog_input_size += self.da_embed_size
        # Recognition/prior networks; each emits 2 * latent_size values
        # (per the names: mu and log-variance concatenated).
        self.recog_mulogvar_net = nn.Linear(recog_input_size, self.latent_size * 2)
        self.prior_mulogvar_net = nn.Sequential(
            nn.Linear(cond_embedding_size, np.maximum(self.latent_size * 2, 100)),
            nn.Tanh(),
            nn.Linear(np.maximum(self.latent_size * 2, 100), self.latent_size * 2)
        )
        # Bag-of-words auxiliary head (BOW loss).
        gen_input_size = cond_embedding_size + self.latent_size
        self.bow_project = nn.Sequential(
            nn.Linear(gen_input_size, self.bow_hidden_size),
            nn.Tanh(),
            nn.Dropout(1 - self.keep_prob),
            nn.Linear(self.bow_hidden_size, self.vocab_size)
        )
        # Dialogue-act prediction head (Y loss); only built when HCF is on.
        self.da_project = None
        if self.use_hcf:
            self.da_project = nn.Sequential(
                nn.Linear(gen_input_size, self.act_hidden_size),
                nn.Tanh(),
                nn.Dropout(1 - self.keep_prob),
                nn.Linear(self.act_hidden_size, self.da_size)
            )
            dec_input_size = gen_input_size + self.da_embed_size
        else:
            dec_input_size = gen_input_size
        # --- Decoder ---
        if self.num_layer > 1:
            self.dec_init_state_net = nn.ModuleList(
                [nn.Linear(dec_input_size, self.dec_cell_size) for _ in range(self.num_layer)]
            )
        else:
            self.dec_init_state_net = nn.Linear(dec_input_size, self.dec_cell_size)
        dec_input_embedding_size = self.embed_size
        if self.use_hcf:
            dec_input_embedding_size += self.da_hidden_size
        self.dec_cell = nn.GRU(
            input_size=dec_input_embedding_size,
            hidden_size=self.dec_cell_size,
            num_layers=self.num_layer,
            dropout=1 - self.keep_prob,
            bidirectional=False,
            batch_first=True
        )
        self.dec_cell_project = nn.Linear(self.dec_cell_size, self.vocab_size)

    def get_encoder_state(self, f_info: CVAEFeedInfo):
        """Encode a batch of dialogues into the context encoder's last state.

        Args:
            f_info: per-batch feed (token ids, floors, lengths, flags).

        Returns:
            The final hidden state of the context GRU; when num_layer > 1
            the per-layer states are concatenated along the feature dim.
        """
        # Flatten dialogues so every utterance is encoded independently.
        input_contexts = f_info.input_contexts.view(-1, f_info.max_seq_len)
        # NOTE: the original also computed self.topic_embedding(f_info.topics)
        # here ('relation_embedded') but never used it; the dead call has
        # been removed (nn.Embedding's forward has no side effects).
        input_embedded = self.word_embedding(input_contexts)
        if self.sent_type == 'bi-rnn':
            input_embedding, sent_size = get_bi_rnn_encode(
                embedding=input_embedded,
                cell=self.bi_sent_cell,
                max_len=self.max_tokenized_sent_size
            )
        else:
            raise ValueError("unk sent_type. select one in [bow, rnn, bi-rnn]")
        # Back to [batch, dialog_len, utterance_embedding].
        input_embedding = input_embedding.view(-1, f_info.max_dialog_len, sent_size)
        if self.keep_prob < 1.0:
            input_embedding = fnn.dropout(input_embedding, 1 - self.keep_prob, f_info.is_train)
        # One-hot encode which speaker holds the floor for each utterance.
        floor_one_hot = f_info.floors.new_zeros((f_info.floors.numel(), 2), dtype=torch.float)
        floor_one_hot.data.scatter_(1, f_info.floors.view(-1, 1), 1)
        floor_one_hot = floor_one_hot.view(-1, f_info.max_dialog_len, 2)
        joint_embedding = torch.cat([input_embedding, floor_one_hot], 2)
        _, enc_last_state = dynamic_rnn(
            cell=self.enc_cell,
            inputs=joint_embedding,
            sequence_length=f_info.context_lens,
            max_len=self.max_tokenized_sent_size
        )
        if self.num_layer > 1:
            # Concatenate the per-layer final states along the feature dim.
            enc_last_state = torch.cat([_ for _ in torch.unbind(enc_last_state)], 1)
        else:
            enc_last_state = enc_last_state.squeeze(0)
        return enc_last_state
| [
"torch.nn.Dropout",
"torch.nn.GRU",
"numpy.maximum",
"torch.nn.Tanh",
"torch.nn.Embedding",
"torch.nn.functional.dropout",
"torch.cat",
"model.model_utils.dynamic_rnn",
"torch.FloatTensor",
"model.model_utils.get_bi_rnn_encode",
"torch.nn.Linear",
"torch.device",
"torch.unbind"
] | [((1331, 1367), 'torch.device', 'torch.device', (["model_config['device']"], {}), "(model_config['device'])\n", (1343, 1367), False, 'import torch\n'), ((1880, 1938), 'torch.nn.Embedding', 'nn.Embedding', (['self.topic_vocab_size', 'self.topic_embed_size'], {}), '(self.topic_vocab_size, self.topic_embed_size)\n', (1892, 1938), True, 'import torch.nn as nn\n'), ((1967, 2019), 'torch.nn.Embedding', 'nn.Embedding', (['self.da_vocab_size', 'self.da_embed_size'], {}), '(self.da_vocab_size, self.da_embed_size)\n', (1979, 2019), True, 'import torch.nn as nn\n'), ((2050, 2121), 'torch.nn.Embedding', 'nn.Embedding', (['self.vocab_size', 'self.embed_size'], {'padding_idx': 'self.pad_id'}), '(self.vocab_size, self.embed_size, padding_idx=self.pad_id)\n', (2062, 2121), True, 'import torch.nn as nn\n'), ((2911, 3068), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'joint_embedding_size', 'hidden_size': 'self.ctx_cell_size', 'num_layers': 'self.num_layer', 'dropout': '(0)', 'bidirectional': '(False)', 'batch_first': '(True)'}), '(input_size=joint_embedding_size, hidden_size=self.ctx_cell_size,\n num_layers=self.num_layer, dropout=0, bidirectional=False, batch_first=True\n )\n', (2917, 3068), True, 'import torch.nn as nn\n'), ((3640, 3689), 'torch.nn.Linear', 'nn.Linear', (['recog_input_size', '(self.latent_size * 2)'], {}), '(recog_input_size, self.latent_size * 2)\n', (3649, 3689), True, 'import torch.nn as nn\n'), ((5209, 5387), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'dec_input_embedding_size', 'hidden_size': 'self.dec_cell_size', 'num_layers': 'self.num_layer', 'dropout': '(1 - self.keep_prob)', 'bidirectional': '(False)', 'batch_first': '(True)'}), '(input_size=dec_input_embedding_size, hidden_size=self.dec_cell_size,\n num_layers=self.num_layer, dropout=1 - self.keep_prob, bidirectional=\n False, batch_first=True)\n', (5215, 5387), True, 'import torch.nn as nn\n'), ((5493, 5539), 'torch.nn.Linear', 'nn.Linear', (['self.dec_cell_size', 'self.vocab_size'], {}), 
'(self.dec_cell_size, self.vocab_size)\n', (5502, 5539), True, 'import torch.nn as nn\n'), ((6621, 6667), 'torch.cat', 'torch.cat', (['[input_embedding, floor_one_hot]', '(2)'], {}), '([input_embedding, floor_one_hot], 2)\n', (6630, 6667), False, 'import torch\n'), ((6696, 6831), 'model.model_utils.dynamic_rnn', 'dynamic_rnn', ([], {'cell': 'self.enc_cell', 'inputs': 'joint_embedding', 'sequence_length': 'f_info.context_lens', 'max_len': 'self.max_tokenized_sent_size'}), '(cell=self.enc_cell, inputs=joint_embedding, sequence_length=\n f_info.context_lens, max_len=self.max_tokenized_sent_size)\n', (6707, 6831), False, 'from model.model_utils import get_bi_rnn_encode, dynamic_rnn\n'), ((2399, 2545), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'self.embed_size', 'hidden_size': 'self.sent_cell_size', 'num_layers': 'self.num_layer', 'dropout': '(1 - self.keep_prob)', 'bidirectional': '(True)'}), '(input_size=self.embed_size, hidden_size=self.sent_cell_size,\n num_layers=self.num_layer, dropout=1 - self.keep_prob, bidirectional=True)\n', (2405, 2545), True, 'import torch.nn as nn\n'), ((3272, 3322), 'torch.nn.Linear', 'nn.Linear', (['self.da_embed_size', 'self.da_hidden_size'], {}), '(self.da_embed_size, self.da_hidden_size)\n', (3281, 3322), True, 'import torch.nn as nn\n'), ((3336, 3345), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3343, 3345), True, 'import torch.nn as nn\n'), ((3834, 3843), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3841, 3843), True, 'import torch.nn as nn\n'), ((4085, 4132), 'torch.nn.Linear', 'nn.Linear', (['gen_input_size', 'self.bow_hidden_size'], {}), '(gen_input_size, self.bow_hidden_size)\n', (4094, 4132), True, 'import torch.nn as nn\n'), ((4146, 4155), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (4153, 4155), True, 'import torch.nn as nn\n'), ((4169, 4199), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - self.keep_prob)'], {}), '(1 - self.keep_prob)\n', (4179, 4199), True, 'import torch.nn as nn\n'), ((4213, 4261), 'torch.nn.Linear', 
'nn.Linear', (['self.bow_hidden_size', 'self.vocab_size'], {}), '(self.bow_hidden_size, self.vocab_size)\n', (4222, 4261), True, 'import torch.nn as nn\n'), ((5003, 5048), 'torch.nn.Linear', 'nn.Linear', (['dec_input_size', 'self.dec_cell_size'], {}), '(dec_input_size, self.dec_cell_size)\n', (5012, 5048), True, 'import torch.nn as nn\n'), ((5878, 5988), 'model.model_utils.get_bi_rnn_encode', 'get_bi_rnn_encode', ([], {'embedding': 'input_embedded', 'cell': 'self.bi_sent_cell', 'max_len': 'self.max_tokenized_sent_size'}), '(embedding=input_embedded, cell=self.bi_sent_cell, max_len\n =self.max_tokenized_sent_size)\n', (5895, 5988), False, 'from model.model_utils import get_bi_rnn_encode, dynamic_rnn\n'), ((6290, 6355), 'torch.nn.functional.dropout', 'fnn.dropout', (['input_embedding', '(1 - self.keep_prob)', 'f_info.is_train'], {}), '(input_embedding, 1 - self.keep_prob, f_info.is_train)\n', (6301, 6355), True, 'import torch.nn.functional as fnn\n'), ((2233, 2272), 'torch.FloatTensor', 'torch.FloatTensor', (['vocab_class.word2vec'], {}), '(vocab_class.word2vec)\n', (2250, 2272), False, 'import torch\n'), ((3782, 3819), 'numpy.maximum', 'np.maximum', (['(self.latent_size * 2)', '(100)'], {}), '(self.latent_size * 2, 100)\n', (3792, 3819), True, 'import numpy as np\n'), ((3867, 3904), 'numpy.maximum', 'np.maximum', (['(self.latent_size * 2)', '(100)'], {}), '(self.latent_size * 2, 100)\n', (3877, 3904), True, 'import numpy as np\n'), ((4416, 4463), 'torch.nn.Linear', 'nn.Linear', (['gen_input_size', 'self.act_hidden_size'], {}), '(gen_input_size, self.act_hidden_size)\n', (4425, 4463), True, 'import torch.nn as nn\n'), ((4481, 4490), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (4488, 4490), True, 'import torch.nn as nn\n'), ((4508, 4538), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - self.keep_prob)'], {}), '(1 - self.keep_prob)\n', (4518, 4538), True, 'import torch.nn as nn\n'), ((4556, 4601), 'torch.nn.Linear', 'nn.Linear', (['self.act_hidden_size', 'self.da_size'], 
{}), '(self.act_hidden_size, self.da_size)\n', (4565, 4601), True, 'import torch.nn as nn\n'), ((4859, 4904), 'torch.nn.Linear', 'nn.Linear', (['dec_input_size', 'self.dec_cell_size'], {}), '(dec_input_size, self.dec_cell_size)\n', (4868, 4904), True, 'import torch.nn as nn\n'), ((6968, 6996), 'torch.unbind', 'torch.unbind', (['enc_last_state'], {}), '(enc_last_state)\n', (6980, 6996), False, 'import torch\n')] |
from typing import List, Tuple
import numpy as np
import torch
import torchvision
from torch import nn
from .. import coordinates
from .. import process
from ..carla_utils.manager import TickState
from .spatial_softargmax import SpatialSoftargmax
class TaillessResnet34(nn.Module):
    """Resnet-34 feature extractor.

    Wraps torchvision's pretrained resnet34 but stops after the last
    residual stage, skipping the average pooling, flattening and
    fully-connected classification layers.
    """

    def __init__(self):
        super().__init__()
        self._resnet = torch.hub.load(
            "pytorch/vision:v0.6.0", "resnet34", pretrained=True
        )

    def forward(self, image):
        net = self._resnet
        # Stem: conv -> batchnorm -> relu -> maxpool.
        features = net.maxpool(net.relu(net.bn1(net.conv1(image))))
        # The four residual stages.
        for stage in (net.layer1, net.layer2, net.layer3, net.layer4):
            features = stage(features)
        return features
class Image(nn.Module):
    """Predicts future ego-view waypoint locations from a camera image.

    The forward pass returns a 2-tuple of lists, one entry per output head
    (`Image.OUTPUTS` heads in total):

    * location predictions: `[N, Image.COORDINATE_STEPS, 2]` tensors of
      (x, y) pairs in `[-1, 1]`, the spatial soft-argmax of per-step
      heatmaps in the camera perspective;
    * the heatmaps themselves, of shape
      `[N, Image.COORDINATE_STEPS, Image.HEATMAP_HEIGHT, Image.HEATMAP_WIDTH]`.
    """

    OUTPUTS: int = 4
    HEATMAP_WIDTH: int = 384 // 4
    HEATMAP_HEIGHT: int = 160 // 4
    COORDINATE_STEPS: int = 5
    SPEED_FEATURE_MAPS: int = 128

    def __init__(self):
        super().__init__()
        self.resnet = TaillessResnet34()
        # Upsampling decoder: three stride-2 transposed convolutions take
        # the (backbone + speed) feature maps up to heatmap resolution.
        decoder_layers = []
        for in_channels, out_channels in (
            (512 + Image.SPEED_FEATURE_MAPS, 256),
            (256, 128),
            (128, 64),
        ):
            decoder_layers.extend(
                [
                    nn.BatchNorm2d(in_channels),
                    nn.ConvTranspose2d(in_channels, out_channels, 3, 2, 1, 1),
                    nn.ReLU(True),
                ]
            )
        self.higher = nn.Sequential(*decoder_layers)
        # One prediction head per output, each ending in a spatial
        # soft-argmax that turns the heatmaps into (x, y) coordinates.
        self.location_prediction = nn.ModuleList(
            [
                nn.Sequential(
                    nn.BatchNorm2d(64),
                    nn.Conv2d(64, Image.COORDINATE_STEPS, 1, 1, 0),
                    SpatialSoftargmax(
                        Image.HEATMAP_HEIGHT,
                        Image.HEATMAP_WIDTH,
                        Image.COORDINATE_STEPS,
                    ),
                )
                for _ in range(Image.OUTPUTS)
            ]
        )

    def forward(
        self, image: torch.Tensor, speed: torch.Tensor
    ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
        features = self.resnet(image)
        # Broadcast the scalar speed into constant feature maps matching
        # the backbone's spatial resolution, then join along channels.
        speed_maps = speed[:, None, None, None].repeat(
            (1, Image.SPEED_FEATURE_MAPS, 5, 12)
        )
        decoded = self.higher(torch.cat((features, speed_maps), dim=1))
        predictions = []
        heatmaps = []
        for head in self.location_prediction:
            coords, heatmap = head(decoded)
            predictions.append(coords)
            heatmaps.append(heatmap)
        return predictions, heatmaps
class Agent:
    """Wraps an `Image`-style model: preprocesses tick states, runs inference,
    and converts predictions into ego-frame waypoints."""
    def __init__(self, model: nn.Module):
        self._transform_to_tensor = torchvision.transforms.ToTensor()
        # Standard ImageNet normalization statistics (matches the pretrained
        # resnet backbone's expected input distribution).
        self._transform_normalize = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], inplace=True
        )
        self._model = model
        # Camera image size as (width, height) used to rescale predictions.
        self._img_size = torch.tensor([384, 160])
    def step(
        self, state: TickState
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Send the state through the underlying model, and return its output
        as predicted ego coordinate waypoints.
        :return: A 4-tuple, where:
            - the first element is the predicted target locations of the commanded
              action in ego top-down coordinates;
            - the second element is the predicted heatmap of the commanded action;
            - the third element is the raw model (expected-value) image coordinate
              output (dimensions: `[Image.OUTPUTS, Image.COORDINATE_STEPS, 2]`); and
            - the fourth element is the raw heatmap (2-D softmax, _not_ argmax or
              expected-value) model output (dimensions: `[Image.OUTPUTS,
              Image.COORDINATE_STEPS, Image.HEATMAP_HEIGHT, Image.HEATMAP_WIDTH]`).
        """
        # .copy() guards against the normalize's inplace=True mutating the
        # caller's frame buffer.
        image = self._transform_normalize(
            self._transform_to_tensor(state.rgb.copy()).to(process.torch_device)
        ).unsqueeze(0)
        speed = torch.tensor(
            [state.speed], device=process.torch_device, dtype=torch.float32
        )
        with torch.no_grad():
            predictions, heatmaps = self._model.forward(image, speed)
        # Take only the first minibatch result (the agent runs with a minibatch of
        # size 1).
        predictions = torch.stack([p[0, ...].cpu().detach() for p in predictions])
        heatmaps = torch.stack([h[0, ...].cpu().detach() for h in heatmaps])
        # Transform from [-1, +1] range to [0, img_size] for both axes.
        # NOTE(review): the Y axis uses a 0.25 scale plus a half-height offset,
        # i.e. it maps [-1, +1] onto the lower-centered half of the image --
        # presumably intentional for a road-facing camera; confirm.
        transformed_predictions = predictions + 1
        transformed_predictions[..., 0] = (
            transformed_predictions[..., 0] * 0.5 * self._img_size[0]
        )
        transformed_predictions[..., 1] = (
            transformed_predictions[..., 1] * 0.25 * self._img_size[1]
        ) + self._img_size[1] / 2.0
        # Get the singular heatmap and prediction based on the commanded action.
        # Commands appear to be 1-based, hence the -1 index shift.
        heatmap_out = heatmaps[int(state.command) - 1, ...].numpy()
        locations = transformed_predictions[int(state.command) - 1, ...]
        targets = np.zeros((5, 2))
        for idx, [image_x, image_y] in enumerate(locations.tolist()):
            ego_x, ego_y = coordinates.image_coordinate_to_ego_coordinate(
                image_x, image_y
            )
            targets[idx] = [ego_x, ego_y]
        return (
            targets,
            heatmap_out,
            predictions.numpy(),
            heatmaps.numpy(),
        )
| [
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torchvision.transforms.Normalize",
"numpy.zeros",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.hub.load",
"torch.no_grad",
"torch.tensor",
"torchvision.transforms.ToTensor"
] | [((479, 547), 'torch.hub.load', 'torch.hub.load', (['"""pytorch/vision:v0.6.0"""', '"""resnet34"""'], {'pretrained': '(True)'}), "('pytorch/vision:v0.6.0', 'resnet34', pretrained=True)\n", (493, 547), False, 'import torch\n'), ((3246, 3283), 'torch.cat', 'torch.cat', (['(resnet_out, speed)'], {'dim': '(1)'}), '((resnet_out, speed), dim=1)\n', (3255, 3283), False, 'import torch\n'), ((3703, 3736), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (3734, 3736), False, 'import torchvision\n'), ((3773, 3879), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]', 'inplace': '(True)'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, \n 0.224, 0.225], inplace=True)\n', (3805, 3879), False, 'import torchvision\n'), ((3952, 3976), 'torch.tensor', 'torch.tensor', (['[384, 160]'], {}), '([384, 160])\n', (3964, 3976), False, 'import torch\n'), ((5056, 5133), 'torch.tensor', 'torch.tensor', (['[state.speed]'], {'device': 'process.torch_device', 'dtype': 'torch.float32'}), '([state.speed], device=process.torch_device, dtype=torch.float32)\n', (5068, 5133), False, 'import torch\n'), ((6174, 6190), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (6182, 6190), True, 'import numpy as np\n'), ((2039, 2085), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512 + Image.SPEED_FEATURE_MAPS)'], {}), '(512 + Image.SPEED_FEATURE_MAPS)\n', (2053, 2085), False, 'from torch import nn\n'), ((2099, 2166), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(512 + Image.SPEED_FEATURE_MAPS)', '(256)', '(3)', '(2)', '(1)', '(1)'], {}), '(512 + Image.SPEED_FEATURE_MAPS, 256, 3, 2, 1, 1)\n', (2117, 2166), False, 'from torch import nn\n'), ((2180, 2193), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2187, 2193), False, 'from torch import nn\n'), ((2207, 2226), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2221, 2226), False, 'from 
torch import nn\n'), ((2240, 2280), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)', '(3)', '(2)', '(1)', '(1)'], {}), '(256, 128, 3, 2, 1, 1)\n', (2258, 2280), False, 'from torch import nn\n'), ((2294, 2307), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2301, 2307), False, 'from torch import nn\n'), ((2321, 2340), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2335, 2340), False, 'from torch import nn\n'), ((2354, 2393), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)', '(3)', '(2)', '(1)', '(1)'], {}), '(128, 64, 3, 2, 1, 1)\n', (2372, 2393), False, 'from torch import nn\n'), ((2407, 2420), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2414, 2420), False, 'from torch import nn\n'), ((5169, 5184), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5182, 5184), False, 'import torch\n'), ((2548, 2566), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2562, 2566), False, 'from torch import nn\n'), ((2588, 2634), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'Image.COORDINATE_STEPS', '(1)', '(1)', '(0)'], {}), '(64, Image.COORDINATE_STEPS, 1, 1, 0)\n', (2597, 2634), False, 'from torch import nn\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Suppress Privacy model test.
"""
import pytest
import numpy as np
from mindspore import nn
from mindspore import context
from mindspore.train.callback import ModelCheckpoint
from mindspore.train.callback import CheckpointConfig
from mindspore.train.callback import LossMonitor
from mindspore.nn.metrics import Accuracy
import mindspore.dataset as ds
from mindarmour.privacy.sup_privacy import SuppressModel
from mindarmour.privacy.sup_privacy import SuppressMasker
from mindarmour.privacy.sup_privacy import SuppressPrivacyFactory
from mindarmour.privacy.sup_privacy import MaskLayerDes
from tests.ut.python.utils.mock_net import Net as LeNet5
def dataset_generator():
    """Yield mock (image, label) training batches.

    Produces 10 batches of 32 examples each: float32 images of shape
    (32, 1, 32, 32) and int32 labels in [0, 10).
    """
    n_batches, bs = 10, 32
    images = np.random.random((n_batches * bs, 1, 32, 32)).astype(np.float32)
    labels = np.random.randint(0, 10, n_batches * bs).astype(np.int32)
    for start in range(0, n_batches * bs, bs):
        yield images[start:start + bs], labels[start:start + bs]
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_suppress_model_with_pynative_mode():
    """End-to-end smoke test of suppress-privacy training in PyNative mode."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    network = LeNet5()
    epochs = 5
    batch_num = 10
    mask_times = 10
    lr = 0.01
    # Suppress only the first conv layer's weights.
    mask_layers = [MaskLayerDes("conv1.weight", 0, False, False, -1)]
    suppress_ctrl = SuppressPrivacyFactory().create(
        network,
        mask_layers,
        policy="local_train",
        end_epoch=epochs,
        batch_num=batch_num,
        start_epoch=1,
        mask_times=mask_times,
        lr=lr,
        sparse_end=0.50,
        sparse_start=0.0,
    )
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    optimizer = nn.SGD(network.trainable_params(), lr)
    model = SuppressModel(
        network=network,
        loss_fn=loss_fn,
        optimizer=optimizer,
        metrics={"Accuracy": Accuracy()},
    )
    model.link_suppress_ctrl(suppress_ctrl)
    masker = SuppressMasker(model=model, suppress_ctrl=suppress_ctrl)
    ckpt_config = CheckpointConfig(save_checkpoint_steps=batch_num,
                                  keep_checkpoint_max=10)
    ckpt_cb = ModelCheckpoint(prefix="checkpoint_lenet",
                              directory="./trained_ckpt_file/",
                              config=ckpt_config)
    train_ds = ds.GeneratorDataset(dataset_generator, ['data', 'label'])
    model.train(epochs, train_ds,
                callbacks=[ckpt_cb, LossMonitor(), masker],
                dataset_sink_mode=False)
| [
"mindspore.context.set_context",
"mindspore.train.callback.CheckpointConfig",
"mindspore.nn.SoftmaxCrossEntropyWithLogits",
"tests.ut.python.utils.mock_net.Net",
"mindarmour.privacy.sup_privacy.SuppressMasker",
"mindspore.train.callback.ModelCheckpoint",
"mindspore.train.callback.LossMonitor",
"mindsp... | [((1843, 1914), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.PYNATIVE_MODE', 'device_target': '"""Ascend"""'}), "(mode=context.PYNATIVE_MODE, device_target='Ascend')\n", (1862, 1914), False, 'from mindspore import context\n'), ((1933, 1941), 'tests.ut.python.utils.mock_net.Net', 'LeNet5', ([], {}), '()\n', (1939, 1941), True, 'from tests.ut.python.utils.mock_net import Net as LeNet5\n'), ((2915, 2978), 'mindspore.nn.SoftmaxCrossEntropyWithLogits', 'nn.SoftmaxCrossEntropyWithLogits', ([], {'sparse': '(True)', 'reduction': '"""mean"""'}), "(sparse=True, reduction='mean')\n", (2947, 2978), False, 'from mindspore import nn\n'), ((3280, 3354), 'mindarmour.privacy.sup_privacy.SuppressMasker', 'SuppressMasker', ([], {'model': 'model_instance', 'suppress_ctrl': 'suppress_ctrl_instance'}), '(model=model_instance, suppress_ctrl=suppress_ctrl_instance)\n', (3294, 3354), False, 'from mindarmour.privacy.sup_privacy import SuppressMasker\n'), ((3371, 3444), 'mindspore.train.callback.CheckpointConfig', 'CheckpointConfig', ([], {'save_checkpoint_steps': 'batch_num', 'keep_checkpoint_max': '(10)'}), '(save_checkpoint_steps=batch_num, keep_checkpoint_max=10)\n', (3387, 3444), False, 'from mindspore.train.callback import CheckpointConfig\n'), ((3462, 3560), 'mindspore.train.callback.ModelCheckpoint', 'ModelCheckpoint', ([], {'prefix': '"""checkpoint_lenet"""', 'directory': '"""./trained_ckpt_file/"""', 'config': 'config_ck'}), "(prefix='checkpoint_lenet', directory='./trained_ckpt_file/',\n config=config_ck)\n", (3477, 3560), False, 'from mindspore.train.callback import ModelCheckpoint\n'), ((3638, 3695), 'mindspore.dataset.GeneratorDataset', 'ds.GeneratorDataset', (['dataset_generator', "['data', 'label']"], {}), "(dataset_generator, ['data', 'label'])\n", (3657, 3695), True, 'import mindspore.dataset as ds\n'), ((2066, 2115), 'mindarmour.privacy.sup_privacy.MaskLayerDes', 'MaskLayerDes', (['"""conv1.weight"""', '(0)', '(False)', 
'(False)', '(-1)'], {}), "('conv1.weight', 0, False, False, -1)\n", (2078, 2115), False, 'from mindarmour.privacy.sup_privacy import MaskLayerDes\n'), ((1344, 1395), 'numpy.random.random', 'np.random.random', (['(batches * batch_size, 1, 32, 32)'], {}), '((batches * batch_size, 1, 32, 32))\n', (1360, 1395), True, 'import numpy as np\n'), ((1434, 1480), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(batches * batch_size)'], {}), '(0, 10, batches * batch_size)\n', (1451, 1480), True, 'import numpy as np\n'), ((2146, 2170), 'mindarmour.privacy.sup_privacy.SuppressPrivacyFactory', 'SuppressPrivacyFactory', ([], {}), '()\n', (2168, 2170), False, 'from mindarmour.privacy.sup_privacy import SuppressPrivacyFactory\n'), ((3183, 3193), 'mindspore.nn.metrics.Accuracy', 'Accuracy', ([], {}), '()\n', (3191, 3193), False, 'from mindspore.nn.metrics import Accuracy\n'), ((3763, 3776), 'mindspore.train.callback.LossMonitor', 'LossMonitor', ([], {}), '()\n', (3774, 3776), False, 'from mindspore.train.callback import LossMonitor\n')] |
'''
PackHacks Rock Paper Scissors
A computer-vision based version of rock-paper-scissors
'''
# Gesture recognition tutorial: https://gogul09.github.io/software/hand-gesture-recognition-p1
import cv2
import numpy as np
from keras.models import load_model
bg = None  # running-average background frame, lazily initialised


def run_avg(image, weight):
    """Fold *image* into the global background model ``bg``.

    The very first frame simply becomes the background (as a float copy);
    every later frame is blended in with ``cv2.accumulateWeighted``.
    """
    global bg
    if bg is None:
        bg = image.copy().astype("float")
    else:
        cv2.accumulateWeighted(image, bg, weight)
def segment(image, threshold=10):
    """Segment the hand region of *image* against the accumulated background.

    :param image: grayscale frame to segment.
    :param threshold: binarisation threshold applied to the background diff.
    :return: ``(thresholded, segmented)`` where ``thresholded`` is the binary
        foreground mask and ``segmented`` the largest contour, or ``None``
        when no contour is found.
    """
    global bg
    # Absolute difference between the background model and the current frame.
    diff = cv2.absdiff(bg.astype("uint8"), image)
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
    thresholded = cv2.GaussianBlur(thresholded, (5, 5), 0)
    # BUG FIX: cv2.findContours returns (image, contours, hierarchy) only in
    # OpenCV 3.x; in 2.x and 4.x it returns (contours, hierarchy), so the
    # original 3-tuple unpacking raised ValueError there. Taking element [-2]
    # yields the contour list in every OpenCV version.
    cnts = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not cnts:
        return None
    segmented = max(cnts, key=cv2.contourArea)
    return (thresholded, segmented)
if __name__ == "__main__":
model = load_model("model.h5")
accumWeight = 0.5
im_count = 0
camera = cv2.VideoCapture(0)
x, y, r = 300, 300, 200
# region of interest (ROI) coordinates
top, right, bottom, left = x-r, y-r, x+r, y+r
num_frames = 0
while(True):
(grabbed, frame) = camera.read()
frame = cv2.flip(frame, 1)
clone = frame.copy()
(height, width) = frame.shape[:2]
roi = frame[top:bottom, right:left]
# convert the roi to grayscale and blur it
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# to get the background, keep looking till a threshold is reached
# so that our weighted average model gets calibrated
if num_frames < 30:
run_avg(gray, accumWeight)
if num_frames == 29:
print("Ready to go!")
else:
# segment the hand region
hand = segment(gray)
if hand is not None:
(thresholded, segmented) = hand
ep = 0.01*cv2.arcLength(segmented,True)
segmented = cv2.approxPolyDP(segmented,ep,True)
convex_hull = cv2.convexHull(segmented)
cv2.rectangle(clone, (left, top), (right, bottom), (0,0,0), thickness=cv2.FILLED)
cv2.drawContours(clone, [convex_hull + (right, top)], -1, (0, 255, 0), thickness=cv2.FILLED)
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255), thickness=cv2.FILLED)
preds = model.predict(cv2.resize(clone[top:bottom, right:left], (64, 64)).reshape((-1, 64, 64, 3)))[0]
index = np.argmax(preds)
text = ["rock", "paper", "scissor"][index] + " " + str(round(preds[index], 2))
print(text)
cv2.rectangle(clone, (left, top), (right, bottom), (255,0,0), 2)
# increment the number of frames
num_frames += 1
cv2.imshow("Video Feed", clone)
# observe the keypress by the user
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q"):
break
path = None
# Creating data files based on user input
if keypress == ord("r"):
path = "r" + str(im_count) + ".png"
elif keypress == ord("p"):
path = "p" + str(im_count) + ".png"
elif keypress == ord("s"):
path = "s" + str(im_count) + ".png"
if path is not None:
cv2.imwrite("data/" + path, clone[top:bottom, right:left])
print ("saved", path)
im_count += 1
# free up memory
camera.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"cv2.GaussianBlur",
"cv2.approxPolyDP",
"cv2.cvtColor",
"cv2.accumulateWeighted",
"cv2.threshold",
"cv2.waitKey",
"cv2.imshow",
"cv2.imwrite",
"numpy.argmax",
"cv2.arcLength",
"cv2.VideoCapture",
"cv2.drawContours",
"cv2.convexHull",
"cv2.rectangle",
"cv2.fli... | [((410, 451), 'cv2.accumulateWeighted', 'cv2.accumulateWeighted', (['image', 'bg', 'weight'], {}), '(image, bg, weight)\n', (432, 451), False, 'import cv2\n'), ((651, 691), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['thresholded', '(5, 5)', '(0)'], {}), '(thresholded, (5, 5), 0)\n', (667, 691), False, 'import cv2\n'), ((984, 1006), 'keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (994, 1006), False, 'from keras.models import load_model\n'), ((1066, 1085), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1082, 1085), False, 'import cv2\n'), ((3754, 3777), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3775, 3777), False, 'import cv2\n'), ((574, 628), 'cv2.threshold', 'cv2.threshold', (['diff', 'threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(diff, threshold, 255, cv2.THRESH_BINARY)\n', (587, 628), False, 'import cv2\n'), ((1313, 1331), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (1321, 1331), False, 'import cv2\n'), ((1522, 1559), 'cv2.cvtColor', 'cv2.cvtColor', (['roi', 'cv2.COLOR_BGR2GRAY'], {}), '(roi, cv2.COLOR_BGR2GRAY)\n', (1534, 1559), False, 'import cv2\n'), ((1576, 1609), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (1592, 1609), False, 'import cv2\n'), ((2895, 2961), 'cv2.rectangle', 'cv2.rectangle', (['clone', '(left, top)', '(right, bottom)', '(255, 0, 0)', '(2)'], {}), '(clone, (left, top), (right, bottom), (255, 0, 0), 2)\n', (2908, 2961), False, 'import cv2\n'), ((3038, 3069), 'cv2.imshow', 'cv2.imshow', (['"""Video Feed"""', 'clone'], {}), "('Video Feed', clone)\n", (3048, 3069), False, 'import cv2\n'), ((3136, 3150), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3147, 3150), False, 'import cv2\n'), ((3582, 3640), 'cv2.imwrite', 'cv2.imwrite', (["('data/' + path)", 'clone[top:bottom, right:left]'], {}), "('data/' + path, clone[top:bottom, right:left])\n", (3593, 3640), False, 'import 
cv2\n'), ((2148, 2185), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['segmented', 'ep', '(True)'], {}), '(segmented, ep, True)\n', (2164, 2185), False, 'import cv2\n'), ((2217, 2242), 'cv2.convexHull', 'cv2.convexHull', (['segmented'], {}), '(segmented)\n', (2231, 2242), False, 'import cv2\n'), ((2262, 2350), 'cv2.rectangle', 'cv2.rectangle', (['clone', '(left, top)', '(right, bottom)', '(0, 0, 0)'], {'thickness': 'cv2.FILLED'}), '(clone, (left, top), (right, bottom), (0, 0, 0), thickness=cv2\n .FILLED)\n', (2275, 2350), False, 'import cv2\n'), ((2361, 2457), 'cv2.drawContours', 'cv2.drawContours', (['clone', '[convex_hull + (right, top)]', '(-1)', '(0, 255, 0)'], {'thickness': 'cv2.FILLED'}), '(clone, [convex_hull + (right, top)], -1, (0, 255, 0),\n thickness=cv2.FILLED)\n', (2377, 2457), False, 'import cv2\n'), ((2471, 2565), 'cv2.drawContours', 'cv2.drawContours', (['clone', '[segmented + (right, top)]', '(-1)', '(0, 0, 255)'], {'thickness': 'cv2.FILLED'}), '(clone, [segmented + (right, top)], -1, (0, 0, 255),\n thickness=cv2.FILLED)\n', (2487, 2565), False, 'import cv2\n'), ((2709, 2725), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (2718, 2725), True, 'import numpy as np\n'), ((2089, 2119), 'cv2.arcLength', 'cv2.arcLength', (['segmented', '(True)'], {}), '(segmented, True)\n', (2102, 2119), False, 'import cv2\n'), ((2603, 2654), 'cv2.resize', 'cv2.resize', (['clone[top:bottom, right:left]', '(64, 64)'], {}), '(clone[top:bottom, right:left], (64, 64))\n', (2613, 2654), False, 'import cv2\n')] |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
# Plot the standard normal probability density on [-3, 3].
mean = 0
variance = 1
sigma = np.sqrt(variance)  # this is the standard deviation
x = np.linspace(-3, 3, 100)
# BUG FIX: matplotlib.mlab.normpdf was deprecated in matplotlib 2.2 and
# removed in 3.1, so this line crashed on current matplotlib. Compute the
# Gaussian pdf directly with numpy instead (numerically identical).
plt.plot(x, np.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi)))
plt.show() | [
"matplotlib.mlab.normpdf",
"matplotlib.pyplot.show",
"numpy.linspace",
"numpy.sqrt"
] | [((113, 130), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (120, 130), True, 'import numpy as np\n'), ((168, 191), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (179, 191), True, 'import numpy as np\n'), ((231, 241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (239, 241), True, 'import matplotlib.pyplot as plt\n'), ((202, 230), 'matplotlib.mlab.normpdf', 'mlab.normpdf', (['x', 'mean', 'sigma'], {}), '(x, mean, sigma)\n', (214, 230), True, 'import matplotlib.mlab as mlab\n')] |
import numpy as np
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from time_me import *
# Values to store in each container type: 0..999 excluding multiples of 7.
vals = [i for i in range(1000) if i % 7 != 0]
# TimeLimitCoach comes from the local time_me module (star-imported above);
# presumably it re-runs each trial within a 0.5 s time budget -- verify.
coach = TimeLimitCoach(0.5)
# 1000 random membership queries in [0, 1000).
queries = np.random.randint(0, 1000, 1000)
@coach.trial()
def _(cls):
    # Benchmark body: build a container of type `cls` from vals, then count
    # how many of the random queries are members of it.
    o = cls(vals)
    ret = 0
    for q in queries:
        if q in o:
            ret += 1
    return ret
_(frozenset)
_(set)
#_(list)
#_(tuple)
# dict.fromkeys builds a dict keyed by vals; __name__ labels this trial "dict".
_(dict.fromkeys, __name__ = 'dict')
# Compare the recorded trials; .bar() presumably renders a bar chart.
coach.compare().bar()
| [
"matplotlib.use",
"numpy.random.randint"
] | [((73, 97), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (87, 97), False, 'import matplotlib\n'), ((206, 238), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)', '(1000)'], {}), '(0, 1000, 1000)\n', (223, 238), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Test MLP class for regression
@author: avaldes
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from mlp import MLP
def f1(x):
    """Runge-style target function 1 / (1 + x^2) for the first experiment."""
    return (1 + x ** 2) ** -1
def f2(x):
    """Sine target function for the second regression experiment."""
    return np.sin(x)
""" Best results for adagrad in first function and for RMS_prop in both are
using L1 normalization with beta=0.001. Best results for the other functions
are without regularization, using the hyperparameters we have hardcoded
in this script
"""
nb_data = 500
x_data = np.linspace(-5, 5, nb_data).reshape(nb_data, 1)
t_data1 = f1(x_data)
t_data2 = f2(x_data)
D = 1
K = 1
K_list = [D, 100, K] # list of dimensions of layers
activation_functions = [MLP.sigmoid] * 1 + [MLP.identity]
diff_activation_functions = [MLP.dsigmoid] * 1
methods = ['SGD',
'momentum',
'nesterov',
'adagrad',
'adadelta',
'RMS_prop',
'adam']
# methods = ['nesterov']
fig, ax = plt.subplots(2, 7)
for t_data_nb, t_data in enumerate([t_data1, t_data2]):
for method_nb, method in enumerate(methods):
mlp = MLP(K_list,
activation_functions,
diff_activation_functions,
init_seed=6)
print(method)
mlp.train(x_data, t_data,
epochs=1000, batch_size=100,
eta=0.01,
method=method,
gamma=0.9,
beta=0,
beta_1=0.99,
beta_2=0.999,
initialize_weights=True,
print_cost=True)
mlp.get_activations_and_units(x_data)
error = mlp.cost_L2(mlp.y, t_data)
curr_ax = ax[t_data_nb, method_nb]
curr_ax.plot(x_data, mlp.y)
curr_ax.plot(x_data, t_data, ',')
curr_ax.set_xlim(-5, 5)
curr_ax.set_ylim(-1 * t_data_nb, 1)
if t_data_nb == 0:
curr_ax.set_title(method)
curr_ax.set_xlabel('error= %.3f' % error)
plt.show()
| [
"mlp.MLP",
"matplotlib.pyplot.show",
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((1000, 1018), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(7)'], {}), '(2, 7)\n', (1012, 1018), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2046), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2044, 2046), True, 'import matplotlib.pyplot as plt\n'), ((265, 274), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (271, 274), True, 'import numpy as np\n'), ((547, 574), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', 'nb_data'], {}), '(-5, 5, nb_data)\n', (558, 574), True, 'import numpy as np\n'), ((1139, 1212), 'mlp.MLP', 'MLP', (['K_list', 'activation_functions', 'diff_activation_functions'], {'init_seed': '(6)'}), '(K_list, activation_functions, diff_activation_functions, init_seed=6)\n', (1142, 1212), False, 'from mlp import MLP\n')] |
#!/usr/bin/env python
import numpy as np
import rospy
import tf2_geometry_msgs
import tf2_ros
from dynamic_reconfigure.server import Server
from geometry_msgs.msg import PoseStamped, Twist, Vector3
from nav_msgs.msg import Path
from risk_aware_planner.cfg import ControllerConfig
from shapely.geometry import LineString, Point
from tf.transformations import euler_from_quaternion
def array_from_msg(point_msg):
    """Point message -> numpy array [x, y, z] (via :func:`point_from_msg`)."""
    return np.array(point_from_msg(point_msg))


def array3_from_msg(point_msg):
    """Point message -> numpy array [x, y, z], built directly."""
    return np.array([point_msg.x, point_msg.y, point_msg.z])


def point_from_msg(point_msg):
    """Point message -> plain list [x, y, z]."""
    return [point_msg.x, point_msg.y, point_msg.z]


def quaternion_from_msg(quaternion_msg):
    """Quaternion message -> plain list [x, y, z, w]."""
    return [quaternion_msg.x, quaternion_msg.y, quaternion_msg.z,
            quaternion_msg.w]


def yaw_from_msg(quaternion_msg):
    """Quaternion message -> yaw angle (third Euler component)."""
    return euler_from_quaternion(quaternion_from_msg(quaternion_msg))[2]
def angle_difference(angle_1, angle_2):
    """Signed difference angle_2 - angle_1, unwrapped so |result| <= pi."""
    unwrapped = np.unwrap(np.array([angle_1, angle_2]))
    return unwrapped[1] - unwrapped[0]
def get_transform(tf_buffer, from_frame, to_frame):
    """Look up the TF transform from_frame <- to_frame.

    Uses the latest available transform (rospy.Time(0)) with a 0.1 s wait;
    returns None (after logging) when the lookup fails.
    """
    try:
        return tf_buffer.lookup_transform(
            from_frame, to_frame, rospy.Time(0), rospy.Duration(0.1)
        )
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException) as e:
        rospy.logerr(e)
        return None
def path_in_frame(tf_buffer, path, frame_id):
    """Return a copy of *path* with every pose transformed into *frame_id*,
    or None when the TF lookup fails."""
    transform = get_transform(tf_buffer, frame_id, path.header.frame_id)
    if not transform:
        return None
    transformed = Path(header=path.header)
    transformed.header.frame_id = frame_id
    transformed.poses = [
        tf2_geometry_msgs.do_transform_pose(pose, transform)
        for pose in path.poses
    ]
    return transformed
def pose_in_frame(tf_buffer, pose_s, frame_id):
    """Transform a PoseStamped into *frame_id*; None when the TF lookup fails."""
    transform = get_transform(tf_buffer, frame_id, pose_s.header.frame_id)
    if not transform:
        return None
    return tf2_geometry_msgs.do_transform_pose(pose_s, transform)
def normalize_s(s, max_s, loop):
    """Bring an arc-length coordinate into [0, max_s].

    On a looping path the value wraps around by one period; on an open
    path it is clamped to the endpoints.
    """
    if loop:
        if s < 0:
            return s + max_s
        if s > max_s:
            return s - max_s
        return s
    return min(max(s, 0), max_s)
def t_speed(x0, x1, tau, max_speed):
    """Velocity that would move x0 -> x1 in time tau, norm-clipped to max_speed."""
    velocity = (x1 - x0) / tau
    speed = np.linalg.norm(velocity)
    if speed > max_speed:
        return velocity / speed * max_speed
    return velocity
def t_angular_speed(x0, x1, tau, max_speed):
    """Angular rate turning yaw x0 -> yaw x1 in time tau, magnitude-clipped."""
    omega = angle_difference(x0, x1) / tau
    magnitude = np.abs(omega)
    if magnitude > max_speed:
        return omega / magnitude * max_speed
    return omega
class PathFollower(object):
    """ROS node that steers a robot along the most recent "selected_path".

    Subscribes to the path, the robot pose, and a target pose; publishes
    Twist commands on "cmd_vel". The gains/limits are ROS parameters and can
    be changed at runtime via dynamic_reconfigure.
    """
    def __init__(self):
        rospy.init_node("path_follower")
        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        # Shapely LineString of the current path (None while idle).
        self.curve = None
        self.path = None
        # Frame in which paths/poses are processed.
        self.frame_id = rospy.get_param("~frame_id", "map")
        # Look-ahead distance along the path [m].
        self.delta = rospy.get_param("~delta", 0.5)
        # Arrival tolerance around the final waypoint [m].
        self.min_distance = rospy.get_param("~min_distance", 0.5)
        # Time constant for reaching the look-ahead point [s].
        self.tau = rospy.get_param("~tau", 0.5)
        self.target_speed = rospy.get_param("~max_speed", 0.3)
        self.target_angular_speed = rospy.get_param("~max_angular_speed", 0.3)
        # Gain coupling angular speed to the linear-speed reduction.
        self.k = rospy.get_param("~k", 1.0)
        rate = rospy.get_param('~rate', 5.0)
        # Minimum interval between published commands [s].
        self.min_dt = 1.0 / rate
        self.last_t = rospy.Time.now()
        self.pub_twist = rospy.Publisher("cmd_vel", Twist, queue_size=1)
        rospy.Subscriber("selected_path", Path, self.has_updated_path)
        rospy.Subscriber("pose", PoseStamped, self.has_updated_pose)
        rospy.Subscriber("target", PoseStamped, self.has_updated_target)
        self.srv = Server(ControllerConfig, self.callback)
        # Blocks here processing callbacks until shutdown.
        rospy.spin()
    def callback(self, config, level):
        """dynamic_reconfigure callback: copy the new parameter values."""
        self.delta = config['delta']
        self.min_distance = config['min_distance']
        self.tau = config['tau']
        self.target_speed = config['max_speed']
        self.target_angular_speed = config['max_angular_speed']
        self.k = config['k']
        return config
    def has_updated_target(self, msg):
        # A new target invalidates the current path; stop until a new path
        # arrives on "selected_path".
        self.stop()
    def should_send(self):
        """True when enough time has passed to publish another command."""
        dt = rospy.Time.now() - self.last_t
        return dt.to_sec() > self.min_dt
    def stop(self, msg=None):
        """Drop the current path and publish a zero Twist."""
        self.path = None
        self.curve = None
        self.pub_twist.publish(Twist())
    @property
    def target_point(self):
        """Final waypoint of the current path, or None when idle."""
        if self.path and self.path.poses:
            return self.path.poses[-1].pose.position
        return None
    def has_arrived(self, point):
        """True when *point* is within min_distance of the path's endpoint
        (planar XY distance only)."""
        if not self.path:
            return True
        distance = np.linalg.norm(array_from_msg(self.target_point)[:2] -
                                  array_from_msg(point)[:2])
        return distance < self.min_distance
    def has_updated_path(self, msg):
        """Cache the new path (in self.frame_id) and precompute arc lengths."""
        path = path_in_frame(self.tf_buffer, msg, self.frame_id)
        if not msg.poses or not path:
            rospy.loginfo("Got invalid/empty path, will stop")
            self.stop()
            return
        rospy.loginfo("Got new path")
        self.curve = LineString([point_from_msg(pose.pose.position) for pose in path.poses])
        # ps: waypoints, ls: segment lengths, cs: cumulative arc length.
        self.ps = np.array(self.curve)
        self.ls = np.linalg.norm(np.diff(self.ps, axis=0), axis=1)
        self.cs = np.cumsum(self.ls)
        self.path = path
    def target_along_path(self, current_point):
        """Point on the path *delta* meters ahead of the closest point to us."""
        cp = Point(current_point)
        s = self.curve.project(cp)
        s = s + self.delta
        if s > self.cs[-1]:
            return self.ps[-1]
        return np.array(self.curve.interpolate(s))
    def target_twist_along_path(self, pose_s):
        """Twist command that steers toward the look-ahead point.

        Linear speed is reduced proportionally to the commanded turn rate
        (gain self.k) so the robot slows down in sharp turns.
        """
        if not self.path:
            return None
        current_point = array_from_msg(pose_s.pose.position)
        current_yaw = yaw_from_msg(pose_s.pose.orientation)
        target_point = self.target_along_path(current_point)
        delta = target_point - current_point
        target_yaw = np.arctan2(delta[1], delta[0])
        target_angular_speed = t_angular_speed(current_yaw, target_yaw, self.tau,
                                               self.target_angular_speed)
        f = max(0, (1 - self.k * abs(target_angular_speed) / self.target_angular_speed))
        target_speed = self.target_speed * f
        return Twist(linear=Vector3(target_speed, 0, 0),
                     angular=Vector3(0, 0, target_angular_speed))
    def has_updated_pose(self, msg):
        """Pose callback: compute and (rate-limited) publish the next command."""
        if self.path:
            pose_s = pose_in_frame(self.tf_buffer, msg, self.frame_id)
            if not pose_s:
                rospy.logerr('Could not transform pose %s to frame %s', msg, self.frame_id)
                return
            point = pose_s.pose.position
            if self.has_arrived(point):
                rospy.loginfo('Has arrived, will stop')
                self.stop()
                return
            target_twist = self.target_twist_along_path(pose_s)
            if not target_twist:
                rospy.logerr('No target twist')
                return
            if self.should_send():
                self.last_t = rospy.Time.now()
                if target_twist:
                    self.pub_twist.publish(target_twist)
if __name__ == '__main__':
    # Instantiating the node blocks inside rospy.spin() until shutdown.
    PathFollower()
| [
"geometry_msgs.msg.Vector3",
"rospy.logerr",
"numpy.abs",
"rospy.Subscriber",
"numpy.arctan2",
"tf2_geometry_msgs.do_transform_pose",
"rospy.Time",
"numpy.linalg.norm",
"rospy.Duration",
"shapely.geometry.Point",
"rospy.Time.now",
"tf2_ros.TransformListener",
"numpy.cumsum",
"rospy.init_no... | [((508, 557), 'numpy.array', 'np.array', (['[point_msg.x, point_msg.y, point_msg.z]'], {}), '([point_msg.x, point_msg.y, point_msg.z])\n', (516, 557), True, 'import numpy as np\n'), ((1496, 1520), 'nav_msgs.msg.Path', 'Path', ([], {'header': 'path.header'}), '(header=path.header)\n', (1500, 1520), False, 'from nav_msgs.msg import Path\n'), ((1894, 1940), 'tf2_geometry_msgs.do_transform_pose', 'tf2_geometry_msgs.do_transform_pose', (['pose_s', 't'], {}), '(pose_s, t)\n', (1929, 1940), False, 'import tf2_geometry_msgs\n'), ((2248, 2265), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (2262, 2265), True, 'import numpy as np\n'), ((2425, 2434), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (2431, 2434), True, 'import numpy as np\n'), ((943, 971), 'numpy.array', 'np.array', (['[angle_1, angle_2]'], {}), '([angle_1, angle_2])\n', (951, 971), True, 'import numpy as np\n'), ((1573, 1617), 'tf2_geometry_msgs.do_transform_pose', 'tf2_geometry_msgs.do_transform_pose', (['pose', 't'], {}), '(pose, t)\n', (1608, 1617), False, 'import tf2_geometry_msgs\n'), ((2601, 2633), 'rospy.init_node', 'rospy.init_node', (['"""path_follower"""'], {}), "('path_follower')\n", (2616, 2633), False, 'import rospy\n'), ((2659, 2675), 'tf2_ros.Buffer', 'tf2_ros.Buffer', ([], {}), '()\n', (2673, 2675), False, 'import tf2_ros\n'), ((2703, 2744), 'tf2_ros.TransformListener', 'tf2_ros.TransformListener', (['self.tf_buffer'], {}), '(self.tf_buffer)\n', (2728, 2744), False, 'import tf2_ros\n'), ((2820, 2855), 'rospy.get_param', 'rospy.get_param', (['"""~frame_id"""', '"""map"""'], {}), "('~frame_id', 'map')\n", (2835, 2855), False, 'import rospy\n'), ((2877, 2907), 'rospy.get_param', 'rospy.get_param', (['"""~delta"""', '(0.5)'], {}), "('~delta', 0.5)\n", (2892, 2907), False, 'import rospy\n'), ((2936, 2973), 'rospy.get_param', 'rospy.get_param', (['"""~min_distance"""', '(0.5)'], {}), "('~min_distance', 0.5)\n", (2951, 2973), False, 'import rospy\n'), ((2993, 3021), 
'rospy.get_param', 'rospy.get_param', (['"""~tau"""', '(0.5)'], {}), "('~tau', 0.5)\n", (3008, 3021), False, 'import rospy\n'), ((3050, 3084), 'rospy.get_param', 'rospy.get_param', (['"""~max_speed"""', '(0.3)'], {}), "('~max_speed', 0.3)\n", (3065, 3084), False, 'import rospy\n'), ((3121, 3163), 'rospy.get_param', 'rospy.get_param', (['"""~max_angular_speed"""', '(0.3)'], {}), "('~max_angular_speed', 0.3)\n", (3136, 3163), False, 'import rospy\n'), ((3181, 3207), 'rospy.get_param', 'rospy.get_param', (['"""~k"""', '(1.0)'], {}), "('~k', 1.0)\n", (3196, 3207), False, 'import rospy\n'), ((3223, 3252), 'rospy.get_param', 'rospy.get_param', (['"""~rate"""', '(5.0)'], {}), "('~rate', 5.0)\n", (3238, 3252), False, 'import rospy\n'), ((3308, 3324), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3322, 3324), False, 'import rospy\n'), ((3350, 3397), 'rospy.Publisher', 'rospy.Publisher', (['"""cmd_vel"""', 'Twist'], {'queue_size': '(1)'}), "('cmd_vel', Twist, queue_size=1)\n", (3365, 3397), False, 'import rospy\n'), ((3406, 3468), 'rospy.Subscriber', 'rospy.Subscriber', (['"""selected_path"""', 'Path', 'self.has_updated_path'], {}), "('selected_path', Path, self.has_updated_path)\n", (3422, 3468), False, 'import rospy\n'), ((3477, 3537), 'rospy.Subscriber', 'rospy.Subscriber', (['"""pose"""', 'PoseStamped', 'self.has_updated_pose'], {}), "('pose', PoseStamped, self.has_updated_pose)\n", (3493, 3537), False, 'import rospy\n'), ((3546, 3610), 'rospy.Subscriber', 'rospy.Subscriber', (['"""target"""', 'PoseStamped', 'self.has_updated_target'], {}), "('target', PoseStamped, self.has_updated_target)\n", (3562, 3610), False, 'import rospy\n'), ((3631, 3670), 'dynamic_reconfigure.server.Server', 'Server', (['ControllerConfig', 'self.callback'], {}), '(ControllerConfig, self.callback)\n', (3637, 3670), False, 'from dynamic_reconfigure.server import Server\n'), ((3680, 3692), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3690, 3692), False, 'import rospy\n'), ((4989, 5018), 
'rospy.loginfo', 'rospy.loginfo', (['"""Got new path"""'], {}), "('Got new path')\n", (5002, 5018), False, 'import rospy\n'), ((5130, 5150), 'numpy.array', 'np.array', (['self.curve'], {}), '(self.curve)\n', (5138, 5150), True, 'import numpy as np\n'), ((5236, 5254), 'numpy.cumsum', 'np.cumsum', (['self.ls'], {}), '(self.ls)\n', (5245, 5254), True, 'import numpy as np\n'), ((5342, 5362), 'shapely.geometry.Point', 'Point', (['current_point'], {}), '(current_point)\n', (5347, 5362), False, 'from shapely.geometry import LineString, Point\n'), ((5881, 5911), 'numpy.arctan2', 'np.arctan2', (['delta[1]', 'delta[0]'], {}), '(delta[1], delta[0])\n', (5891, 5911), True, 'import numpy as np\n'), ((1132, 1145), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (1142, 1145), False, 'import rospy\n'), ((1147, 1166), 'rospy.Duration', 'rospy.Duration', (['(0.1)'], {}), '(0.1)\n', (1161, 1166), False, 'import rospy\n'), ((1303, 1318), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (1315, 1318), False, 'import rospy\n'), ((4118, 4134), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4132, 4134), False, 'import rospy\n'), ((4303, 4310), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (4308, 4310), False, 'from geometry_msgs.msg import PoseStamped, Twist, Vector3\n'), ((4887, 4937), 'rospy.loginfo', 'rospy.loginfo', (['"""Got invalid/empty path, will stop"""'], {}), "('Got invalid/empty path, will stop')\n", (4900, 4937), False, 'import rospy\n'), ((5184, 5208), 'numpy.diff', 'np.diff', (['self.ps'], {'axis': '(0)'}), '(self.ps, axis=0)\n', (5191, 5208), True, 'import numpy as np\n'), ((6230, 6257), 'geometry_msgs.msg.Vector3', 'Vector3', (['target_speed', '(0)', '(0)'], {}), '(target_speed, 0, 0)\n', (6237, 6257), False, 'from geometry_msgs.msg import PoseStamped, Twist, Vector3\n'), ((6288, 6323), 'geometry_msgs.msg.Vector3', 'Vector3', (['(0)', '(0)', 'target_angular_speed'], {}), '(0, 0, target_angular_speed)\n', (6295, 6323), False, 'from geometry_msgs.msg 
import PoseStamped, Twist, Vector3\n'), ((6499, 6574), 'rospy.logerr', 'rospy.logerr', (['"""Could not transform pose %s to frame %s"""', 'msg', 'self.frame_id'], {}), "('Could not transform pose %s to frame %s', msg, self.frame_id)\n", (6511, 6574), False, 'import rospy\n'), ((6695, 6734), 'rospy.loginfo', 'rospy.loginfo', (['"""Has arrived, will stop"""'], {}), "('Has arrived, will stop')\n", (6708, 6734), False, 'import rospy\n'), ((6899, 6930), 'rospy.logerr', 'rospy.logerr', (['"""No target twist"""'], {}), "('No target twist')\n", (6911, 6930), False, 'import rospy\n'), ((7019, 7035), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (7033, 7035), False, 'import rospy\n')] |
"""
Arquivo usado para mudar uma quantidade N de arquivos
de uma pasta para outra e renomear os arquivos se precisar
"""
import cv2
import os
import numpy as np
from PIL import Image
import pathlib
def change_imagens(current_folder, destination_folder, name="crosswalk", qtd=0, dim=(128, 64)):
    """
    Copy every image found in `current_folder` into `destination_folder`,
    resizing each one to `dim` and renaming it sequentially as
    "<name>_<index>.jpg" (index starts at `qtd` + 1).

    Args:
        current_folder: directory containing the source images.
        destination_folder: directory receiving the renamed copies
            (created on demand).
        name: prefix used for the new file names.
        qtd: offset added to the sequential index, useful when appending
            to an already-populated destination folder.
        dim: (width, height) the images are resized to.
    """
    img_paths = [os.path.join(current_folder, f) for f in os.listdir(current_folder)]
    # BUG FIX: previously this read the module-level global `destination`
    # instead of the `destination_folder` parameter.
    saved_folder = destination_folder + "/"
    qtd_img = 1
    for img_path in img_paths:
        img_name = os.path.basename(img_path)
        saved_name = "{}_{}".format(name, qtd_img + qtd)
        print(img_name + " -> " + saved_name + ".jpg")
        try:
            # load with PIL, hand over to OpenCV as a numpy array
            pil_img = Image.open(current_folder + "/" + img_name)
            img_np = np.array(pil_img, 'uint8')
            img_np = cv2.resize(img_np, dim)
            # create the destination folder on demand and save the result
            pathlib.Path(saved_folder).mkdir(parents=True, exist_ok=True)
            cv2.imwrite(saved_folder + saved_name + ".jpg", img_np)
            qtd_img += 1
        except ValueError:
            print('.')


folder = 'test'
destination = 'new_folder'
change_imagens(folder, destination)
| [
"cv2.imwrite",
"PIL.Image.open",
"pathlib.Path",
"numpy.array",
"os.path.split",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((451, 485), 'os.path.join', 'os.path.join', (['current_folder', 'file'], {}), '(current_folder, file)\n', (463, 485), False, 'import os\n'), ((498, 524), 'os.listdir', 'os.listdir', (['current_folder'], {}), '(current_folder)\n', (508, 524), False, 'import os\n'), ((930, 973), 'PIL.Image.open', 'Image.open', (["(current_folder + '/' + img_name)"], {}), "(current_folder + '/' + img_name)\n", (940, 973), False, 'from PIL import Image\n'), ((1049, 1071), 'numpy.array', 'np.array', (['img', '"""uint8"""'], {}), "(img, 'uint8')\n", (1057, 1071), True, 'import numpy as np\n'), ((1128, 1150), 'cv2.resize', 'cv2.resize', (['imgNp', 'dim'], {}), '(imgNp, dim)\n', (1138, 1150), False, 'import cv2\n'), ((1300, 1354), 'cv2.imwrite', 'cv2.imwrite', (["(saved_folder + saved_name + '.jpg')", 'imgNp'], {}), "(saved_folder + saved_name + '.jpg', imgNp)\n", (1311, 1354), False, 'import cv2\n'), ((1226, 1252), 'pathlib.Path', 'pathlib.Path', (['saved_folder'], {}), '(saved_folder)\n', (1238, 1252), False, 'import pathlib\n'), ((587, 605), 'os.path.split', 'os.path.split', (['img'], {}), '(img)\n', (600, 605), False, 'import os\n'), ((643, 666), 'os.path.split', 'os.path.split', (['img_name'], {}), '(img_name)\n', (656, 666), False, 'import os\n')] |
# %% importing the libraries
import numpy as np
# %% Defining the sudoku grid
# The puzzle: 0 marks an empty cell.
grid = [
    [0,0,0,0,3,0,0,0,9],
    [0,0,0,0,0,5,0,6,0],
    [0,0,0,0,0,7,5,0,8],
    [0,0,6,0,0,0,0,0,0],
    [3,2,0,0,0,0,6,0,0],
    [0,0,0,0,8,0,0,5,4],
    [0,3,0,0,5,0,0,0,0],
    [8,1,0,9,4,3,0,0,0],
    [9,0,0,0,0,8,0,0,0]
]
# Converting the grid to a 2-D ndarray for (row, col) indexing.
# np.matrix is deprecated, so np.array is used instead; all the indexing
# performed below (grid_matrix[x, y], row/column/box slicing) behaves the
# same way.
grid_matrix = np.array(grid)
# e.g. grid_matrix[0, 4] == 3 and grid_matrix[3, 2] == 6
# Validation methods
# Check if number can be inputted into row
def check_row(sudoku_grid, x, num):
    """Return True if `num` does not already appear in row `x`."""
    # np.isin(...).any() is True when the value occurs; placing is legal
    # only when it does not.
    return not np.isin(num, sudoku_grid[x, :]).any()
# Check if number can be inputted into column
def check_column(sudoku_grid, y, num):
    """Return True if `num` does not already appear in column `y`."""
    return not np.isin(num, sudoku_grid[:, y]).any()
# Check if number can be inputted into the 3x3 grid
def check_3_3(sudoku_grid, x, y, num):
    """Return True if `num` does not appear in the 3x3 box containing (x, y)."""
    # top-left corner of the 3x3 box that owns cell (x, y)
    bx = 3 * (x // 3)
    by = 3 * (y // 3)
    box = sudoku_grid[bx:bx + 3, by:by + 3]
    return not np.isin(num, box).any()
def check_possible(sudoku_grid, x, y, num):
    """Return True if `num` may legally be placed at cell (x, y).

    A placement is legal when the value is absent from the cell's row,
    column and 3x3 box.
    """
    return (check_row(sudoku_grid, x, num)
            and check_column(sudoku_grid, y, num)
            and check_3_3(sudoku_grid, x, y, num))
# %%
# Solver algorithm
def solve(sudoku_grid):
    """Solve the sudoku in place by brute-force backtracking.

    Every completed grid found is printed; each tried digit is reset to 0
    while backtracking, so the grid is restored after the call returns.
    The "Completed :D" return value only signals that all candidates for
    one cell were exhausted (i.e. backtrack), not overall success.
    """
    for x in range(9):
        for y in range(9):
            if(sudoku_grid[x,y] == 0):
                for n in range(1, 10):
                    if (check_possible(sudoku_grid, x, y, n)):
                        sudoku_grid[x, y] = n
                        solve(sudoku_grid)
                        # undo the move so the next candidate can be tried
                        sudoku_grid[x, y] = 0
                # all candidates for this empty cell exhausted: backtrack
                return "Completed :D"
    # no empty cell remains: a full solution was found
    print(sudoku_grid)
# %%
solve(grid_matrix)
| [
"numpy.matrix",
"numpy.isin"
] | [((383, 398), 'numpy.matrix', 'np.matrix', (['grid'], {}), '(grid)\n', (392, 398), True, 'import numpy as np\n'), ((571, 602), 'numpy.isin', 'np.isin', (['num', 'sudoku_grid[x, :]'], {}), '(num, sudoku_grid[x, :])\n', (578, 602), True, 'import numpy as np\n'), ((775, 806), 'numpy.isin', 'np.isin', (['num', 'sudoku_grid[:, y]'], {}), '(num, sudoku_grid[:, y])\n', (782, 806), True, 'import numpy as np\n'), ((1091, 1113), 'numpy.isin', 'np.isin', (['num', 'grid_3x3'], {}), '(num, grid_3x3)\n', (1098, 1113), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
'''
Grow and visualize standard resting state ROIs from literature.
1. Read ROIs of standard regions involved in resting state networks from literature.
(the data is provided as a csv file with list of regions with seed MNI coordinates)
2. Grow labels of 1cm radius (approx) in the surface source space.
3. Make annotation and visualize the labels.
Uses RSNs provided by [1]
[1] <NAME>, <NAME>, and <NAME>,
“Quantifying the Test-Retest Reliability of Magnetoencephalography
Resting-State Functional Connectivity,” Brain Connect., vol. 6, no. 6, pp. 448–460, 2016.
Author: <NAME> <<EMAIL>>
'''
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from jumeg.jumeg_utils import get_jumeg_path
from jumeg.connectivity import make_annot_from_csv
from nilearn import plotting
from surfer import Brain
# This example uses MNE's bundled "sample" dataset.
data_path = sample.data_path()
subject = 'sample'
subjects_dir = data_path + '/subjects'
parc_fname = 'standard_garces_2016'
# CSV with the RSN seed regions (names + MNI coordinates) shipped with jumeg.
csv_fname = op.join(get_jumeg_path(), 'data', 'standard_rsns.csv')
# set make_annot to True to save the annotation to disk
labels, coords, _ = make_annot_from_csv(subject, subjects_dir, csv_fname,
                                        parc_fname=parc_fname, make_annot=False,
                                        return_label_coords=True)
# to plot mni coords on glass brain
n_nodes = np.array(coords).shape[0]
# make a random zero valued connectivity matrix
con = np.zeros((n_nodes, n_nodes))
# plot the connectome on a glass brain background
plotting.plot_connectome(con, coords)
plotting.show()
# plot the brain surface, foci and labels
brain = Brain(subject, hemi='both', surf='white', subjects_dir=subjects_dir)
for mni_coord, mylabel in zip(coords, labels):
    # mark each seed coordinate on the white surface, then overlay its label
    brain.add_foci(mni_coord, coords_as_verts=False, hemi=mylabel.hemi,
                   color='red', map_surface='white', scale_factor=0.6)
    brain.add_label(mylabel, hemi=mylabel.hemi)
| [
"surfer.Brain",
"nilearn.plotting.plot_connectome",
"jumeg.jumeg_utils.get_jumeg_path",
"nilearn.plotting.show",
"numpy.zeros",
"jumeg.connectivity.make_annot_from_csv",
"numpy.array",
"mne.datasets.sample.data_path"
] | [((872, 890), 'mne.datasets.sample.data_path', 'sample.data_path', ([], {}), '()\n', (888, 890), False, 'from mne.datasets import sample\n'), ((1129, 1253), 'jumeg.connectivity.make_annot_from_csv', 'make_annot_from_csv', (['subject', 'subjects_dir', 'csv_fname'], {'parc_fname': 'parc_fname', 'make_annot': '(False)', 'return_label_coords': '(True)'}), '(subject, subjects_dir, csv_fname, parc_fname=parc_fname,\n make_annot=False, return_label_coords=True)\n', (1148, 1253), False, 'from jumeg.connectivity import make_annot_from_csv\n'), ((1457, 1485), 'numpy.zeros', 'np.zeros', (['(n_nodes, n_nodes)'], {}), '((n_nodes, n_nodes))\n', (1465, 1485), True, 'import numpy as np\n'), ((1536, 1573), 'nilearn.plotting.plot_connectome', 'plotting.plot_connectome', (['con', 'coords'], {}), '(con, coords)\n', (1560, 1573), False, 'from nilearn import plotting\n'), ((1574, 1589), 'nilearn.plotting.show', 'plotting.show', ([], {}), '()\n', (1587, 1589), False, 'from nilearn import plotting\n'), ((1641, 1709), 'surfer.Brain', 'Brain', (['subject'], {'hemi': '"""both"""', 'surf': '"""white"""', 'subjects_dir': 'subjects_dir'}), "(subject, hemi='both', surf='white', subjects_dir=subjects_dir)\n", (1646, 1709), False, 'from surfer import Brain\n'), ((1005, 1021), 'jumeg.jumeg_utils.get_jumeg_path', 'get_jumeg_path', ([], {}), '()\n', (1019, 1021), False, 'from jumeg.jumeg_utils import get_jumeg_path\n'), ((1377, 1393), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (1385, 1393), True, 'import numpy as np\n')] |
"""Some utilities/wrappers for Qiskit"""
from ast import literal_eval
import pickle
import operator
from os import path
import numpy as np
from numpy import pi
from qiskit.compiler import transpile
from qiskit.transpiler import CouplingMap
from qiskit.tools.monitor import job_monitor
from qiskit.providers.aer.noise import NoiseModel
from qiskit import execute, Aer, IBMQ, QuantumCircuit
from qiskit.quantum_info import Pauli
class NoiseModelWrapper:
    """Load noise model from IBMQ real quantum computer.

    The model (and, when available, the backend's coupling map) is cached
    on disk as "<backend>.noise" / "<backend>.map" so later runs can skip
    the IBMQ round-trip.
    """
    def __init__(self, backend, project=None, no_save=False, quiet=False):
        """Load a noise model from either a local file or IBMQ.

        Args:
            backend: name of the IBMQ backend whose noise model is wanted.
            project: optional "hub,group,project" string selecting a
                specific IBMQ provider.
            no_save: when True, do not cache a freshly downloaded model.
            quiet: when True, suppress progress messages.
        """
        if not quiet:
            print("Building circuit with noise from '{}'".format(backend))
        # If a file exists called like the backend, then load the model from that.
        # In this case, we also try to load a coupling map from
        # a file with .map extension. If it does not exist, no
        # worries, we just assume it is default (i.e., empty).
        noise_filename = "{}.noise".format(backend)
        coupling_map_filename = "{}.map".format(backend)
        if path.exists(noise_filename):
            if not quiet:
                print("Loading noise model from {}".format(noise_filename))
            # the .noise file is a pickled NoiseModel dict
            with open(noise_filename, "rb") as infile:
                self.noise_model = NoiseModel.from_dict(pickle.load(infile))
            self.coupling_map = None
            if path.exists(coupling_map_filename):
                if not quiet:
                    print("Loading coupling map from {}".format(coupling_map_filename))
                # the .map file stores the map as a Python literal
                with open(coupling_map_filename, "r") as coupling_infile:
                    self.coupling_map = CouplingMap(
                        literal_eval(coupling_infile.read())
                    )
        # Otherwise, load the noise model from IBMQ (requires token)
        # account properties to be stored in default location
        # and save the noise model for future use, unless the no_save flag is set
        else:
            # Build noise model from backend properties
            provider = IBMQ.load_account()
            if project is None:
                backend = provider.get_backend(backend)
            else:
                # load a specific project
                (hub, group, project) = splitProjectInfo(project)
                if not quiet:
                    print(
                        f"IBMQ backend (hub: {hub}, group: {group}, project: {project})"
                    )
                provider_project = IBMQ.get_provider(
                    hub=hub, group=group, project=project
                )
                backend = provider_project.get_backend(backend)
            self.noise_model = NoiseModel.from_backend(backend)
            # Get coupling map from backend
            self.coupling_map = backend.configuration().coupling_map
            # Save the model and coupling map (if not default) to file
            if not no_save:
                if not quiet:
                    print(
                        "Saving to {} the noise model for future use".format(
                            noise_filename
                        )
                    )
                with open(noise_filename, "wb") as outfile:
                    pickle.dump(self.noise_model.to_dict(), outfile)
                if self.coupling_map is not None:
                    if not quiet:
                        print(
                            "Saving to {} the coupling map for future use".format(
                                coupling_map_filename
                            )
                        )
                    with open(coupling_map_filename, "w") as coupling_outfile:
                        coupling_outfile.write(str(self.coupling_map))
    def execute(self, qc, shots=1024):
        """Run `qc` on the Aer qasm simulator under this wrapper's noise
        model and coupling map; returns the qiskit Result."""
        result = execute(
            qc,
            Aer.get_backend("qasm_simulator"),
            coupling_map=self.coupling_map,
            basis_gates=self.noise_model.basis_gates,
            noise_model=self.noise_model,
            shots=shots,
        ).result()
        return result
class IbmqWrapper:
    """Helper for running circuits on an IBMQ real quantum computer.

    The hardware backend is resolved once at construction time, optionally
    through a specific hub/group/project provider.
    """
    def __init__(self, backend, project=None, quiet=False, print_circuit=False):
        """Resolve `backend`, optionally via a "hub,group,project" provider."""
        self.quiet = quiet
        self.print_circuit = print_circuit
        if not quiet:
            print("Loading IBMQ backend '{}'".format(backend))
        # Load the stored IBMQ account credentials.
        account = IBMQ.load_account()
        if project is None:
            self.backend = account.get_backend(backend)
            return
        # A specific project was requested: go through its dedicated provider.
        (hub, group, project) = splitProjectInfo(project)
        if not quiet:
            print(f"IBMQ backend (hub: {hub}, group: {group}, project: {project})")
        scoped = IBMQ.get_provider(hub=hub, group=group, project=project)
        self.backend = scoped.get_backend(backend)

    def _prepare(self, qc):
        # Transpile for the target backend and optionally show the circuit.
        compiled = transpile(qc, backend=self.backend, optimization_level=1)
        if self.print_circuit:
            print(compiled.draw(output="text"))
        return compiled

    def execute_sync(self, qc, shots=1024):
        """Submit `qc`, block until the job finishes, and return its result."""
        job = execute(self._prepare(qc), backend=self.backend, shots=shots)
        job_monitor(job)
        return job.result()

    def execute_async(self, qc, shots=1024):
        """Submit `qc` and return the pending job without waiting."""
        return execute(self._prepare(qc), backend=self.backend, shots=shots)
def bloch_states(rho):
    """Return the Bloch vector [x, y, z] of every qubit in a state.

    Accepts either a state vector (promoted to a density matrix via the
    outer product) or a density matrix. Adapted from
    plot_bloch_multivector in qiskit.visualization.state_visualization.
    """
    if rho.ndim == 1:
        # promote |psi> to the density matrix |psi><psi|
        rho = np.outer(rho, np.conj(rho))
    num_qubits = int(np.log2(len(rho)))

    def expectation(pauli):
        # <P> = Re(Tr(P rho))
        return np.real(np.trace(np.dot(pauli.to_matrix(), rho)))

    vectors = []
    for qubit in range(num_qubits):
        vectors.append(
            [expectation(Pauli.pauli_single(num_qubits, qubit, axis))
             for axis in ("X", "Y", "Z")]
        )
    return vectors
def decode_message(result, print_message=False):
    """Return the most frequently measured bitstring in a result's counts.

    With `print_message` set, also prints the decoded message together
    with its observed probability.
    """
    counts = result.get_counts()
    message_decoded = max(counts, key=counts.get)
    if print_message:
        tot_counts = sum(counts.values())
        probability = 100 * float(counts[message_decoded]) / tot_counts
        print(
            "received message {} (prob {:.2f}%)".format(
                message_decoded,
                probability,
            )
        )
    return message_decoded
def qft(N, print_circuit=False):
    """Return a circuit computing the Quantum Fourier Transform on N qubits.

    Args:
        N: number of qubits (must be positive).
        print_circuit: when True, print an ASCII drawing of the circuit.

    The hard-coded N == 3 branch that used to live here produced exactly
    the same gate sequence as the general construction (plus a debug
    print), so only the general loop remains.
    """
    assert N > 0
    qft_circuit = QuantumCircuit(N, name="QFT")
    # Hadamard each qubit (highest index first) followed by the
    # controlled phase rotations that encode the Fourier coefficients.
    for i in range(N):
        cur = N - i - 1
        qft_circuit.h(cur)
        for j in range(cur):
            qft_circuit.cu1(pi / 2 ** (cur - j), j, cur)
    # Reverse the qubit order, since the QFT outputs bits reversed.
    for i in range(N // 2):
        qft_circuit.swap(i, N - 1 - i)
    if print_circuit:
        print(qft_circuit.draw(output="text"))
    return qft_circuit
# From Qiskit Textbook
def qft_dagger(circ, n):
    """n-qubit QFTdagger the first n qubits in circ (applied in place)."""
    # Undo the qubit-order reversal of the forward QFT first.
    for qubit in range(n // 2):
        circ.swap(qubit, n - qubit - 1)
    # Then apply the conjugated rotations and Hadamards in reverse order.
    for j in range(n):
        for m in range(j):
            circ.cu1(-pi / float(2 ** (j - m)), m, j)
        circ.h(j)
def splitProjectInfo(value: str):
    """Split a comma-separated "hub,group,project" string into its parts.

    Raises RuntimeError when the string does not contain exactly three
    comma-separated values.
    """
    parts = value.split(",")
    if len(parts) == 3:
        return parts
    raise RuntimeError(f"Invalid project: {value}")
| [
"qiskit.IBMQ.load_account",
"numpy.conj",
"qiskit.QuantumCircuit",
"qiskit.IBMQ.get_provider",
"os.path.exists",
"qiskit.compiler.transpile",
"qiskit.providers.aer.noise.NoiseModel.from_backend",
"qiskit.tools.monitor.job_monitor",
"pickle.load",
"qiskit.execute",
"operator.itemgetter",
"qiski... | [((7166, 7195), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['N'], {'name': '"""QFT"""'}), "(N, name='QFT')\n", (7180, 7195), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((1146, 1173), 'os.path.exists', 'path.exists', (['noise_filename'], {}), '(noise_filename)\n', (1157, 1173), False, 'from os import path\n'), ((4571, 4590), 'qiskit.IBMQ.load_account', 'IBMQ.load_account', ([], {}), '()\n', (4588, 4590), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((5180, 5237), 'qiskit.compiler.transpile', 'transpile', (['qc'], {'backend': 'self.backend', 'optimization_level': '(1)'}), '(qc, backend=self.backend, optimization_level=1)\n', (5189, 5237), False, 'from qiskit.compiler import transpile\n'), ((5335, 5390), 'qiskit.execute', 'execute', (['qc_compiled'], {'backend': 'self.backend', 'shots': 'shots'}), '(qc_compiled, backend=self.backend, shots=shots)\n', (5342, 5390), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((5399, 5415), 'qiskit.tools.monitor.job_monitor', 'job_monitor', (['job'], {}), '(job)\n', (5410, 5415), False, 'from qiskit.tools.monitor import job_monitor\n'), ((5585, 5642), 'qiskit.compiler.transpile', 'transpile', (['qc'], {'backend': 'self.backend', 'optimization_level': '(1)'}), '(qc, backend=self.backend, optimization_level=1)\n', (5594, 5642), False, 'from qiskit.compiler import transpile\n'), ((5741, 5796), 'qiskit.execute', 'execute', (['qc_compiled'], {'backend': 'self.backend', 'shots': 'shots'}), '(qc_compiled, backend=self.backend, shots=shots)\n', (5748, 5796), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((1462, 1496), 'os.path.exists', 'path.exists', (['coupling_map_filename'], {}), '(coupling_map_filename)\n', (1473, 1496), False, 'from os import path\n'), ((2133, 2152), 'qiskit.IBMQ.load_account', 'IBMQ.load_account', ([], {}), '()\n', (2150, 2152), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((2763, 2795), 
'qiskit.providers.aer.noise.NoiseModel.from_backend', 'NoiseModel.from_backend', (['backend'], {}), '(backend)\n', (2786, 2795), False, 'from qiskit.providers.aer.noise import NoiseModel\n'), ((4937, 4993), 'qiskit.IBMQ.get_provider', 'IBMQ.get_provider', ([], {'hub': 'hub', 'group': 'group', 'project': 'project'}), '(hub=hub, group=group, project=project)\n', (4954, 4993), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((6034, 6046), 'numpy.conj', 'np.conj', (['rho'], {}), '(rho)\n', (6041, 6046), True, 'import numpy as np\n'), ((6159, 6190), 'qiskit.quantum_info.Pauli.pauli_single', 'Pauli.pauli_single', (['num', 'i', '"""X"""'], {}), "(num, i, 'X')\n", (6177, 6190), False, 'from qiskit.quantum_info import Pauli\n'), ((6204, 6235), 'qiskit.quantum_info.Pauli.pauli_single', 'Pauli.pauli_single', (['num', 'i', '"""Y"""'], {}), "(num, i, 'Y')\n", (6222, 6235), False, 'from qiskit.quantum_info import Pauli\n'), ((6249, 6280), 'qiskit.quantum_info.Pauli.pauli_single', 'Pauli.pauli_single', (['num', 'i', '"""Z"""'], {}), "(num, i, 'Z')\n", (6267, 6280), False, 'from qiskit.quantum_info import Pauli\n'), ((2572, 2628), 'qiskit.IBMQ.get_provider', 'IBMQ.get_provider', ([], {'hub': 'hub', 'group': 'group', 'project': 'project'}), '(hub=hub, group=group, project=project)\n', (2589, 2628), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n'), ((6696, 6718), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (6715, 6718), False, 'import operator\n'), ((1388, 1407), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1399, 1407), False, 'import pickle\n'), ((3958, 3991), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""qasm_simulator"""'], {}), "('qasm_simulator')\n", (3973, 3991), False, 'from qiskit import execute, Aer, IBMQ, QuantumCircuit\n')] |
import os
os.sys.path.extend([os.pardir, os.curdir])
import numpy as np
from common.function import cross_entropy, sigmoid, softmax
from common.gradient import numerical_grad
class TwoLayer(object):
    '''
    Minimal two-layer (one hidden layer) fully-connected network with a
    sigmoid hidden activation and a softmax output.

    >>> n = TwoLayer(2, 10, 3)
    >>> output = n.predict(np.array([[1, 2]]))
    >>> abs(np.sum(output) - 1.0) < 0.0001
    True
    >>> output = n.predict(np.array([[1, 2], [3, 4]]))
    >>> np.all(abs(np.sum(output, axis=1) - 1.0) < 0.0001)
    True
    '''
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Weights start as small Gaussian noise; biases start at zero.
        self.params = {}
        self.params['w1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['w2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        """Forward pass: softmax class probabilities for batch `x`."""
        w1, w2 = self.params['w1'], self.params['w2']
        b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        y = softmax(a2)
        return y

    def accuracy(self, x, t):
        """Fraction of samples whose argmax prediction matches one-hot `t`."""
        predicted_label = self.predict(x).argmax(axis=1)
        test_label = t.argmax(axis=1)
        return float(np.sum(predicted_label == test_label)) / x.shape[0]

    def loss(self, x, t):
        """Cross-entropy loss of the predictions against one-hot targets `t`."""
        y = self.predict(x)
        return cross_entropy(y, t)

    def numerical_gradient(self, x, t):
        """Finite-difference gradients of the loss (slow; for checking grad)."""
        lost_func = lambda w: self.loss(x, t)
        grads = {}
        for k in self.params:
            grads[k] = numerical_grad(lost_func, self.params[k])
        return grads

    def grad(self, x, t):
        """Backpropagation: analytic gradients of the loss w.r.t. all params."""
        w1, w2 = self.params['w1'], self.params['w2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}
        # forward
        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        y = softmax(a2)
        # backward
        dy = (y - t) / x.shape[0]  # softmax with entropy loss's gradient, dL/dy
        grads['w2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        da1 = np.dot(dy, w2.T)
        dz1 = (1.0 - sigmoid(a1)) * sigmoid(a1) * da1  # sigmoid's gradient
        grads['w1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)
        return grads
if __name__ == '__main__':
    # Run the doctests embedded in TwoLayer's class docstring.
    import doctest
    doctest.testmod()
| [
"numpy.sum",
"os.sys.path.extend",
"common.function.cross_entropy",
"numpy.random.randn",
"numpy.zeros",
"common.function.sigmoid",
"numpy.dot",
"common.function.softmax",
"common.gradient.numerical_grad",
"doctest.testmod"
] | [((10, 52), 'os.sys.path.extend', 'os.sys.path.extend', (['[os.pardir, os.curdir]'], {}), '([os.pardir, os.curdir])\n', (28, 52), False, 'import os\n'), ((2366, 2383), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (2381, 2383), False, 'import doctest\n'), ((697, 718), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (705, 718), True, 'import numpy as np\n'), ((835, 856), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (843, 856), True, 'import numpy as np\n'), ((1038, 1049), 'common.function.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (1045, 1049), False, 'from common.function import cross_entropy, sigmoid, softmax\n'), ((1095, 1106), 'common.function.softmax', 'softmax', (['a2'], {}), '(a2)\n', (1102, 1106), False, 'from common.function import cross_entropy, sigmoid, softmax\n'), ((1393, 1412), 'common.function.cross_entropy', 'cross_entropy', (['y', 't'], {}), '(y, t)\n', (1406, 1412), False, 'from common.function import cross_entropy, sigmoid, softmax\n'), ((1853, 1864), 'common.function.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (1860, 1864), False, 'from common.function import cross_entropy, sigmoid, softmax\n'), ((1910, 1921), 'common.function.softmax', 'softmax', (['a2'], {}), '(a2)\n', (1917, 1921), False, 'from common.function import cross_entropy, sigmoid, softmax\n'), ((2045, 2061), 'numpy.dot', 'np.dot', (['z1.T', 'dy'], {}), '(z1.T, dy)\n', (2051, 2061), True, 'import numpy as np\n'), ((2084, 2102), 'numpy.sum', 'np.sum', (['dy'], {'axis': '(0)'}), '(dy, axis=0)\n', (2090, 2102), True, 'import numpy as np\n'), ((2118, 2134), 'numpy.dot', 'np.dot', (['dy', 'w2.T'], {}), '(dy, w2.T)\n', (2124, 2134), True, 'import numpy as np\n'), ((2233, 2249), 'numpy.dot', 'np.dot', (['x.T', 'dz1'], {}), '(x.T, dz1)\n', (2239, 2249), True, 'import numpy as np\n'), ((2272, 2291), 'numpy.sum', 'np.sum', (['dz1'], {'axis': '(0)'}), '(dz1, axis=0)\n', (2278, 2291), True, 'import numpy as np\n'), ((628, 668), 
'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (643, 668), True, 'import numpy as np\n'), ((765, 806), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (780, 806), True, 'import numpy as np\n'), ((1006, 1019), 'numpy.dot', 'np.dot', (['x', 'w1'], {}), '(x, w1)\n', (1012, 1019), True, 'import numpy as np\n'), ((1063, 1077), 'numpy.dot', 'np.dot', (['z1', 'w2'], {}), '(z1, w2)\n', (1069, 1077), True, 'import numpy as np\n'), ((1572, 1613), 'common.gradient.numerical_grad', 'numerical_grad', (['lost_func', 'self.params[k]'], {}), '(lost_func, self.params[k])\n', (1586, 1613), False, 'from common.gradient import numerical_grad\n'), ((1821, 1834), 'numpy.dot', 'np.dot', (['x', 'w1'], {}), '(x, w1)\n', (1827, 1834), True, 'import numpy as np\n'), ((1878, 1892), 'numpy.dot', 'np.dot', (['z1', 'w2'], {}), '(z1, w2)\n', (1884, 1892), True, 'import numpy as np\n'), ((1271, 1308), 'numpy.sum', 'np.sum', (['(predicted_label == test_label)'], {}), '(predicted_label == test_label)\n', (1277, 1308), True, 'import numpy as np\n'), ((2171, 2182), 'common.function.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (2178, 2182), False, 'from common.function import cross_entropy, sigmoid, softmax\n'), ((2156, 2167), 'common.function.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (2163, 2167), False, 'from common.function import cross_entropy, sigmoid, softmax\n')] |
# lower_bound = (40,70,70)
# upper_bound = (180,255,255)
import matplotlib.pyplot as plt
import numpy as np
import cv2
from matplotlib.colors import hsv_to_rgb, rgb_to_hsv
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
import argparse
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
# CLI: a single positional argument pointing at the image to analyse.
parser = argparse.ArgumentParser()
parser.add_argument('data', help='path to png file')
args = parser.parse_args()
dataPath = args.data
# OpenCV loads BGR; convert a copy to RGB for the colour-space work below.
patch = cv2.imread(dataPath)
patch_RGB = cv2.cvtColor(patch.copy(), cv2.COLOR_BGR2RGB)
# pink1 = np.uint8([[[255,192,203]]])
# pink2 = np.uint8([[[199,21,133]]])
# pink1_h = cv2.cvtColor(pink1,cv2.COLOR_RGB2HSV)
# pink2_h = cv2.cvtColor(pink2,cv2.COLOR_RGB2HSV)
# purple1 = np.uint8([[[75,0,130]]])
# purple2 = np.uint8([[[230,230,250]]])
# purple1_h = cv2.cvtColor(purple1,cv2.COLOR_RGB2HSV)
# purple2_h = cv2.cvtColor(purple2,cv2.COLOR_RGB2HSV)
""" Color plot >>> """
# r, g, b = cv2.split(patch_RGB)
# fig = plt.figure()
# axis = fig.add_subplot(1, 1, 1, projection="3d")
# pixel_colors = patch_RGB.reshape((np.shape(patch_RGB)[0]*np.shape(patch_RGB)[1], 3))
# norm = colors.Normalize(vmin=-1.,vmax=1.)
# norm.autoscale(pixel_colors)
# pixel_colors = norm(pixel_colors).tolist()
# axis.scatter(r.flatten(), g.flatten(), b.flatten(), facecolors=pixel_colors, marker=".")
# axis.set_xlabel("Red")
# axis.set_ylabel("Green")
# axis.set_zlabel("Blue")
# plt.show()
""" <<< Color plot """
# print(pink1_h)
# print(pink2_h)
# print(purple1_h)
# print(purple2_h)
# purple1_h = (240, 8, 98)
# purple3_h = (274, 100, 50)
# pink1_h = (322, 89, 78)
# pink2_h = (349, 24, 100)
patch_HSV = cv2.cvtColor(patch_RGB, cv2.COLOR_RGB2HSV)
"""
# hsv range
"""
# purple1 = np.uint8([[[204,153,255]]])
# purple2 = np.uint8([[[153,51,255]]])
# lower_bound =rgb_to_hsv(purple1)
# upper_bound =rgb_to_hsv(purple2)
# print("low = ", lower_bound)
# print("upp = ", upper_bound)
# lower_bound = (150,70,70)
# upper_bound = (200,255,255)
# Active HSV window used for the mask (OpenCV hue range is 0-179).
lower_bound = (40,70,70)
upper_bound = (180,255,255)
"""
# hsv range
"""
mask = cv2.inRange(patch_HSV, lower_bound, upper_bound)
# result = cv2.bitwise_and(patch_RGB,patch_RGB, mask=mask)
# External contours of the masked regions.
cnts, hierarchy = cv2.findContours(mask,
        cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# print(np.shape(cnts))
area = 0
# newCnts = list()
# for c in cnts:
# # if(cv2.contourArea(c) > 10000):
# area += cv2.contourArea(c)
# # print(np.shape(c))
# newCnts.append(c)
# cv2.drawContours(patch,[c],0,(0,255,0),cv2.FILLED)
# cv2.drawContours(patch_HSV,[c],0,(0,255,0),cv2.FILLED)
# cv2.drawContours(mask,[c],0,(128),cv2.FILLED)
# newCnts = np.asarray(newCnts)
# c = max(cnts, key = cv2.contourArea)
# cv2.drawContours(patch,[c],0,(0, 255, 0),3)
# Mark every detected contour on the original BGR image and show it.
cv2.drawContours(patch,cnts,-1,(0, 255, 0),3)
cv2.imshow("origianl",patch)
cv2.waitKey()
# cv2.drawContours(mask,[c],0,255,-1)
# Coordinates (row, col) of every pixel that passed the HSV mask.
pixelpoints = np.transpose(np.nonzero(mask))
print(np.shape(pixelpoints))
print("len pix points = ",len(pixelpoints))
(H,S,V) = (0,0,0)
overFlow = 0
# Average H, S and V over the masked pixels.
# NOTE(review): this per-pixel Python loop could likely be replaced by a
# vectorized np.mean over patch_HSV[mask > 0] -- confirm before changing.
for row in pixelpoints:
    # print(overFlow)
    H += patch_HSV[row[0],row[1],0]
    S += patch_HSV[row[0],row[1],1]
    V += patch_HSV[row[0],row[1],2]
H /= len(pixelpoints)
S /= len(pixelpoints)
V /= len(pixelpoints)
print("H,S,V = ",H,",",S,",",V)
# 10x10 swatch of the mean colour, normalised to [0, 1] for matplotlib.
lo_square = np.full((10, 10, 3), (H,S,V), dtype=np.uint8) / 255.0
# plt.subplot(1, 2, 2)
# plt.imshow(hsv_to_rgb(lo_square))
# plt.show()
cv2.imshow("patch in hsv",patch_HSV)
cv2.waitKey()
# print("contour points = ", cnts[1])
# print("---------------")
# print(np.shape(newCnts))
# print("---------------")
# Initialize empty list
lst_intensities = []
# For each list of contour points...
# for i in range(len(newCnts)):
# # Create a mask image that contains the contour filled in
# cimg = np.zeros_like(patch_HSV)
# cv2.drawContours(cimg, newCnts, i, color=255, thickness=-1)
# # Access the image pixels and create a 1D numpy array then add to list
# pts = np.where(cimg == 255)
# print(pts[:][:6])
# lst_intensities.append(patch_HSV[pts[0], pts[1]])
| [
"numpy.full",
"argparse.ArgumentParser",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.nonzero",
"cv2.imread",
"numpy.shape",
"cv2.drawContours",
"cv2.imshow",
"cv2.inRange",
"cv2.findContours"
] | [((391, 416), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (414, 416), False, 'import argparse\n'), ((527, 547), 'cv2.imread', 'cv2.imread', (['dataPath'], {}), '(dataPath)\n', (537, 547), False, 'import cv2\n'), ((1708, 1750), 'cv2.cvtColor', 'cv2.cvtColor', (['patch_RGB', 'cv2.COLOR_RGB2HSV'], {}), '(patch_RGB, cv2.COLOR_RGB2HSV)\n', (1720, 1750), False, 'import cv2\n'), ((2127, 2175), 'cv2.inRange', 'cv2.inRange', (['patch_HSV', 'lower_bound', 'upper_bound'], {}), '(patch_HSV, lower_bound, upper_bound)\n', (2138, 2175), False, 'import cv2\n'), ((2255, 2321), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2271, 2321), False, 'import cv2\n'), ((2814, 2863), 'cv2.drawContours', 'cv2.drawContours', (['patch', 'cnts', '(-1)', '(0, 255, 0)', '(3)'], {}), '(patch, cnts, -1, (0, 255, 0), 3)\n', (2830, 2863), False, 'import cv2\n'), ((2861, 2890), 'cv2.imshow', 'cv2.imshow', (['"""origianl"""', 'patch'], {}), "('origianl', patch)\n", (2871, 2890), False, 'import cv2\n'), ((2890, 2903), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2901, 2903), False, 'import cv2\n'), ((3485, 3522), 'cv2.imshow', 'cv2.imshow', (['"""patch in hsv"""', 'patch_HSV'], {}), "('patch in hsv', patch_HSV)\n", (3495, 3522), False, 'import cv2\n'), ((3522, 3535), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (3533, 3535), False, 'import cv2\n'), ((2972, 2988), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (2982, 2988), True, 'import numpy as np\n'), ((2996, 3017), 'numpy.shape', 'np.shape', (['pixelpoints'], {}), '(pixelpoints)\n', (3004, 3017), True, 'import numpy as np\n'), ((3358, 3405), 'numpy.full', 'np.full', (['(10, 10, 3)', '(H, S, V)'], {'dtype': 'np.uint8'}), '((10, 10, 3), (H, S, V), dtype=np.uint8)\n', (3365, 3405), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Extracts raw values around a point.
There's some complexity in what should happen when the requested time is different from
the actual times available in the desired band: do we generate a pseudo-point or re-center
around a nearby one? For now we'll do the latter, and possibly throw away all data for
this band if it's simply too far away, but we should revisit this decision.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from justice import lightcurve
from justice.features import band_settings_params, pointwise_feature_extractor
class RawValueExtractor(pointwise_feature_extractor.PointwiseFeatureExtractor):
    """Extracts raw time/flux values in a fixed-size window around a point.

    The window is re-centered on the observation closest to the requested
    time in each band; the before/after sides are truncated or zero-padded
    so each has exactly ``window_size`` entries.
    """
    band_settings: band_settings_params.BandSettings
    window_size: int

    def __init__(self, window_size: int, band_settings, window_bias: float = 1e-8):
        self.window_size = window_size
        self.band_settings = band_settings
        self.window_bias = window_bias

    def _zero_block(self, array: np.ndarray, num_pad):
        # Zero padding with the same trailing dimensions and dtype as `array`.
        return np.zeros(shape=(num_pad,) + array.shape[1:], dtype=array.dtype)

    def _window_pad_left(self, array: np.ndarray, num_pad, axis=0):
        """Keep the last `window_size` rows and left-pad with zeros."""
        assert axis == 0, "Other modes not implemented yet."
        tail = array[-self.window_size:]
        return np.concatenate((self._zero_block(tail, num_pad), tail), axis=axis)

    def _window_pad_right(self, array: np.ndarray, num_pad, axis=0):
        """Keep the first `window_size` rows and right-pad with zeros."""
        assert axis == 0, "Other modes not implemented yet."
        head = array[:self.window_size]
        return np.concatenate((head, self._zero_block(head, num_pad)), axis=axis)

    def _get_num_pad(self, array: np.ndarray, axis=0):
        # Number of zeros needed to bring `array` up to the window size.
        return max(0, self.window_size - array.shape[axis])

    def _extract_per_band(self, band: lightcurve.BandData, time: float):
        """Windowed raw values for one band, re-centered on the closest point."""
        closest = band.closest_point(time)
        before = band.before_time(closest.time, bias=self.window_bias)
        after = band.after_time(closest.time, bias=self.window_bias)
        before_pad = self._get_num_pad(before.time)
        after_pad = self._get_num_pad(after.time)
        return {
            "before_time": self._window_pad_left(before.time, before_pad),
            "before_flux": self._window_pad_left(before.flux, before_pad),
            "before_padding": before_pad,
            "after_time": self._window_pad_right(after.time, after_pad),
            "after_flux": self._window_pad_right(after.flux, after_pad),
            "after_padding": after_pad,
            "requested_time": time,
            "closest_time_in_band": closest.time,
            "closest_flux_in_band": closest.flux,
            "closest_time_diff": abs(closest.time - time),
        }

    def extract(self, lc: lightcurve._LC, time: float):
        """Generate per-band raw-value features for light curve `lc` at `time`."""
        per_band_fn = functools.partial(self._extract_per_band, time=time)
        return self.band_settings.generate_per_band_features(per_band_fn, lc)
| [
"functools.partial",
"numpy.zeros"
] | [((2880, 2932), 'functools.partial', 'functools.partial', (['self._extract_per_band'], {'time': 'time'}), '(self._extract_per_band, time=time)\n', (2897, 2932), False, 'import functools\n'), ((1246, 1309), 'numpy.zeros', 'np.zeros', ([], {'shape': '((num_pad,) + array.shape[1:])', 'dtype': 'array.dtype'}), '(shape=(num_pad,) + array.shape[1:], dtype=array.dtype)\n', (1254, 1309), True, 'import numpy as np\n'), ((1575, 1638), 'numpy.zeros', 'np.zeros', ([], {'shape': '((num_pad,) + array.shape[1:])', 'dtype': 'array.dtype'}), '(shape=(num_pad,) + array.shape[1:], dtype=array.dtype)\n', (1583, 1638), True, 'import numpy as np\n')] |
import copy
import datetime
import logging
import os
import time
from functools import partial
from pathlib import Path
from typing import Any, Dict, Optional, Union
import numpy as np
from memory_profiler import memory_usage
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.bandstructure import BandStructure
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.vasp import Vasprun
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import unicodeify, unicodeify_spacegroup
from tabulate import tabulate
from amset import __version__
from amset.constants import bohr_to_cm, ev_to_hartree, hbar, numeric_types
from amset.core.transport import solve_boltzman_transport_equation
from amset.electronic_structure.common import get_band_structure
from amset.interpolation.bandstructure import Interpolator
from amset.interpolation.projections import ProjectionOverlapCalculator
from amset.interpolation.wavefunction import WavefunctionOverlapCalculator
from amset.io import load_settings, write_settings
from amset.log import initialize_amset_logger, log_banner, log_list
from amset.scattering.calculate import ScatteringCalculator, basic_scatterers
from amset.util import tensor_average, validate_settings
# Package metadata.
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Module-level logger; handlers are attached by initialize_amset_logger at run time.
logger = logging.getLogger(__name__)
# Format template for fractional k-point coordinates, e.g. "[0.50, 0.00, 0.25]".
_kpt_str = "[{k[0]:.2f}, {k[1]:.2f}, {k[2]:.2f}]"
class Runner(MSONable):
    """Top-level driver for a full AMSET transport calculation.

    The workflow is: band interpolation -> density of states -> scattering
    rates -> Boltzmann transport solution -> writing of results, with the
    elapsed time and peak memory of each stage recorded.
    """
    def __init__(
        self,
        band_structure: BandStructure,
        num_electrons: int,
        settings: Dict[str, Any],
    ):
        # band_structure: uniform-mesh band structure to interpolate.
        # num_electrons: total electron count used to place the Fermi level.
        self._band_structure = band_structure
        self._num_electrons = num_electrons
        # set materials and performance parameters
        # if the user doesn't specify a value then use the default
        self.settings = validate_settings(settings)
    def run(
        self,
        directory: Union[str, Path] = ".",
        return_usage_stats: bool = False,
        prefix: Optional[str] = None,
    ):
        """Run the full workflow while profiling peak memory usage.

        Args:
            directory: Directory in which output files are written.
            return_usage_stats: If True, also return the timing/memory stats.
            prefix: Optional prefix for output file names.

        Returns:
            The final amset data object, and additionally the usage-stats
            dict when ``return_usage_stats`` is True.
        """
        # memory_usage profiles the wrapped call and, with retval=True,
        # returns both the peak memory and the wrapped function's result.
        mem_usage, (amset_data, usage_stats) = memory_usage(
            partial(self._run_wrapper, directory=directory, prefix=prefix),
            max_usage=True,
            retval=True,
            interval=0.1,
            include_children=False,
            multiprocess=True,
        )
        log_banner("END")
        logger.info("Timing and memory usage:")
        timing_info = [f"{n} time: {t:.4f} s" for n, t in usage_stats.items()]
        log_list(timing_info + [f"max memory: {mem_usage:.1f} MB"])
        this_date = datetime.datetime.now().strftime("%d %b %Y")
        this_time = datetime.datetime.now().strftime("%H:%M")
        logger.info(f"amset exiting on {this_date} at {this_time}")
        if return_usage_stats:
            usage_stats["max_memory"] = mem_usage
            return amset_data, usage_stats
        else:
            return amset_data
    def _run_wrapper(
        self, directory: Union[str, Path] = ".", prefix: Optional[str] = None
    ):
        """Execute all workflow stages; returns (amset_data, timing dict)."""
        if self.settings["print_log"] or self.settings["write_log"]:
            if self.settings["write_log"]:
                log_file = f"{prefix}_amset.log" if prefix else "amset.log"
            else:
                # filename=False disables the file handler.
                log_file = False
            initialize_amset_logger(
                directory=directory,
                filename=log_file,
                print_log=self.settings["print_log"],
            )
        self._check_wavefunction()
        tt = time.perf_counter()
        _log_amset_intro()
        _log_settings(self)
        _log_structure_information(
            self._band_structure.structure, self.settings["symprec"]
        )
        _log_band_structure_information(self._band_structure)
        amset_data, interpolation_time = self._do_interpolation()
        timing = {"interpolation": interpolation_time}
        amset_data, dos_time = self._do_dos(amset_data)
        timing["dos"] = dos_time
        amset_data, scattering_time = self._do_scattering(amset_data)
        timing["scattering"] = scattering_time
        # fd_tol may be a single number or a sequence of cutoffs; a sequence
        # triggers one transport/writing pass per cutoff.
        if isinstance(self.settings["fd_tol"], numeric_types):
            amset_data, timing = self._do_fd_tol(amset_data, directory, prefix, timing)
        else:
            amset_data, timing = self._do_many_fd_tol(
                amset_data, self.settings["fd_tol"], directory, prefix, timing
            )
        timing["total"] = time.perf_counter() - tt
        return amset_data, timing
    def _do_fd_tol(self, amset_data, directory, prefix, timing):
        """Solve transport and write results for the current Fermi-Dirac cutoffs."""
        amset_data.fill_rates_outside_cutoffs()
        amset_data, transport_time = self._do_transport(amset_data)
        timing["transport"] = transport_time
        filepath, writing_time = self._do_writing(amset_data, directory, prefix)
        timing["writing"] = writing_time
        return amset_data, timing
    def _do_many_fd_tol(self, amset_data, fd_tols, directory, prefix, timing):
        """Repeat the transport/writing stage once per tolerance in ``fd_tols``."""
        prefix = "" if prefix is None else prefix + "_"
        cutoff_pad = _get_cutoff_pad(
            self.settings["pop_frequency"], self.settings["scattering_type"]
        )
        # Keep a pristine copy of the rates, as each pass overwrites them.
        orig_rates = copy.deepcopy(amset_data.scattering_rates)
        mobility_rates_only = self.settings["mobility_rates_only"]
        for fd_tol in sorted(fd_tols)[::-1]:
            # do smallest cutoff last, so the final amset_data is the best result
            for spin in amset_data.spins:
                amset_data.scattering_rates[spin][:] = orig_rates[spin][:]
            amset_data.calculate_fd_cutoffs(
                fd_tol, cutoff_pad=cutoff_pad, mobility_rates_only=mobility_rates_only
            )
            fd_prefix = prefix + f"fd-{fd_tol}"
            _, timing = self._do_fd_tol(amset_data, directory, fd_prefix, timing)
            # Rename the generic timing keys so each cutoff keeps its own entry.
            timing[f"transport ({fd_tol})"] = timing.pop("transport")
            timing[f"writing ({fd_tol})"] = timing.pop("writing")
        return amset_data, timing
    def _check_wavefunction(self):
        """Validate that a wavefunction-overlap source is available.

        Raises:
            ValueError: If neither wavefunction coefficients nor orbital
                projections can be used to compute overlaps.
        """
        if (
            not Path(self.settings["wavefunction_coefficients"]).exists()
            and not self.settings["use_projections"]
        ):
            raise ValueError(
                "Could not find wavefunction coefficients. To run AMSET, the \n"
                "wavefunction coefficients should first be extracted from a WAVECAR \n"
                "file using the 'amset wave' command. See the documentation for more \n"
                "details: https://hackingmaterials.lbl.gov/amset/\n\n"
                "Alternatively, to use the band structure orbital projections to \n"
                "approximate overlap, set 'use_projections' option to true."
            )
        elif self.settings["use_projections"] and not self._band_structure.projections:
            raise ValueError(
                "use_projections is set to true but calculation does not contain\n"
                "orbital projections. Ensure VASP was run with 'LORBIT = 11'\n"
                "Alternatively, use wavefunction coefficients to calculate overlap.\n"
                "Wavefunction coefficients can be extracted from a VASP WAVECAR\n"
                "file using the 'amset wave' command. See the documentation for more\n"
                "details: https://hackingmaterials.lbl.gov/amset/\n\n"
            )
        elif self.settings["use_projections"]:
            logger.info(
                "Using orbital projections to approximate wavefunction overlap. This "
                "can result in inaccurate results. I hope you know what you are doing."
            )
    def _do_interpolation(self):
        """Interpolate the bands; returns (amset_data, elapsed seconds)."""
        log_banner("INTERPOLATION")
        t0 = time.perf_counter()
        interpolater = Interpolator(
            self._band_structure,
            num_electrons=self._num_electrons,
            interpolation_factor=self.settings["interpolation_factor"],
            soc=self.settings["soc"],
        )
        amset_data = interpolater.get_amset_data(
            energy_cutoff=self.settings["energy_cutoff"],
            scissor=self.settings["scissor"],
            bandgap=self.settings["bandgap"],
            symprec=self.settings["symprec"],
            nworkers=self.settings["nworkers"],
        )
        # Overlaps are only required for non-basic scattering mechanisms.
        if set(self.settings["scattering_type"]).issubset(set(basic_scatterers)):
            overlap_calculator = None
        elif self.settings["use_projections"]:
            overlap_calculator = ProjectionOverlapCalculator.from_band_structure(
                self._band_structure,
                energy_cutoff=self.settings["energy_cutoff"],
                symprec=self.settings["symprec"],
            )
        else:
            overlap_calculator = WavefunctionOverlapCalculator.from_file(
                self.settings["wavefunction_coefficients"]
            )
        amset_data.set_overlap_calculator(overlap_calculator)
        return amset_data, time.perf_counter() - t0
    def _do_dos(self, amset_data):
        """Compute the DOS and Fermi-Dirac cutoffs; returns (amset_data, seconds)."""
        log_banner("DOS")
        t0 = time.perf_counter()
        amset_data.calculate_dos(
            estep=self.settings["dos_estep"], progress_bar=self.settings["print_log"]
        )
        amset_data.set_doping_and_temperatures(
            self.settings["doping"], self.settings["temperatures"]
        )
        cutoff_pad = _get_cutoff_pad(
            self.settings["pop_frequency"], self.settings["scattering_type"]
        )
        # When several tolerances are given, use the tightest (smallest) one
        # here so the k-point coverage is sufficient for every later pass.
        if isinstance(self.settings["fd_tol"], numeric_types):
            fd_tol = self.settings["fd_tol"]
        else:
            fd_tol = min(self.settings["fd_tol"])
        mob_only = self.settings["mobility_rates_only"]
        amset_data.calculate_fd_cutoffs(
            fd_tol, cutoff_pad=cutoff_pad, mobility_rates_only=mob_only
        )
        return amset_data, time.perf_counter() - t0
    def _do_scattering(self, amset_data):
        """Calculate scattering rates; returns (amset_data, elapsed seconds)."""
        log_banner("SCATTERING")
        t0 = time.perf_counter()
        cutoff_pad = _get_cutoff_pad(
            self.settings["pop_frequency"], self.settings["scattering_type"]
        )
        scatter = ScatteringCalculator(
            self.settings,
            amset_data,
            cutoff_pad,
            scattering_type=self.settings["scattering_type"],
            progress_bar=self.settings["print_log"],
            cache_wavefunction=self.settings["cache_wavefunction"],
            nworkers=self.settings["nworkers"],
        )
        amset_data.set_scattering_rates(
            scatter.calculate_scattering_rates(), scatter.scatterer_labels
        )
        return amset_data, time.perf_counter() - t0
    def _do_transport(self, amset_data):
        """Solve the BTE for transport properties; returns (amset_data, seconds)."""
        log_banner("TRANSPORT")
        t0 = time.perf_counter()
        transport_properties = solve_boltzman_transport_equation(
            amset_data,
            separate_mobility=self.settings["separate_mobility"],
            calculate_mobility=self.settings["calculate_mobility"],
            progress_bar=self.settings["print_log"],
        )
        amset_data.set_transport_properties(*transport_properties)
        return amset_data, time.perf_counter() - t0
    def _do_writing(self, amset_data, directory, prefix):
        """Log a results summary and write output files.

        Returns:
            Tuple of the written file path(s) (None when ``file_format`` is
            None) and the elapsed time in seconds.
        """
        log_banner("RESULTS")
        _log_results_summary(amset_data, self.settings)
        abs_dir = os.path.abspath(directory)
        t0 = time.perf_counter()
        if not os.path.exists(abs_dir):
            os.makedirs(abs_dir)
        if self.settings["write_input"]:
            self.write_settings(abs_dir)
        if self.settings["file_format"] is None:
            # Writing disabled.
            full_filename = None
        else:
            filename = amset_data.to_file(
                directory=abs_dir,
                write_mesh_file=self.settings["write_mesh"],
                prefix=prefix,
                file_format=self.settings["file_format"],
            )
            # to_file may return several filenames (e.g. transport + mesh).
            if isinstance(filename, tuple):
                full_filename = "\nand\n".join(
                    [str(Path(abs_dir) / f) for f in filename]
                )
            else:
                full_filename = Path(abs_dir) / filename
            logger.info(f"Results written to:\n{full_filename}")
        return full_filename, time.perf_counter() - t0
    @staticmethod
    def from_vasprun(
        vasprun: Union[str, Path, Vasprun], settings: Dict[str, Any]
    ) -> "Runner":
        """Initialise an AmsetRunner from a Vasprun.

        The nelect and soc options will be determined from the Vasprun
        automatically.

        Args:
            vasprun: Path to a vasprun or a Vasprun pymatgen object.
            settings: AMSET settings.

        Returns:
            A Runner object that can be used to calculate transport properties.
        """
        if not isinstance(vasprun, Vasprun):
            vasprun = Vasprun(vasprun, parse_projected_eigen=True)
        zwk_option = settings.get("zero_weighted_kpoints", "keep")
        band_structure = get_band_structure(vasprun, zero_weighted=zwk_option)
        nelect = vasprun.parameters["NELECT"]
        settings["soc"] = vasprun.parameters["LSORBIT"]
        return Runner(band_structure, nelect, settings)
    @staticmethod
    def from_directory(
        directory: Union[str, Path] = ".",
        input_file: Optional[Union[str, Path]] = None,
        settings_file: Optional[Union[str, Path]] = None,
        settings_override: Optional[Dict[str, Any]] = None,
    ):
        """
        Initialize amset Runner from a directory.

        If input_file or settings_file are not specified, the code will look in the
        specified directory for these files.

        Args:
            directory: A directory.
            input_file: An input file path, can either be vasprun.xml(.gz) or a
                band_structure_data.json(.gz) file containing the keys:

                - "nelect" (int): The number of electrons in the system.
                - "band_structure (BandStructure)": A pymatgen band structure object.
            settings_file: Path to settings file.
            settings_override: Settings that will be used to override the settings
                in the settings file.

        Returns:
            A Runner instance that can be used to run amset.
        """
        directory = Path(directory)
        if not settings_file:
            settings_file = directory / "settings.yaml"
        settings = load_settings(settings_file)
        if settings_override:
            settings.update(settings_override)
        run_type, input_file = _get_run_type(directory, input_file)
        if run_type == "vasprun":
            return Runner.from_vasprun(input_file, settings)
        elif run_type == "band_structure":
            data = loadfn(input_file)
            nelect = data["nelect"]
            band_structure = data["band_structure"]
            return Runner(band_structure, nelect, settings)
        else:
            raise ValueError(f"Unrecognised run type: {run_type}")
    def write_settings(self, directory: str = ".", prefix: Optional[str] = None):
        """Write the validated settings to ``{prefix}amset_settings.yaml``."""
        prefix = "" if prefix is None else f"{prefix}_"
        filename = Path(directory) / f"{prefix}amset_settings.yaml"
        write_settings(self.settings, filename)
def _log_amset_intro():
    """Log the AMSET ASCII banner, version, and citation at startup."""
    now = datetime.datetime.now()
    logger.info(
        """
 █████╗ ███╗   ███╗███████╗███████╗████████╗
██╔══██╗████╗ ████║██╔════╝██╔════╝╚══██╔══╝
███████║██╔████╔██║███████╗█████╗     ██║
██╔══██║██║╚██╔╝██║╚════██║██╔══╝     ██║
██║  ██║██║ ╚═╝ ██║███████║███████╗   ██║
╚═╝  ╚═╝╚═╝     ╚═╝╚══════╝╚══════╝   ╚═╝
                                  v{}
    <NAME>., <NAME>., <NAME>., Woods-Robinson,
    R., <NAME>., <NAME>. Efficient calculation of
    carrier scattering rates from first principles.
    Nat. Commun. 12, 2222 (2021)
    amset starting on {} at {}""".format(
            __version__, now.strftime("%d %b %Y"), now.strftime("%H:%M")
        )
    )
def _log_structure_information(structure: Structure, symprec):
    """Log the composition, space group, and lattice parameters of *structure*."""
    log_banner("STRUCTURE")
    logger.info("Structure information:")
    reduced_formula = structure.composition.get_reduced_formula_and_factor(
        iupac_ordering=True
    )[0]
    # A falsy symprec disables the symmetry tolerance; fall back to a tiny
    # value so SpacegroupAnalyzer still functions.
    analyzer = SpacegroupAnalyzer(structure, symprec=symprec if symprec else 1e-32)
    space_group = unicodeify_spacegroup(analyzer.get_space_group_symbol())
    log_list(
        [
            f"formula: {unicodeify(reduced_formula)}",
            f"# sites: {structure.num_sites}",
            f"space group: {space_group}",
        ]
    )
    logger.info("Lattice:")
    cell = structure.lattice
    log_list(
        [
            "a, b, c [Å]: {:.2f}, {:.2f}, {:.2f}".format(*cell.abc),
            "α, β, γ [°]: {:.0f}, {:.0f}, {:.0f}".format(*cell.angles),
        ]
    )
# Log template for pretty-printing 3x3 tensors; the leading "│" characters
# align the matrix rows with the indented list output produced by log_list.
_tensor_str = """
│ [[{:6.2f} {:6.2f} {:6.2f}]
│ [{:6.2f} {:6.2f} {:6.2f}]
│ [{:6.2f} {:6.2f} {:6.2f}]]"""
# Voigt-notation template for 6x6 elastic constant tensors.
_elastic_tensor_str = """
│ [[{:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f}]
│ [{:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f}]
│ [{:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f}]
│ [{:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f}]
│ [{:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f}]
│ [{:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f} {:6.1f}]]"""
# Voigt-notation template for 3x6 piezoelectric tensors.
_piezo_tensor_str = """
│ [[{:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f}]
│ [{:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f}]
│ [{:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f} {:7.4f}]]"""
def _log_settings(runner: Runner):
    """Log every non-null runner setting, pretty-printing tensor values."""
    from pymatgen.core.tensors import Tensor

    def _format(value):
        # Render 3x3 / piezoelectric / elastic tensors using the module
        # templates; everything else is passed through unchanged.
        if not isinstance(value, np.ndarray):
            return value
        if value.shape == (3, 3):
            return _tensor_str.format(*value.ravel())
        if value.shape == (3, 3, 3):
            return _piezo_tensor_str.format(*Tensor(value).voigt.ravel())
        if value.shape == (3, 3, 3, 3):
            return _elastic_tensor_str.format(*Tensor(value).voigt.ravel())
        return value

    log_banner("SETTINGS")
    logger.info("Run parameters:")
    entries = [
        f"{key}: {_format(val)}"
        for key, val in runner.settings.items()
        if val is not None
    ]
    log_list(entries)
def _log_band_structure_information(band_structure: BandStructure):
    """Log summary statistics, the band gap, and the band edges of the input."""
    log_banner("BAND STRUCTURE")
    logger.info("Input band structure information:")
    log_list(
        [
            f"# bands: {band_structure.nb_bands}",
            f"# k-points: {len(band_structure.kpoints)}",
            f"Fermi level: {band_structure.efermi:.3f} eV",
            f"spin polarized: {band_structure.is_spin_polarized}",
            f"metallic: {band_structure.is_metal()}",
        ]
    )
    if band_structure.is_metal():
        # Metals have no gap or band edges to report.
        return
    logger.info("Band gap:")
    gap_lines = []
    bg_data = band_structure.get_band_gap()
    if not bg_data["direct"]:
        gap_lines.append("indirect band gap: {:.3f} eV".format(bg_data["energy"]))
    direct_data = band_structure.get_direct_band_gap_dict()
    direct_gap = min(entry["value"] for entry in direct_data.values())
    gap_lines.append(f"direct band gap: {direct_gap:.3f} eV")
    direct_kpoints = [
        _kpt_str.format(k=band_structure.kpoints[entry["kpoint_index"]].frac_coords)
        for entry in direct_data.values()
    ]
    gap_lines.append("direct k-point: {}".format(", ".join(direct_kpoints)))
    log_list(gap_lines)
    logger.info("Valence band maximum:")
    _log_band_edge_information(band_structure, band_structure.get_vbm())
    logger.info("Conduction band minimum:")
    _log_band_edge_information(band_structure, band_structure.get_cbm())
def _log_band_edge_information(band_structure, edge_data):
    """Log the energy, k-point, and band indices of a band edge (VBM or CBM).

    Args:
        band_structure: A band structure.
        edge_data: The :obj:`dict` from ``bs.get_vbm()`` or ``bs.get_cbm()``.
    """
    band_index = edge_data["band_index"]
    if band_structure.is_spin_polarized:
        # One "1, 2, 3(Up)"-style group per spin channel, joined with commas.
        per_spin = []
        for spin, indices in band_index.items():
            joined = ", ".join([str(i + 1) for i in indices])
            per_spin.append(joined + f"({spin.name.capitalize()})")
        b_indices = ", ".join(per_spin)
    else:
        b_indices = ", ".join([str(i + 1) for i in band_index[Spin.up]])
    kpoint_str = _kpt_str.format(k=edge_data["kpoint"].frac_coords)
    log_list(
        [
            "energy: {:.3f} eV".format(edge_data["energy"]),
            f"k-point: {kpoint_str}",
            f"band indices: {b_indices}",
        ]
    )
def _log_results_summary(amset_data, output_parameters):
    """Log tabulated average transport results and, optionally, the mobility
    breakdown by scattering mechanism."""
    # Convert carrier concentrations from bohr^-3 back to cm^-3 for display.
    doping = [d * (1 / bohr_to_cm) ** 3 for d in amset_data.doping]
    temps = amset_data.temperatures
    show_mobility = (
        output_parameters["calculate_mobility"] and not amset_data.is_metal
    )
    if show_mobility:
        logger.info(
            "Average conductivity (σ), Seebeck (S) and mobility (μ) results:"
        )
        headers = ("conc [cm⁻³]", "temp [K]", "σ [S/m]", "S [µV/K]", "μ [cm²/Vs]")
    else:
        logger.info("Average conductivity (σ) and Seebeck (S) results:")
        headers = ("conc [cm⁻³]", "temp [K]", "σ [S/m]", "S [µV/K]")
    results_summary = []
    for c, t in np.ndindex(amset_data.fermi_levels.shape):
        row = [
            doping[c],
            temps[t],
            tensor_average(amset_data.conductivity[c, t]),
            tensor_average(amset_data.seebeck[c, t]),
        ]
        if show_mobility:
            row.append(tensor_average(amset_data.mobility["overall"][c, t]))
        results_summary.append(tuple(row))
    logger.info(
        tabulate(
            results_summary,
            headers=headers,
            numalign="right",
            stralign="center",
            floatfmt=(".2e", ".1f", ".2e", ".2e", ".1f"),
        )
    )
    if output_parameters["separate_mobility"] and not amset_data.is_metal:
        labels = amset_data.scattering_labels
        logger.info("Mobility breakdown by scattering mechanism, in cm²/Vs:")
        breakdown = []
        for c, t in np.ndindex(amset_data.fermi_levels.shape):
            row = [doping[c], temps[t]]
            row += [tensor_average(amset_data.mobility[s][c, t]) for s in labels]
            breakdown.append(row)
        logger.info(
            tabulate(
                breakdown,
                headers=["conc [cm⁻³]", "temp [K]"] + labels,
                numalign="right",
                stralign="center",
                floatfmt=[".2e", ".1f"] + [".2e"] * len(labels),
            )
        )
def _get_cutoff_pad(pop_frequency, scattering_type):
    """Return the Fermi-Dirac cutoff padding required for POP scattering.

    Returns 0 when no phonon frequency is given or POP scattering is not
    enabled (explicitly or via "auto").
    """
    pop_enabled = "POP" in scattering_type or scattering_type == "auto"
    if not (pop_frequency and pop_enabled):
        return 0
    # convert from THz to angular frequency in Hz
    angular_frequency = pop_frequency * 1e12 * 2 * np.pi
    # use the phonon energy to pad the fermi dirac cutoffs, this is because
    # pop scattering from a kpoints, k, to kpoints with energies above and below
    # k. We therefore need k-points above and below to be within the cut-offs
    # otherwise scattering cannot occur
    return angular_frequency * hbar * ev_to_hartree
def _get_run_type(directory: Path, input_file: Optional[str]):
if input_file is None:
vr_files = list(directory.glob("*vasprun*"))
bs_files = list(directory.glob("*band_structure_data*"))
if len(vr_files) > 0:
input_file = vr_files[0]
elif len(bs_files) > 0:
input_file = bs_files[0]
else:
raise ValueError(
"Could not find vasprun.xml, vasprun.xml.gz or band_structure_data.json"
f" in {directory}"
)
input_file = str(input_file)
if "xml" in input_file:
run_type = "vasprun"
elif "json" in input_file:
run_type = "band_structure"
else:
raise ValueError(f"Could not determine input file type: {input_file}")
return run_type, input_file
| [
"amset.interpolation.wavefunction.WavefunctionOverlapCalculator.from_file",
"amset.log.initialize_amset_logger",
"monty.serialization.loadfn",
"pathlib.Path",
"amset.io.write_settings",
"amset.util.validate_settings",
"pymatgen.util.string.unicodeify",
"amset.core.transport.solve_boltzman_transport_eq... | [((1437, 1464), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1454, 1464), False, 'import logging\n'), ((15232, 15255), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15253, 15255), False, 'import datetime\n'), ((16063, 16086), 'amset.log.log_banner', 'log_banner', (['"""STRUCTURE"""'], {}), "('STRUCTURE')\n", (16073, 16086), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((16325, 16371), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {'symprec': 'symprec'}), '(structure, symprec=symprec)\n', (16343, 16371), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((16580, 16599), 'amset.log.log_list', 'log_list', (['comp_info'], {}), '(comp_info)\n', (16588, 16599), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((16799, 16821), 'amset.log.log_list', 'log_list', (['lattice_info'], {}), '(lattice_info)\n', (16807, 16821), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((18082, 18104), 'amset.log.log_banner', 'log_banner', (['"""SETTINGS"""'], {}), "('SETTINGS')\n", (18092, 18104), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((18223, 18234), 'amset.log.log_list', 'log_list', (['p'], {}), '(p)\n', (18231, 18234), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((18309, 18337), 'amset.log.log_banner', 'log_banner', (['"""BAND STRUCTURE"""'], {}), "('BAND STRUCTURE')\n", (18319, 18337), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((18685, 18699), 'amset.log.log_list', 'log_list', (['info'], {}), '(info)\n', (18693, 18699), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((19501, 19524), 'amset.log.log_list', 'log_list', (['band_gap_info'], {}), '(band_gap_info)\n', 
(19509, 19524), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((20748, 20762), 'amset.log.log_list', 'log_list', (['info'], {}), '(info)\n', (20756, 20762), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((22098, 22228), 'tabulate.tabulate', 'tabulate', (['results_summary'], {'headers': 'headers', 'numalign': '"""right"""', 'stralign': '"""center"""', 'floatfmt': "('.2e', '.1f', '.2e', '.2e', '.1f')"}), "(results_summary, headers=headers, numalign='right', stralign=\n 'center', floatfmt=('.2e', '.1f', '.2e', '.2e', '.1f'))\n", (22106, 22228), False, 'from tabulate import tabulate\n'), ((1914, 1941), 'amset.util.validate_settings', 'validate_settings', (['settings'], {}), '(settings)\n', (1931, 1941), False, 'from amset.util import tensor_average, validate_settings\n'), ((2401, 2418), 'amset.log.log_banner', 'log_banner', (['"""END"""'], {}), "('END')\n", (2411, 2418), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((2555, 2614), 'amset.log.log_list', 'log_list', (["(timing_info + [f'max memory: {mem_usage:.1f} MB'])"], {}), "(timing_info + [f'max memory: {mem_usage:.1f} MB'])\n", (2563, 2614), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((3555, 3574), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3572, 3574), False, 'import time\n'), ((5204, 5246), 'copy.deepcopy', 'copy.deepcopy', (['amset_data.scattering_rates'], {}), '(amset_data.scattering_rates)\n', (5217, 5246), False, 'import copy\n'), ((7657, 7684), 'amset.log.log_banner', 'log_banner', (['"""INTERPOLATION"""'], {}), "('INTERPOLATION')\n", (7667, 7684), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((7698, 7717), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7715, 7717), False, 'import time\n'), ((7742, 7906), 'amset.interpolation.bandstructure.Interpolator', 'Interpolator', (['self._band_structure'], 
{'num_electrons': 'self._num_electrons', 'interpolation_factor': "self.settings['interpolation_factor']", 'soc': "self.settings['soc']"}), "(self._band_structure, num_electrons=self._num_electrons,\n interpolation_factor=self.settings['interpolation_factor'], soc=self.\n settings['soc'])\n", (7754, 7906), False, 'from amset.interpolation.bandstructure import Interpolator\n'), ((8996, 9013), 'amset.log.log_banner', 'log_banner', (['"""DOS"""'], {}), "('DOS')\n", (9006, 9013), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((9027, 9046), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9044, 9046), False, 'import time\n'), ((9885, 9909), 'amset.log.log_banner', 'log_banner', (['"""SCATTERING"""'], {}), "('SCATTERING')\n", (9895, 9909), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((9923, 9942), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9940, 9942), False, 'import time\n'), ((10088, 10344), 'amset.scattering.calculate.ScatteringCalculator', 'ScatteringCalculator', (['self.settings', 'amset_data', 'cutoff_pad'], {'scattering_type': "self.settings['scattering_type']", 'progress_bar': "self.settings['print_log']", 'cache_wavefunction': "self.settings['cache_wavefunction']", 'nworkers': "self.settings['nworkers']"}), "(self.settings, amset_data, cutoff_pad, scattering_type\n =self.settings['scattering_type'], progress_bar=self.settings[\n 'print_log'], cache_wavefunction=self.settings['cache_wavefunction'],\n nworkers=self.settings['nworkers'])\n", (10108, 10344), False, 'from amset.scattering.calculate import ScatteringCalculator, basic_scatterers\n'), ((10655, 10678), 'amset.log.log_banner', 'log_banner', (['"""TRANSPORT"""'], {}), "('TRANSPORT')\n", (10665, 10678), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((10692, 10711), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10709, 10711), False, 'import time\n'), ((10743, 
10949), 'amset.core.transport.solve_boltzman_transport_equation', 'solve_boltzman_transport_equation', (['amset_data'], {'separate_mobility': "self.settings['separate_mobility']", 'calculate_mobility': "self.settings['calculate_mobility']", 'progress_bar': "self.settings['print_log']"}), "(amset_data, separate_mobility=self.\n settings['separate_mobility'], calculate_mobility=self.settings[\n 'calculate_mobility'], progress_bar=self.settings['print_log'])\n", (10776, 10949), False, 'from amset.core.transport import solve_boltzman_transport_equation\n'), ((11185, 11206), 'amset.log.log_banner', 'log_banner', (['"""RESULTS"""'], {}), "('RESULTS')\n", (11195, 11206), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((11282, 11308), 'os.path.abspath', 'os.path.abspath', (['directory'], {}), '(directory)\n', (11297, 11308), False, 'import os\n'), ((11322, 11341), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11339, 11341), False, 'import time\n'), ((12922, 12975), 'amset.electronic_structure.common.get_band_structure', 'get_band_structure', (['vasprun'], {'zero_weighted': 'zwk_option'}), '(vasprun, zero_weighted=zwk_option)\n', (12940, 12975), False, 'from amset.electronic_structure.common import get_band_structure\n'), ((14238, 14253), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (14242, 14253), False, 'from pathlib import Path\n'), ((14360, 14388), 'amset.io.load_settings', 'load_settings', (['settings_file'], {}), '(settings_file)\n', (14373, 14388), False, 'from amset.io import load_settings, write_settings\n'), ((15156, 15195), 'amset.io.write_settings', 'write_settings', (['self.settings', 'filename'], {}), '(self.settings, filename)\n', (15170, 15195), False, 'from amset.io import load_settings, write_settings\n'), ((21244, 21285), 'numpy.ndindex', 'np.ndindex', (['amset_data.fermi_levels.shape'], {}), '(amset_data.fermi_levels.shape)\n', (21254, 21285), True, 'import numpy as np\n'), ((21786, 
21827), 'numpy.ndindex', 'np.ndindex', (['amset_data.fermi_levels.shape'], {}), '(amset_data.fermi_levels.shape)\n', (21796, 21827), True, 'import numpy as np\n'), ((22599, 22640), 'numpy.ndindex', 'np.ndindex', (['amset_data.fermi_levels.shape'], {}), '(amset_data.fermi_levels.shape)\n', (22609, 22640), True, 'import numpy as np\n'), ((2173, 2235), 'functools.partial', 'partial', (['self._run_wrapper'], {'directory': 'directory', 'prefix': 'prefix'}), '(self._run_wrapper, directory=directory, prefix=prefix)\n', (2180, 2235), False, 'from functools import partial\n'), ((3340, 3446), 'amset.log.initialize_amset_logger', 'initialize_amset_logger', ([], {'directory': 'directory', 'filename': 'log_file', 'print_log': "self.settings['print_log']"}), "(directory=directory, filename=log_file, print_log=\n self.settings['print_log'])\n", (3363, 3446), False, 'from amset.log import initialize_amset_logger, log_banner, log_list\n'), ((4478, 4497), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4495, 4497), False, 'import time\n'), ((11358, 11381), 'os.path.exists', 'os.path.exists', (['abs_dir'], {}), '(abs_dir)\n', (11372, 11381), False, 'import os\n'), ((11395, 11415), 'os.makedirs', 'os.makedirs', (['abs_dir'], {}), '(abs_dir)\n', (11406, 11415), False, 'import os\n'), ((12784, 12828), 'pymatgen.io.vasp.Vasprun', 'Vasprun', (['vasprun'], {'parse_projected_eigen': '(True)'}), '(vasprun, parse_projected_eigen=True)\n', (12791, 12828), False, 'from pymatgen.io.vasp import Vasprun\n'), ((15099, 15114), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (15103, 15114), False, 'from pathlib import Path\n'), ((2636, 2659), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2657, 2659), False, 'import datetime\n'), ((2701, 2724), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2722, 2724), False, 'import datetime\n'), ((8463, 8621), 'amset.interpolation.projections.ProjectionOverlapCalculator.from_band_structure', 
'ProjectionOverlapCalculator.from_band_structure', (['self._band_structure'], {'energy_cutoff': "self.settings['energy_cutoff']", 'symprec': "self.settings['symprec']"}), "(self._band_structure,\n energy_cutoff=self.settings['energy_cutoff'], symprec=self.settings[\n 'symprec'])\n", (8510, 8621), False, 'from amset.interpolation.projections import ProjectionOverlapCalculator\n'), ((8723, 8811), 'amset.interpolation.wavefunction.WavefunctionOverlapCalculator.from_file', 'WavefunctionOverlapCalculator.from_file', (["self.settings['wavefunction_coefficients']"], {}), "(self.settings[\n 'wavefunction_coefficients'])\n", (8762, 8811), False, 'from amset.interpolation.wavefunction import WavefunctionOverlapCalculator\n'), ((8927, 8946), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8944, 8946), False, 'import time\n'), ((9809, 9828), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9826, 9828), False, 'import time\n'), ((10580, 10599), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10597, 10599), False, 'import time\n'), ((11093, 11112), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11110, 11112), False, 'import time\n'), ((12183, 12202), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12200, 12202), False, 'import time\n'), ((14693, 14711), 'monty.serialization.loadfn', 'loadfn', (['input_file'], {}), '(input_file)\n', (14699, 14711), False, 'from monty.serialization import loadfn\n'), ((16473, 16492), 'pymatgen.util.string.unicodeify', 'unicodeify', (['formula'], {}), '(formula)\n', (16483, 16492), False, 'from pymatgen.util.string import unicodeify, unicodeify_spacegroup\n'), ((21380, 21425), 'amset.util.tensor_average', 'tensor_average', (['amset_data.conductivity[c, t]'], {}), '(amset_data.conductivity[c, t])\n', (21394, 21425), False, 'from amset.util import tensor_average, validate_settings\n'), ((21443, 21483), 'amset.util.tensor_average', 'tensor_average', (['amset_data.seebeck[c, t]'], {}), 
'(amset_data.seebeck[c, t])\n', (21457, 21483), False, 'from amset.util import tensor_average, validate_settings\n'), ((21501, 21553), 'amset.util.tensor_average', 'tensor_average', (["amset_data.mobility['overall'][c, t]"], {}), "(amset_data.mobility['overall'][c, t])\n", (21515, 21553), False, 'from amset.util import tensor_average, validate_settings\n'), ((21922, 21967), 'amset.util.tensor_average', 'tensor_average', (['amset_data.conductivity[c, t]'], {}), '(amset_data.conductivity[c, t])\n', (21936, 21967), False, 'from amset.util import tensor_average, validate_settings\n'), ((21985, 22025), 'amset.util.tensor_average', 'tensor_average', (['amset_data.seebeck[c, t]'], {}), '(amset_data.seebeck[c, t])\n', (21999, 22025), False, 'from amset.util import tensor_average, validate_settings\n'), ((22710, 22754), 'amset.util.tensor_average', 'tensor_average', (['amset_data.mobility[s][c, t]'], {}), '(amset_data.mobility[s][c, t])\n', (22724, 22754), False, 'from amset.util import tensor_average, validate_settings\n'), ((12062, 12075), 'pathlib.Path', 'Path', (['abs_dir'], {}), '(abs_dir)\n', (12066, 12075), False, 'from pathlib import Path\n'), ((6072, 6120), 'pathlib.Path', 'Path', (["self.settings['wavefunction_coefficients']"], {}), "(self.settings['wavefunction_coefficients'])\n", (6076, 6120), False, 'from pathlib import Path\n'), ((11956, 11969), 'pathlib.Path', 'Path', (['abs_dir'], {}), '(abs_dir)\n', (11960, 11969), False, 'from pathlib import Path\n'), ((17904, 17916), 'pymatgen.core.tensors.Tensor', 'Tensor', (['prop'], {}), '(prop)\n', (17910, 17916), False, 'from pymatgen.core.tensors import Tensor\n'), ((18028, 18040), 'pymatgen.core.tensors.Tensor', 'Tensor', (['prop'], {}), '(prop)\n', (18034, 18040), False, 'from pymatgen.core.tensors import Tensor\n')] |
import pathlib
import matplotlib.pyplot as plt
import numpy as np
from quantile_dotplot import ntile_dotplot
import matplotlib
if __name__ == "__main__":
    # Quantile dotplot of simulated bus arrival times, saved next to this script.
    script_dir = pathlib.Path(__file__).resolve().parent
    samples = np.random.lognormal(mean=np.log(11.4), sigma=0.2, size=1_000_000)
    fig, ax = plt.subplots(figsize=(10, 7))
    ax = ntile_dotplot(samples, dots=20, edgecolor="k", linewidth=2, ax=ax)
    ax.set_xlabel("Minutes to bus")
    # Strip every spine except the bottom axis so only the dots remain.
    for side in ("left", "right", "top"):
        ax.spines[side].set_visible(False)
    ax.yaxis.set_visible(False)
    fig.savefig(script_dir / "figures" / "bus_times.png")
| [
"pathlib.Path",
"matplotlib.pyplot.subplots",
"numpy.log",
"quantile_dotplot.ntile_dotplot"
] | [((224, 253), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (236, 253), True, 'import matplotlib.pyplot as plt\n'), ((341, 404), 'quantile_dotplot.ntile_dotplot', 'ntile_dotplot', (['data'], {'dots': '(20)', 'edgecolor': '"""k"""', 'linewidth': '(2)', 'ax': 'ax'}), "(data, dots=20, edgecolor='k', linewidth=2, ax=ax)\n", (354, 404), False, 'from quantile_dotplot import ntile_dotplot\n'), ((290, 302), 'numpy.log', 'np.log', (['(11.4)'], {}), '(11.4)\n', (296, 302), True, 'import numpy as np\n'), ((169, 191), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (181, 191), False, 'import pathlib\n')] |
import os
import re
import sys
from operator import itemgetter
from os import path
import numpy as np
def process_latencies_file(file_path):
    """Return every '[latency: N ms]' value found in the file, one int per line."""
    latency_re = re.compile(r'\[latency: (-?\d+) ms\]')
    with open(file_path, 'r') as f:
        lines = f.readlines()
    return [int(latency_re.search(line).group(1)) for line in lines]
def get_flink_latencies(result_path):
    """Collect per-event latencies from a Flink run directory and return the
    (10th, 50th, 90th) percentiles.

    Latencies are read from out.txt plus, if present, trans.txt — which may
    be either a single file or a directory of per-task files.
    """
    # Parse out.txt
    out_file = path.join(result_path, 'out.txt')
    latencies = process_latencies_file(out_file)
    # Parse trans.txt if it is a file
    trans_file = path.join(result_path, 'trans.txt')
    if path.isfile(trans_file):
        latencies += process_latencies_file(trans_file)
    # Parse trans.txt if it is a dir
    if path.isdir(trans_file):
        for ls in (process_latencies_file(path.join(trans_file, file))
                   for file in os.listdir(trans_file)
                   if path.isfile(path.join(trans_file, file))):
            latencies += ls
    p10 = np.percentile(latencies, 10)
    p50 = np.percentile(latencies, 50)
    p90 = np.percentile(latencies, 90)
    return p10, p50, p90
def get_flink_throughput(result_path):
    """Parse 'Mean throughput (events/ms): X' from stats.txt.

    Returns the throughput as a float, or None if no matching line exists.
    """
    pattern = re.compile(r'Mean throughput \(events/ms\): ([0-9.]+)')
    stats_path = path.join(result_path, 'stats.txt')
    with open(stats_path, 'r') as stats:
        for line in stats:
            m = pattern.match(line)
            if m is not None:
                return float(m.group(1))
# Old code for processing Erlang latencies from lib.py
# TODO: Make it a bit cleaner
def parse_vb_producer_line(line):
    """Split a value-barrier producer log line into (message, timestamp)."""
    segments = line.split("}")
    timestamp = segments[-2].split(',')[-1]
    message = line[2:].split("}")[0] + '}'
    return message, int(timestamp)
def parse_vb_sink_line(line):
    """Split a value-barrier sink log line into (message, timestamp)."""
    segments = line.split("}")
    timestamp = segments[-2].split(',')[-1]
    inner = line[2:].split("}")[0].split('{')[-1]
    return '{' + inner + '}', int(timestamp)
def parse_stream_table_join_producer_line(line):
    """Split a stream-table-join producer log line into (message, timestamp)."""
    timestamp = int(line.split(',')[-1].rstrip('\n}'))
    body = line.split('}}')[0].lstrip('{')
    return '{{' + body + '}}', timestamp
def parse_stream_table_join_sink_line(line):
    """Split a stream-table-join sink log line into (message, timestamp)."""
    timestamp = int(line.split(',')[-1].rstrip('\n}'))
    body = line.split('}}')[0].lstrip('{')
    return '{{' + body + '}}', timestamp
def parse_full_vb_producer_line(line):
    """Split a full-value-barrier producer log line into (message, timestamp)."""
    timestamp = line.split("}")[-2].split(',')[-1]
    if line[4] == 'a':
        # Lines whose 5th character is 'a' carry two '}'-delimited segments.
        message = "}".join(line[2:].split("}")[0:2]) + "}"
    else:
        message = line[2:].split("}")[0] + '}'
    return message, int(timestamp)
def parse_full_vb_sink_line(line):
    """Split a full-value-barrier sink log line into (message, timestamp)."""
    timestamp = line.split("}")[-2].split(',')[-1]
    if line[4] == 'a':
        # Lines whose 5th character is 'a' carry two '}'-delimited segments.
        message = "}".join(line[2:].split("}")[0:2]) + "}"
    else:
        message = '{' + line[2:].split("}")[0].split('{')[-1] + '}'
    return message, int(timestamp)
## Here we need to register all different producer-sink line-parsing
## functions for different experiments
def parse_producer_line(line, experiment):
    """Dispatch to the experiment-specific producer-line parser."""
    parsers = {
        "value-barrier": parse_vb_producer_line,
        "stream-table-join": parse_stream_table_join_producer_line,
        "full-value-barrier": parse_full_vb_producer_line,
    }
    parser = parsers.get(experiment)
    if parser is None:
        print("Error: Don't know how to parse producer lines for {} experiment!".format(experiment))
        exit(1)
    return parser(line)
def parse_sink_line(line, experiment):
    """Dispatch to the experiment-specific sink-line parser."""
    parsers = {
        "value-barrier": parse_vb_sink_line,
        "stream-table-join": parse_stream_table_join_sink_line,
        "full-value-barrier": parse_full_vb_sink_line,
    }
    parser = parsers.get(experiment)
    if parser is None:
        print("Error: Don't know how to parse sink lines for {} experiment!".format(experiment))
        exit(1)
    return parser(line)
def read_preprocess_latency_data(log_dir_name, experiment="value-barrier"):
    """Pair producer and sink log messages and derive per-event latencies.

    Reads every producer_<...> and sink_<...> file in log_dir_name, matches
    messages across the two sides, and returns (timestamps, latencies) —
    both rebased/scaled by dividing raw values by 1e6 (presumably ns -> ms;
    TODO confirm against the log writers). Returns ([0], [0]) if no events
    were matched.
    """
    log_file_names = os.listdir(log_dir_name)
    producer_file_names = [path.join(log_dir_name, filename)
                           for filename in log_file_names
                           if filename.startswith('producer_<')]
    sink_file_names = [path.join(log_dir_name, filename)
                       for filename in log_file_names
                       if filename.startswith('sink_<')]
    # Map message -> send timestamp from all producer logs.
    producer_dic = {}
    for producer_file_name in producer_file_names:
        with open(producer_file_name) as file:
            producer_dic.update(parse_producer_line(line, experiment) for line in file.readlines())
    # Map message -> receive timestamp from all sink logs.
    sink_dic = {}
    for sink_file_name in sink_file_names:
        with open(sink_file_name) as file:
            sink_dic.update(parse_sink_line(line, experiment) for line in file.readlines())
    if(experiment == "full-value-barrier"):
        # For this experiment some sink messages may lack a producer match;
        # they are skipped (and a warning printed) rather than raising KeyError.
        unsorted_latency_pairs = [(sink_dic[msg] - producer_dic[msg], sink_dic[msg])
                                  for msg in sink_dic.keys() if msg in producer_dic]
        not_found_keys = [msg for msg in sink_dic.keys() if not msg in producer_dic]
        if(len(not_found_keys) > 0):
            print(" !! {} keys not found:".format(len(not_found_keys)))
    else:
        unsorted_latency_pairs = [(sink_dic[msg] - producer_dic[msg], sink_dic[msg])
                                  for msg in sink_dic.keys()]
    # Sort events chronologically by their sink timestamp.
    latency_pairs = sorted(unsorted_latency_pairs, key=itemgetter(1))
    ## Raw values are divided by 1e6 below to convert to milliseconds.
    raw_timestamps = [ts for lat, ts in latency_pairs]
    if not raw_timestamps:
        print(f'{log_dir_name}: No raw timestamps!')
        return [0], [0]
    first_ts = raw_timestamps[0]
    timestamps = [(ts - first_ts) / 1_000_000.0 for ts in raw_timestamps]
    latencies = [lat / 1_000_000.0 for lat, ts in latency_pairs]
    return timestamps, latencies
def get_erlang_latencies(result_path, experiment='value-barrier'):
    """Return the (10th, 50th, 90th) latency percentiles for an Erlang run."""
    _, latencies = read_preprocess_latency_data(result_path, experiment)
    p10, p50, p90 = np.percentile(latencies, [10, 50, 90])
    return p10, p50, p90
# Processing Erlang throughputs
def get_flumina_net_runtime(log_dir):
    """Return the net runtime of a Flumina run in milliseconds.

    Start time is taken from the first line of producers_time.log and end
    time from the second line of sink_stats.log; both are nanosecond values
    written after a ':' separator.
    """
    producer_filename = path.join(log_dir, 'producers_time.log')
    with open(producer_filename) as file:
        lines = file.readlines()
        start_time_ns = int(lines[0].split(':')[-1])
    sink_filename = path.join(log_dir, 'sink_stats.log')
    with open(sink_filename) as file:
        lines = file.readlines()
        end_time_ns = int(lines[1].split(':')[-1])
    # Convert nanoseconds to milliseconds.
    net_runtime_ms = (end_time_ns - start_time_ns) / 1000000
    return net_runtime_ms
def get_events_processed(log_dir):
    """Read the total event count from the first line of experiment_stats.log."""
    stats_path = path.join(log_dir, 'experiment_stats.log')
    with open(stats_path) as stats_file:
        first_line = stats_file.readlines()[0]
    return int(first_line.split(':')[-1])
def get_erlang_throughput(log_dir):
    """Mean throughput (events/ms) of a Flumina run, or 0 if the logs are unusable.

    Returning 0 instead of raising lets batch post-processing skip
    incomplete or corrupted run directories.
    """
    try:
        runtime = get_flumina_net_runtime(log_dir)
        events = get_events_processed(log_dir)
        return events / runtime
    # A bare `except:` here would also swallow SystemExit/KeyboardInterrupt;
    # only the failures a broken run directory can actually produce are caught.
    except (OSError, IndexError, ValueError, ZeroDivisionError):
        return 0
# NS3 log parsing
def get_network_stats_file(log_dir):
    """Locate the unique ns3*stats.txt file in log_dir; exit if absent or ambiguous."""
    matches = [name for name in os.listdir(log_dir)
               if name.startswith('ns3') and name.endswith('stats.txt')]
    if not matches:
        sys.exit('Network statistics file not found!')
    if len(matches) > 1:
        sys.exit('Multiple network statistics files found!')
    return path.join(log_dir, matches[0])
def get_network_data(log_dir):
    """Return the total IPv4 byte count recorded in the run's network stats file.

    The first line of the stats file has the form
    'Total IPv4 data: 459688780'. The number is parsed after the colon
    rather than by slicing at a fixed column (the old `first_line[17:]`),
    so a change in the label text no longer corrupts the value.
    """
    stats_file = get_network_stats_file(log_dir)
    with open(stats_file) as f:
        first_line = f.readline()
    return int(first_line.split(':')[-1])
| [
"os.path.isdir",
"re.match",
"numpy.percentile",
"os.path.isfile",
"re.search",
"operator.itemgetter",
"os.path.join",
"os.listdir",
"sys.exit",
"re.compile"
] | [((158, 198), 're.compile', 're.compile', (['"""\\\\[latency: (-?\\\\d+) ms\\\\]"""'], {}), "('\\\\[latency: (-?\\\\d+) ms\\\\]')\n", (168, 198), False, 'import re\n'), ((390, 423), 'os.path.join', 'path.join', (['result_path', '"""out.txt"""'], {}), "(result_path, 'out.txt')\n", (399, 423), False, 'from os import path\n'), ((529, 564), 'os.path.join', 'path.join', (['result_path', '"""trans.txt"""'], {}), "(result_path, 'trans.txt')\n", (538, 564), False, 'from os import path\n'), ((572, 595), 'os.path.isfile', 'path.isfile', (['trans_file'], {}), '(trans_file)\n', (583, 595), False, 'from os import path\n'), ((698, 720), 'os.path.isdir', 'path.isdir', (['trans_file'], {}), '(trans_file)\n', (708, 720), False, 'from os import path\n'), ((951, 979), 'numpy.percentile', 'np.percentile', (['latencies', '(10)'], {}), '(latencies, 10)\n', (964, 979), True, 'import numpy as np\n'), ((990, 1018), 'numpy.percentile', 'np.percentile', (['latencies', '(50)'], {}), '(latencies, 50)\n', (1003, 1018), True, 'import numpy as np\n'), ((1029, 1057), 'numpy.percentile', 'np.percentile', (['latencies', '(90)'], {}), '(latencies, 90)\n', (1042, 1057), True, 'import numpy as np\n'), ((1141, 1176), 'os.path.join', 'path.join', (['result_path', '"""stats.txt"""'], {}), "(result_path, 'stats.txt')\n", (1150, 1176), False, 'from os import path\n'), ((1202, 1258), 're.compile', 're.compile', (['"""Mean throughput \\\\(events/ms\\\\): ([0-9.]+)"""'], {}), "('Mean throughput \\\\(events/ms\\\\): ([0-9.]+)')\n", (1212, 1258), False, 'import re\n'), ((4042, 4066), 'os.listdir', 'os.listdir', (['log_dir_name'], {}), '(log_dir_name)\n', (4052, 4066), False, 'import os\n'), ((6140, 6168), 'numpy.percentile', 'np.percentile', (['latencies', '(10)'], {}), '(latencies, 10)\n', (6153, 6168), True, 'import numpy as np\n'), ((6179, 6207), 'numpy.percentile', 'np.percentile', (['latencies', '(50)'], {}), '(latencies, 50)\n', (6192, 6207), True, 'import numpy as np\n'), ((6218, 6246), 
'numpy.percentile', 'np.percentile', (['latencies', '(90)'], {}), '(latencies, 90)\n', (6231, 6246), True, 'import numpy as np\n'), ((6369, 6409), 'os.path.join', 'path.join', (['log_dir', '"""producers_time.log"""'], {}), "(log_dir, 'producers_time.log')\n", (6378, 6409), False, 'from os import path\n'), ((6559, 6595), 'os.path.join', 'path.join', (['log_dir', '"""sink_stats.log"""'], {}), "(log_dir, 'sink_stats.log')\n", (6568, 6595), False, 'from os import path\n'), ((6857, 6899), 'os.path.join', 'path.join', (['log_dir', '"""experiment_stats.log"""'], {}), "(log_dir, 'experiment_stats.log')\n", (6866, 6899), False, 'from os import path\n'), ((7663, 7691), 'os.path.join', 'path.join', (['log_dir', 'files[0]'], {}), '(log_dir, files[0])\n', (7672, 7691), False, 'from os import path\n'), ((4094, 4127), 'os.path.join', 'path.join', (['log_dir_name', 'filename'], {}), '(log_dir_name, filename)\n', (4103, 4127), False, 'from os import path\n'), ((4274, 4307), 'os.path.join', 'path.join', (['log_dir_name', 'filename'], {}), '(log_dir_name, filename)\n', (4283, 4307), False, 'from os import path\n'), ((7518, 7564), 'sys.exit', 'sys.exit', (['"""Network statistics file not found!"""'], {}), "('Network statistics file not found!')\n", (7526, 7564), False, 'import sys\n'), ((1338, 1372), 're.match', 're.match', (['throughput_pattern', 'line'], {}), '(throughput_pattern, line)\n', (1346, 1372), False, 'import re\n'), ((5554, 5567), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (5564, 5567), False, 'from operator import itemgetter\n'), ((7414, 7433), 'os.listdir', 'os.listdir', (['log_dir'], {}), '(log_dir)\n', (7424, 7433), False, 'import os\n'), ((7599, 7651), 'sys.exit', 'sys.exit', (['"""Multiple network statistics files found!"""'], {}), "('Multiple network statistics files found!')\n", (7607, 7651), False, 'import sys\n'), ((764, 791), 'os.path.join', 'path.join', (['trans_file', 'file'], {}), '(trans_file, file)\n', (773, 791), False, 'from os import 
path\n'), ((824, 846), 'os.listdir', 'os.listdir', (['trans_file'], {}), '(trans_file)\n', (834, 846), False, 'import os\n'), ((881, 908), 'os.path.join', 'path.join', (['trans_file', 'file'], {}), '(trans_file, file)\n', (890, 908), False, 'from os import path\n'), ((253, 277), 're.search', 're.search', (['pattern', 'line'], {}), '(pattern, line)\n', (262, 277), False, 'import re\n')] |
import numpy as np
import pandas as pd
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Lithology/facies name -> hex fill colour used by plot_facies for the
# facies-track colormap. Names absent from this table fall back to white.
color_dict = {'Aeolian Sandstone': '#ffffe0',
              'Anhydrite': '#ff80ff',
              'Argillaceous Limestone': '#1e90ff',
              'Arkose': '#eedd82',
              'Basement': '#fa8072',
              'Biogenic Ooze': '#CCCC00',
              'Calcareous Cement': '#00ffff',
              'Calcareous Debris Flow': '#40e0d0',
              'Calcareous Shale': '#008b8b',
              'Carnallite': '#ff00ff',
              'Chalk': '#6a5acd',
              'Cinerite': '#00ffff',
              'Coal': '#000000',
              'Conglomerate': '#ffffe0',
              'Cross Bedded Sst': '#ffd700',
              'Dolomite': '#00ffff',
              'Gap': '#ffffff',
              'Halite': '#ffc0cb',
              'Kaïnite': '#fff0f5',
              'Limestone': '#6a5acd',
              'Marlstone': '#00bfff',
              'Metamorphic Rock': '#008b8b',
              'Plutonic Rock': '#ff0000',
              'Polyhalite': '#ffb6c1',
              'Porous Limestone': '#6a5acd',
              'Sandstone': '#ffff00',
              'Sandy Silt': '#d2b48c',
              'Shale': '#008b8b',
              'Shaly Silt': '#CCCC00',
              'Silt': '#ffa07a',
              'Silty Sand': '#ffffe0',
              'Silty Shale': '#006400',
              'Spiculite': '#939799',
              'Sylvinite': '#ff80ff',
              'Volcanic Rock': '#ffa500',
              'Volcanic Tuff': '#ff6347',
              }
def remove_unused_categories(cats):
    """Rebuild every pd.Categorical in `cats` over the sorted union of the
    values actually present across all of them, so they share one category set."""
    observed = set()
    for cat in cats:
        observed.update(set(cat))
    shared = sorted(observed)
    return [pd.Categorical(cat, categories=shared) for cat in cats]
def plot_facies(facies:pd.Categorical, ax=None, colorbar=True, xlabel='Facies'):
    """Render a facies log as a coloured vertical strip on `ax`.

    Facies must be a pandas Categorical, e.g. pd.Categorical(['Sst', 'Lmst', 'Sst']).
    Colours come from color_dict (white for unknown names). If `colorbar` is
    True, a labelled colorbar is added to the right of the axis.
    """
    if ax is None:
        ax = plt.gca()
    facies_colors = [color_dict.get(f, 'white') for f in facies.categories]
    # Plot facies as an image: repeat the code column 100x to give it width.
    cluster=np.repeat(np.expand_dims(facies.codes,1), 100, 1)
    # custom qualitative colormap
    cmap_facies = colors.ListedColormap(
            facies_colors[0:len(facies_colors)], 'indexed')
    # the 0.5 offsets centre each integer code within its colour band
    im=ax.imshow(cluster, interpolation='none', aspect='auto',
                    cmap=cmap_facies,vmin=-0.5,vmax=len(facies.categories)-0.5)
    ax.set_xlabel(xlabel)
    divider = make_axes_locatable(ax)
    if colorbar:
        cax = divider.append_axes("right", size="20%", pad=0.05)
        # modified from https://gist.github.com/jakevdp/8a992f606899ac24b711
        # This function formatter will replace integers with target names
        formatter = plt.FuncFormatter(lambda val, loc: facies.categories[val])
        # We must be sure to specify the ticks matching our target names
        plt.colorbar(im, ticks=range(len(facies.categories)), format=formatter, cax=cax)
    ax.set_xticklabels([])
def plot_well(well_name:str, logs:pd.DataFrame, facies:pd.Categorical, figsize=(8, 12)):
    """Plot standard wireline logs (GR, CALI, resistivities, RHOB/NPHI) plus a facies track.

    Parameters
    ----------
    well_name : str
        Used in the figure title.
    logs : pd.DataFrame
        Must contain DEPT, GR, CALI, RSHA, RMED, RDEP, RHOB and NPHI columns.
    facies : pd.Categorical
        One facies label per depth sample.
    figsize : tuple
        Figure size in inches.

    Returns
    -------
    (fig, axes) : the matplotlib figure and its array of axes.
    """
    ztop = logs.DEPT.min()
    zbot = logs.DEPT.max()
    # Bug fix: figsize was previously ignored (a hard-coded (8, 12) was passed).
    f, ax = plt.subplots(nrows=1, ncols=5, figsize=figsize)
    ax[0].plot(logs.GR, logs.DEPT, '-g')
    ax[0].set_xlabel("GR")
    ax[1].plot(logs.CALI, logs.DEPT, '-')
    ax[1].set_xlabel("CALI")
    # Three resistivity curves share one track, distinguished by colour/alpha.
    ax[2].plot(logs.RSHA, logs.DEPT, '-b', alpha=0.9)
    ax[2].plot(logs.RMED, logs.DEPT, '-g', alpha=0.5)
    ax[2].plot(logs.RDEP, logs.DEPT, '-r', alpha=0.4)
    ax[2].set_xlabel("RDEP (r) & RMED (g)\n & RSHA (b)")
    # Density and neutron porosity share a track via a twinned x-axis.
    ax[3].plot(logs.RHOB, logs.DEPT, '-')
    ax3b = ax[3].twiny()
    ax3b.plot(logs.NPHI, logs.DEPT, '-g')
    ax[3].set_xlabel("RHOB (b)")
    ax3b.set_xlabel("NPHI (g)")
    # Restrict the category set to facies actually present in this well.
    [facies] = remove_unused_categories([facies])
    plot_facies(facies, ax[-1])
    for i in range(len(ax)-1):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)
    # Only the left-most track keeps its depth labels.
    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    ax[4].set_yticklabels([])
    ax[-1].set_xticklabels([])
    f.suptitle('Well: %s'%well_name, fontsize=14,y=0.94)
    return f, ax
def plot_well_pred(well_name:str, logs:pd.DataFrame, facies_true:pd.Categorical, facies_pred=pd.Categorical, figsize=(8, 12)):
    """Plot wireline logs with side-by-side predicted and true facies tracks.

    `logs` must contain DEPT, GR, CALI, RDEP, RMED and RHOB columns;
    `facies_true`/`facies_pred` hold one label per depth sample.
    Returns (fig, axes).

    NOTE(review): the default `facies_pred=pd.Categorical` is the *class*,
    not an instance — calling without facies_pred would fail downstream;
    presumably the argument is always supplied. Verify against callers.
    """
    ztop=logs.DEPT.min(); zbot=logs.DEPT.max()
    f, ax = plt.subplots(nrows=1, ncols=6, figsize=figsize)
    ax[0].plot(logs.GR, logs.DEPT, '-g')
    ax[0].set_xlabel("GR")
    ax[1].plot(logs.CALI, logs.DEPT, '-')
    ax[1].set_xlabel("CALI")
    ax[2].plot(logs.RDEP, logs.DEPT, '-r', alpha=0.7)
    ax[2].plot(logs.RMED, logs.DEPT, '-g', alpha=0.7)
    ax[2].set_xlim(logs.RDEP.min(),100)
    ax[2].set_xlabel("RDEP (r) & RMED (g)")
    ax[3].plot(logs.RHOB, logs.DEPT, '-')
    ax[3].set_xlabel("RHOB")
    # Give both facies tracks the same cleaned category set so colours align.
    [facies_pred, facies_true] = remove_unused_categories([facies_pred, facies_true])
    assert (facies_pred.categories == facies_true.categories).all()
    plot_facies(facies_pred, ax=ax[4], colorbar=False, xlabel='Facies (pred)')
    plot_facies(facies_true, ax=ax[5], xlabel='Facies (true)')
    for i in range(len(ax)-2):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)
    for i in range(1, len(ax)):
        ax[i].set_yticklabels([])
    ax[-2].set_xticklabels([])
    ax[-1].set_xticklabels([])
    f.suptitle('Well: %s'%well_name, fontsize=14,y=0.94)
    return f, ax
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.pyplot.FuncFormatter",
"numpy.std",
"numpy.expand_dims",
"pandas.Categorical",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots"
] | [((2303, 2326), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2322, 2326), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2988, 3035), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(5)', 'figsize': '(8, 12)'}), '(nrows=1, ncols=5, figsize=(8, 12))\n', (3000, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3410), 'numpy.std', 'np.std', (['res.values'], {}), '(res.values)\n', (3398, 3410), True, 'import numpy as np\n'), ((4393, 4440), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(6)', 'figsize': 'figsize'}), '(nrows=1, ncols=6, figsize=figsize)\n', (4405, 4440), True, 'import matplotlib.pyplot as plt\n'), ((1431, 1475), 'pandas.Categorical', 'pd.Categorical', (['c'], {'categories': 'new_categories'}), '(c, categories=new_categories)\n', (1445, 1475), True, 'import pandas as pd\n'), ((1752, 1761), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1759, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1923), 'numpy.expand_dims', 'np.expand_dims', (['facies.codes', '(1)'], {}), '(facies.codes, 1)\n', (1906, 1923), True, 'import numpy as np\n'), ((2581, 2639), 'matplotlib.pyplot.FuncFormatter', 'plt.FuncFormatter', (['(lambda val, loc: facies.categories[val])'], {}), '(lambda val, loc: facies.categories[val])\n', (2598, 2639), True, 'import matplotlib.pyplot as plt\n')] |
from ENVS.Envs import PendulumEnv
from PPO_TD_Lambda.model import MLPContiControlModel, MLPEvaluateModel
from PPO_TD_Lambda.algo_2 import PPOTDLambda
from PPO_TD_Lambda.algo import PPOTDLamda as PPO1
from TOOLS.Logger import LoggerPrinter
import numpy as np
"""
本测试完成了对PPO_TD_Lambda算法在倒立摆上的运行效果,
game_index=1: 在algo实现上测试
game_index=2: 在algo_2实现上测试
PPO算法在倒立摆问题上控制效果都差一些,只有对奖励函数做一些修正,即(rew+8)/8之后才能获得较好的效果。
这说明了奖励信号如果是连续的,那么最后通过一定的数值计算来修正,确保奖励信号的分布和估值网络的初始分布向接近。
"""
def main(game_index, game_mode):
    """Run the PPO TD(lambda) pendulum experiment.

    game_index selects the implementation under test (1 = PPO_TD_Lambda.algo,
    2 = PPO_TD_Lambda.algo_2); game_mode == 'TRAIN' starts training.
    Both branches use identical hyper-parameters so the two implementations
    can be compared on the same setup.
    """
    logger = LoggerPrinter()
    if game_index == 1:
        exp_name = 'Pendulum'
        env = PendulumEnv(logger=logger)
        act_lim = np.array([2., ])
        gamma = 0.90
        learn_epoch = 900
        max_control_len = 200
        clip_ratio = 0.2
    elif game_index == 2:
        exp_name = 'Pendulum'
        env = PendulumEnv(logger=logger)
        act_lim = np.array([2., ])
        gamma = 0.90
        learn_epoch = 900
        max_control_len = 200
        clip_ratio = 0.2
    # Both implementations share the same continuous-control policy network.
    if game_index in [1, 2]:
        policy_model = MLPContiControlModel(env.obs_dim, env.act_dim, hidden_size=(100,), hd_activation='ReLU',
                                            max_control_lim=act_lim, logger=logger)
    else:
        policy_model = None
    evaluate_model = MLPEvaluateModel(env.obs_dim, hidden_size=(100,), hd_activation='ReLU', logger=logger)
    if game_mode == 'TRAIN':
        if game_index == 1:
            ppotdlambda = PPOTDLambda(env=env, policy_model=policy_model, evaluate_model=evaluate_model,
                                      model_dir='MODEL_PARAMS', exp_name=exp_name, logger=logger, gamma=gamma,
                                      clip_ratio=clip_ratio, policy_lr=0.0001, evaluate_lr=0.0002,
                                      conti_act_lim=act_lim)
            ppotdlambda.train(retrain_label=False, max_iter_per_epoch=max_control_len, learning_epoch=learn_epoch,
                              mini_batch=32, update_num=10, save_freq=100)
        elif game_index == 2:
            ppotdlambda = PPO1(env=env, policy_model=policy_model, evaluate_model=evaluate_model,
                               model_save_dir='MODEL_PARAMS', exp_name=exp_name, logger=logger, gamma=gamma,
                               clip_ratio=clip_ratio, policy_lr=0.0001, evaluate_lr=0.0001, is_ou_noise=False,
                               act_lim=act_lim)
            ppotdlambda.train(1000, 200, 32, False, 10, 100)
if __name__ == '__main__':
    # Default run: algo_2 implementation, training mode.
    game_index = 2
    game_mode = 'TRAIN'
    main(game_index, game_mode)
| [
"PPO_TD_Lambda.model.MLPContiControlModel",
"PPO_TD_Lambda.algo.PPOTDLamda",
"PPO_TD_Lambda.model.MLPEvaluateModel",
"numpy.array",
"PPO_TD_Lambda.algo_2.PPOTDLambda",
"ENVS.Envs.PendulumEnv",
"TOOLS.Logger.LoggerPrinter"
] | [((529, 544), 'TOOLS.Logger.LoggerPrinter', 'LoggerPrinter', ([], {}), '()\n', (542, 544), False, 'from TOOLS.Logger import LoggerPrinter\n'), ((1319, 1409), 'PPO_TD_Lambda.model.MLPEvaluateModel', 'MLPEvaluateModel', (['env.obs_dim'], {'hidden_size': '(100,)', 'hd_activation': '"""ReLU"""', 'logger': 'logger'}), "(env.obs_dim, hidden_size=(100,), hd_activation='ReLU',\n logger=logger)\n", (1335, 1409), False, 'from PPO_TD_Lambda.model import MLPContiControlModel, MLPEvaluateModel\n'), ((616, 642), 'ENVS.Envs.PendulumEnv', 'PendulumEnv', ([], {'logger': 'logger'}), '(logger=logger)\n', (627, 642), False, 'from ENVS.Envs import PendulumEnv\n'), ((662, 677), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (670, 677), True, 'import numpy as np\n'), ((1083, 1215), 'PPO_TD_Lambda.model.MLPContiControlModel', 'MLPContiControlModel', (['env.obs_dim', 'env.act_dim'], {'hidden_size': '(100,)', 'hd_activation': '"""ReLU"""', 'max_control_lim': 'act_lim', 'logger': 'logger'}), "(env.obs_dim, env.act_dim, hidden_size=(100,),\n hd_activation='ReLU', max_control_lim=act_lim, logger=logger)\n", (1103, 1215), False, 'from PPO_TD_Lambda.model import MLPContiControlModel, MLPEvaluateModel\n'), ((858, 884), 'ENVS.Envs.PendulumEnv', 'PendulumEnv', ([], {'logger': 'logger'}), '(logger=logger)\n', (869, 884), False, 'from ENVS.Envs import PendulumEnv\n'), ((904, 919), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (912, 919), True, 'import numpy as np\n'), ((1494, 1743), 'PPO_TD_Lambda.algo_2.PPOTDLambda', 'PPOTDLambda', ([], {'env': 'env', 'policy_model': 'policy_model', 'evaluate_model': 'evaluate_model', 'model_dir': '"""MODEL_PARAMS"""', 'exp_name': 'exp_name', 'logger': 'logger', 'gamma': 'gamma', 'clip_ratio': 'clip_ratio', 'policy_lr': '(0.0001)', 'evaluate_lr': '(0.0002)', 'conti_act_lim': 'act_lim'}), "(env=env, policy_model=policy_model, evaluate_model=\n evaluate_model, model_dir='MODEL_PARAMS', exp_name=exp_name, logger=\n logger, gamma=gamma, 
clip_ratio=clip_ratio, policy_lr=0.0001,\n evaluate_lr=0.0002, conti_act_lim=act_lim)\n", (1505, 1743), False, 'from PPO_TD_Lambda.algo_2 import PPOTDLambda\n'), ((2097, 2356), 'PPO_TD_Lambda.algo.PPOTDLamda', 'PPO1', ([], {'env': 'env', 'policy_model': 'policy_model', 'evaluate_model': 'evaluate_model', 'model_save_dir': '"""MODEL_PARAMS"""', 'exp_name': 'exp_name', 'logger': 'logger', 'gamma': 'gamma', 'clip_ratio': 'clip_ratio', 'policy_lr': '(0.0001)', 'evaluate_lr': '(0.0001)', 'is_ou_noise': '(False)', 'act_lim': 'act_lim'}), "(env=env, policy_model=policy_model, evaluate_model=evaluate_model,\n model_save_dir='MODEL_PARAMS', exp_name=exp_name, logger=logger, gamma=\n gamma, clip_ratio=clip_ratio, policy_lr=0.0001, evaluate_lr=0.0001,\n is_ou_noise=False, act_lim=act_lim)\n", (2101, 2356), True, 'from PPO_TD_Lambda.algo import PPOTDLamda as PPO1\n')] |
from scipy.integrate import solve_ivp
from scipy.optimize import root_scalar
import numpy as np
def qho(x, psi, E):
    """RHS of the 1-D time-independent Schrödinger equation for the
    harmonic oscillator, in natural units (hbar = m = k = 1)."""
    hbar = m = k = 1.0
    potential = k * x ** 2 / 2
    second_derivative = 2.0 * m * (potential - E) * psi[0] / hbar ** 2
    return np.asarray([psi[1], second_derivative])
def single_shooting_method(tise, x, psi, dpsi, E):
    """Find an eigenvalue inside bracket E by shooting from x[0] with
    initial state (psi[0], dpsi) and matching the boundary value psi[1] at x[1]."""
    def boundary_mismatch(energy):
        initial_state = np.asarray([psi[0], dpsi])
        solution = solve_ivp(tise, x, initial_state, args=(energy,))
        return solution.y[0, -1] - psi[1]
    return root_scalar(boundary_mismatch, bracket=E)
# Integration domain and boundary values: psi(±10) ≈ 0 approximates the
# wavefunction's decay at infinity.
x = np.asarray([-10.0, 10.0])
psi = np.asarray([0.0, 0.0])
# Tiny nonzero initial slope avoids the trivial psi ≡ 0 solution.
dpsi = 1.0e-12
# Energy bracket expected to contain the ground state.
E = [0.01, 1.0]
print(E0 := single_shooting_method(qho, x, psi, dpsi, E))
"numpy.asarray",
"scipy.optimize.root_scalar"
] | [((443, 468), 'numpy.asarray', 'np.asarray', (['[-10.0, 10.0]'], {}), '([-10.0, 10.0])\n', (453, 468), True, 'import numpy as np\n'), ((475, 497), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (485, 497), True, 'import numpy as np\n'), ((152, 225), 'numpy.asarray', 'np.asarray', (['[psi[1], 2.0 * m * (k * x ** 2 / 2 - E) * psi[0] / hbar ** 2]'], {}), '([psi[1], 2.0 * m * (k * x ** 2 / 2 - E) * psi[0] / hbar ** 2])\n', (162, 225), True, 'import numpy as np\n'), ((398, 436), 'scipy.optimize.root_scalar', 'root_scalar', (['objective_func'], {'bracket': 'E'}), '(objective_func, bracket=E)\n', (409, 436), False, 'from scipy.optimize import root_scalar\n'), ((329, 355), 'numpy.asarray', 'np.asarray', (['[psi[0], dpsi]'], {}), '([psi[0], dpsi])\n', (339, 355), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Generate the sample vectors for the test.
"""
__author__ = "<NAME> <<EMAIL>>"
__date__ = "24/02/2021"
import os
import shutil
import numpy as np
if __name__ == '__main__':
    # Fixed raster geometry for every generated sample.
    width = 128
    height = 128
    base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
    # One output directory per (component layout, signedness, bit depth) combo.
    for label, component_count in [("mono", 1), ("rgb", 3), ("rgba", 4), ("multi", 10)]:
        for signed in [True, False]:
            for bytes_per_sample in [1, 2]:
                # Signed 8-bit containers are intentionally skipped.
                if signed and bytes_per_sample == 1:
                    continue
                for missing_bits in range(8):
                    # Effective precision: container bits minus unused high bits.
                    bits_per_sample = 8 * bytes_per_sample - missing_bits
                    output_dir = os.path.join(base_dir, f"{label}_{'s' if signed else 'u'}{bits_per_sample}be")
                    # Start from a clean directory on every run.
                    shutil.rmtree(output_dir, ignore_errors=True)
                    os.makedirs(output_dir)
                    type = f"{'s' if signed else 'u'}{8 * bytes_per_sample}be"
                    # numpy big-endian dtype code matching the container size.
                    dtype = f">{'i' if signed else 'u'}{bytes_per_sample}"
                    geometry = f"{component_count}x{height}x{width}"
                    output_path = os.path.join(output_dir, f"sample_{type}-{geometry}.raw")
                    total_samples = width*height*component_count
                    if signed:
                        min_sample_value = - (2 ** (bits_per_sample-1))
                        max_sample_value = 2 ** (bits_per_sample-1) - 1
                    else:
                        min_sample_value = 0
                        max_sample_value = 2**bits_per_sample - 1
                    # Fill with a repeating ramp across the full value range.
                    samples = np.zeros(total_samples)
                    for i in range(total_samples):
                        samples[i] = min_sample_value + i % (max_sample_value - min_sample_value + 1)
                    # Force the extremes into the first two samples so every
                    # file is guaranteed to contain both min and max values.
                    samples[0] = min_sample_value
                    samples[1] = max_sample_value
                    with open(output_path, "wb") as output_file:
                        output_file.write(bytes(samples.astype(dtype)))
| [
"os.path.abspath",
"os.makedirs",
"numpy.zeros",
"shutil.rmtree",
"os.path.join"
] | [((302, 327), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (317, 327), False, 'import os\n'), ((746, 824), 'os.path.join', 'os.path.join', (['base_dir', 'f"""{label}_{\'s\' if signed else \'u\'}{bits_per_sample}be"""'], {}), '(base_dir, f"{label}_{\'s\' if signed else \'u\'}{bits_per_sample}be")\n', (758, 824), False, 'import os\n'), ((845, 890), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {'ignore_errors': '(True)'}), '(output_dir, ignore_errors=True)\n', (858, 890), False, 'import shutil\n'), ((911, 934), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (922, 934), False, 'import os\n'), ((1193, 1250), 'os.path.join', 'os.path.join', (['output_dir', 'f"""sample_{type}-{geometry}.raw"""'], {}), "(output_dir, f'sample_{type}-{geometry}.raw')\n", (1205, 1250), False, 'import os\n'), ((1659, 1682), 'numpy.zeros', 'np.zeros', (['total_samples'], {}), '(total_samples)\n', (1667, 1682), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import numpy as np
import math
from scipy import stats
from scipy.special import gammaln, multigammaln
from dists import CollapsibleDistribution, FrozenDistribution
LOG2PI = math.log(2*math.pi)
LOG2 = math.log(2)
LOGPI = math.log(math.pi)
class uncert_NormalFixedCovar(CollapsibleDistribution):
"""
Multivariate Normal likelihood with multivariate Normal prior on
mean and a fixed covariance matrix.
All math taken from <NAME>'s 2007 technical report,
'Conjugate Bayesian analysis of the Gaussian distribution'.
"""
def __init__(self, **prior_hyperparameters):
self.mu_0 = prior_hyperparameters['mu_0']
self.sigma_0 = prior_hyperparameters['sigma_0']
self.S = prior_hyperparameters['S']
self.d = float(len(self.mu_0))
sgn, self.sigma_0_det = np.linalg.slogdet(self.sigma_0)
sgn, self.S_det = np.linalg.slogdet(self.S)
self.sigma_0_inv = np.linalg.inv(self.sigma_0)
self.S_inv = np.linalg.inv(self.S)
self.log_z0 = self.calc_log_z(self.mu_0, self.sigma_0, self.S)
@staticmethod
def update_parameters(X, X_uncert, _mu, _sigma, S, _d):
if X.shape[0] != X_uncert.shape[0]:
raise ValueError("The shapes of X and X_uncert do not "
"agree. {0} {1}".format(X.shape,
X_uncert.shape))
n = X.shape[0]
sigma_sum = np.linalg.inv(_sigma)
mu_sum = np.dot(np.linalg.inv(_sigma), _mu)
for it in range(n):
inv_uncert = np.linalg.inv(X_uncert[it, :, :]+S)
sigma_sum += inv_uncert
mu_sum += np.dot(inv_uncert, X[it, :])
sigma_n = np.linalg.inv(sigma_sum)
mu_n = np.dot(sigma_n, mu_sum)
assert(mu_n.shape[0] == _mu.shape[0])
assert(sigma_n.shape[0] == _sigma.shape[0])
assert(sigma_n.shape[1] == _sigma.shape[1])
return mu_n, sigma_n, S
@staticmethod
def update_remove(X, X_uncert, _mu, _sigma, S, _d):
if X.shape[0] != X_uncert.shape[0]:
raise ValueError("The shapes of X and X_uncert do not "
"agree. {0} {1}".format(X.shape,
X_uncert.shape))
# Ensure correct dimensionality when X is only one datum
if X.ndim == 1:
X = X[np.newaxis, :]
if X_uncert.ndim == 2:
if X_uncert.shape[0] != X_uncert.shape[1]:
raise IndexError("Covariance array is not square")
X_uncert = X_uncert[np.newaxis, :, :]
n = X.shape[0]
sigma_sum = np.linalg.inv(_sigma)
mu_sum = np.dot(np.linalg.inv(_sigma), _mu)
for it in range(n):
inv_uncert = np.linalg.inv(X_uncert[it, :, :]+S)
sigma_sum -= inv_uncert
mu_sum -= np.dot(inv_uncert, X[it, :])
sigma_n = np.linalg.inv(sigma_sum)
mu_n = np.dot(sigma_n, mu_sum)
assert(mu_n.shape[0] == _mu.shape[0])
assert(sigma_n.shape[0] == _sigma.shape[0])
assert(sigma_n.shape[1] == _sigma.shape[1])
return mu_n, sigma_n, S
@staticmethod
def calc_log_z(_mu, _sigma, S):
d = len(_mu)
sign, detr = np.linalg.slogdet(_sigma)
_sigma_inv = np.linalg.inv(_sigma)
log_z = detr/2 + np.dot(_mu, np.dot(_sigma_inv, _mu))/2
return log_z
def log_marginal_likelihood(self, X, X_uncert, mu_sum=None, sigma_sum=None,
log_det_prod=None, Q=None, verbose=False):
n = X.shape[0]
if (mu_sum is None or sigma_sum is None or log_det_prod is None
or Q is None):
sigma_sum = np.zeros(self.sigma_0.shape)
mu_sum = np.zeros(self.mu_0.shape)
Q = 0.
log_det_prod = 0.
for it in range(n):
inv_uncert = np.linalg.inv(X_uncert[it, :, :]+self.S)
sigma_sum += inv_uncert
weighted_mu = np.dot(inv_uncert, X[it, :])
mu_sum += weighted_mu
sgn, minus_log_det = np.linalg.slogdet(inv_uncert)
log_det_prod -= minus_log_det
Q += np.dot(X[it, :], weighted_mu)
mu_sum += np.dot(self.sigma_0_inv, self.mu_0)
sigma_n = np.linalg.inv(sigma_sum + self.sigma_0_inv)
mu_n = np.dot(sigma_n, mu_sum + np.dot(self.sigma_0_inv, self.mu_0))
log_z_n = self.calc_log_z(mu_n, sigma_n, self.S)
lml = (log_z_n - self.log_z0 - LOG2PI*(n*self.d/2) - Q/2
- log_det_prod/2)
sums_dict = {"mu_sum": mu_sum, "sigma_sum": sigma_sum,
"log_det_prod": log_det_prod, "Q": Q}
if verbose:
print(lml, log_z_n, -Q, -log_det_prod/2, log_z_n-self.log_z0,
params_n[1])
return lml, sums_dict
def log_posterior_predictive(self, X_new, X_uncert_new,
X_old, X_uncert_old):
""" log_posterior_predictive(X_new, X_uncert_new,
X_old, X_uncert_old)
Find the posterior predictive probabilitiy p(X_new|X_old)
where X_old is some data we already have and X_new is the
point at which we want the posterior predictive prob.
The posterior predictive distribution is a (multivariate)
Normal distribution.
Parameters
----------
X_old : ndarray
The existing data on which the posterior predicitve
is to be conditioned.
X_uncert_old : ndarray
The uncertainty on the measurements of the existing
data.
X_new : ndarray
The point for which we want the posterior predicitve.
X_new : ndarray
The uncertainty on the point for which we want the
posterior predicitve.
"""
params_old = self.update_parameters(X_old, X_uncert_old,
self.mu_0, self.sigma_0,
self.S, self.d)
z_sigma = params_old[1]+self.S+X_uncert_new
z_sigma_inv = np.linalg.inv(z_sigma)
diff = X_new-params_old[0]
z = np.sum(diff*np.dot(z_sigma_inv, diff))
sgn, det = np.linalg.slogdet(z_sigma)
prob = (- self.d/2*LOG2PI - det/2 - z/2)
return prob
def single_posterior(self, datum, datum_uncert, cluster_params):
""" single_posterior(datum, datum_uncert, cluster_params)
Find the marginal posterior for the parameters of a
single datum in a cluster.
Parameters
----------
datum : ndarray
The measurement of the data point of interest
datum_uncerts : ndarray
The uncertianty on the measurement - assumed to be
Gaussian and given as a covariance matrix.
cluster_params : dict
The posterior parameters of the cluster, in the
form given by update_params().
Returns
-------
mu_post : ndarray(d)
The mean of the posterior
sigma_post : ndarray(d,d)
The covariance matrix of the posterior
"""
# first get 'cavity prior'
cavity_mu, cavity_sigma = self.cavity_prior(datum,
datum_uncert,
cluster_params)
cavity_sigma_inv = np.linalg.inv(cavity_sigma)
uncert_inv = np.linalg.inv(datum_uncert)
sigma_post_inv = (cavity_sigma_inv + uncert_inv)
sigma_post = np.linalg.inv(sigma_post_inv)
mu_sum = (np.dot(cavity_sigma_inv, cavity_mu)
+ np.dot(uncert_inv, datum))
mu_post = np.dot(sigma_post, mu_sum)
return mu_post, sigma_post
def cavity_prior(self, datum, datum_uncert, cluster_params):
""" cavity_prior(datum, datum_uncert, cluster_params)
Find the 'cavity prior' for the parameters of a
single datum in a cluster.
Parameters
----------
datum : ndarray
The measurement of the data point of interest
datum_uncerts : ndarray
The uncertianty on the measurement - assumed to be
Gaussian and given as a covariance matrix.
cluster_params : dict
The posterior parameters of the cluster, in the
form given by update_params().
Returns
-------
mu_cavity : ndarray(d)
The mean of the cavity prior
sigma_cavity : ndarray(d,d)
The covariance matrix of the cavity prior
"""
# First update params, removing datum
cavity_params = self.update_remove(datum, datum_uncert,
cluster_params[0],
cluster_params[1],
cluster_params[2],
self.d)
# now calculate cacity prior params
sigma_cavity = cavity_params[1]+cavity_params[2]
mu_cavity = cavity_params[0]
return mu_cavity, sigma_cavity
def freeze_posterior_predictive(self, X_old, X_uncert_old):
""" freeze_posterior_predictive(X_old, X_uncert_old)
Dump a frozen version of the posterior predictive
distribution p(X_new | X_old)
Parameters
----------
X_old : ndarray
The existing data on which the posterior predicitve
is to be conditioned.
X_uncert_old : ndarray
The uncertainty on the measurements of the existing
data.
Returns
-------
frozen_dist : FrozenNFCPosteriorPred
The frozen distribution
"""
params_old = self.update_parameters(X_old, X_uncert_old,
self.mu_0, self.sigma_0,
self.S, self.d)
frozen_dist = FrozenNFCPosteriorPred(params_old[0],
params_old[1]+self.S)
return frozen_dist
class FrozenNFCPosteriorPred(FrozenDistribution):
""" The log posterior predicitve distribution implied by a
uncert_NormalFixedCovar instance with its parameters
(i.e. mean, covariance) frozen.
Parameters
----------
mu : ndarray
The mean of the distribution
sigma : ndarray
The (marginal) covariance of the distribution
"""
def __init__(self, mu, sigma):
self.__mu = mu
self.__sigma = sigma
self.__d = float(len(self.__mu))
sgn, self.__det = np.linalg.slogdet(sigma)
if sgn <= 0:
raise AttributeError("Covariance matrix is not PSD")
def __call__(self, X, X_uncert):
""" __call__(X, X_uncert)
Returns the log-probability of a noisy datum (assumed
Gaussian) given this distribution.
Parameters
----------
X : ndarray
The measurement
X_uncert : ndarray
The uncertainties on the measurment, expressed as a
covariance array
Returns
-------
log_prob : float
The log-probability of the datum given this
distribution
"""
z_sigma = self.__sigma + X_uncert
z_sigma_inv = np.linalg.inv(z_sigma)
diff = X - self.__mu
z = np.dot(diff, np.dot(z_sigma_inv, diff))
log_prob = - self.__d*LOG2PI/2. - self.__det - z
return log_prob
def __str__(self):
out_string = ("FrozenNFCPosteriorPred with \n"
"mean : [")
for i in range(self.__mu.shape[0]):
out_string += "{0}".format(self.__mu[i])
if i+1 < self.__mu.shape[0]:
out_string += ", "
out_string += "]"
return out_string
| [
"numpy.zeros",
"numpy.linalg.inv",
"numpy.linalg.slogdet",
"numpy.dot",
"math.log"
] | [((227, 248), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (235, 248), False, 'import math\n'), ((254, 265), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (262, 265), False, 'import math\n'), ((274, 291), 'math.log', 'math.log', (['math.pi'], {}), '(math.pi)\n', (282, 291), False, 'import math\n'), ((868, 899), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['self.sigma_0'], {}), '(self.sigma_0)\n', (885, 899), True, 'import numpy as np\n'), ((926, 951), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['self.S'], {}), '(self.S)\n', (943, 951), True, 'import numpy as np\n'), ((980, 1007), 'numpy.linalg.inv', 'np.linalg.inv', (['self.sigma_0'], {}), '(self.sigma_0)\n', (993, 1007), True, 'import numpy as np\n'), ((1029, 1050), 'numpy.linalg.inv', 'np.linalg.inv', (['self.S'], {}), '(self.S)\n', (1042, 1050), True, 'import numpy as np\n'), ((1490, 1511), 'numpy.linalg.inv', 'np.linalg.inv', (['_sigma'], {}), '(_sigma)\n', (1503, 1511), True, 'import numpy as np\n'), ((1760, 1784), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_sum'], {}), '(sigma_sum)\n', (1773, 1784), True, 'import numpy as np\n'), ((1800, 1823), 'numpy.dot', 'np.dot', (['sigma_n', 'mu_sum'], {}), '(sigma_n, mu_sum)\n', (1806, 1823), True, 'import numpy as np\n'), ((2698, 2719), 'numpy.linalg.inv', 'np.linalg.inv', (['_sigma'], {}), '(_sigma)\n', (2711, 2719), True, 'import numpy as np\n'), ((2968, 2992), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_sum'], {}), '(sigma_sum)\n', (2981, 2992), True, 'import numpy as np\n'), ((3008, 3031), 'numpy.dot', 'np.dot', (['sigma_n', 'mu_sum'], {}), '(sigma_n, mu_sum)\n', (3014, 3031), True, 'import numpy as np\n'), ((3313, 3338), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['_sigma'], {}), '(_sigma)\n', (3330, 3338), True, 'import numpy as np\n'), ((3360, 3381), 'numpy.linalg.inv', 'np.linalg.inv', (['_sigma'], {}), '(_sigma)\n', (3373, 3381), True, 'import numpy as np\n'), ((4327, 4362), 'numpy.dot', 'np.dot', (['self.sigma_0_inv', 
'self.mu_0'], {}), '(self.sigma_0_inv, self.mu_0)\n', (4333, 4362), True, 'import numpy as np\n'), ((4382, 4425), 'numpy.linalg.inv', 'np.linalg.inv', (['(sigma_sum + self.sigma_0_inv)'], {}), '(sigma_sum + self.sigma_0_inv)\n', (4395, 4425), True, 'import numpy as np\n'), ((6300, 6322), 'numpy.linalg.inv', 'np.linalg.inv', (['z_sigma'], {}), '(z_sigma)\n', (6313, 6322), True, 'import numpy as np\n'), ((6430, 6456), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['z_sigma'], {}), '(z_sigma)\n', (6447, 6456), True, 'import numpy as np\n'), ((7690, 7717), 'numpy.linalg.inv', 'np.linalg.inv', (['cavity_sigma'], {}), '(cavity_sigma)\n', (7703, 7717), True, 'import numpy as np\n'), ((7739, 7766), 'numpy.linalg.inv', 'np.linalg.inv', (['datum_uncert'], {}), '(datum_uncert)\n', (7752, 7766), True, 'import numpy as np\n'), ((7846, 7875), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_post_inv'], {}), '(sigma_post_inv)\n', (7859, 7875), True, 'import numpy as np\n'), ((7996, 8022), 'numpy.dot', 'np.dot', (['sigma_post', 'mu_sum'], {}), '(sigma_post, mu_sum)\n', (8002, 8022), True, 'import numpy as np\n'), ((11079, 11103), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['sigma'], {}), '(sigma)\n', (11096, 11103), True, 'import numpy as np\n'), ((11846, 11868), 'numpy.linalg.inv', 'np.linalg.inv', (['z_sigma'], {}), '(z_sigma)\n', (11859, 11868), True, 'import numpy as np\n'), ((1536, 1557), 'numpy.linalg.inv', 'np.linalg.inv', (['_sigma'], {}), '(_sigma)\n', (1549, 1557), True, 'import numpy as np\n'), ((1618, 1655), 'numpy.linalg.inv', 'np.linalg.inv', (['(X_uncert[it, :, :] + S)'], {}), '(X_uncert[it, :, :] + S)\n', (1631, 1655), True, 'import numpy as np\n'), ((1712, 1740), 'numpy.dot', 'np.dot', (['inv_uncert', 'X[it, :]'], {}), '(inv_uncert, X[it, :])\n', (1718, 1740), True, 'import numpy as np\n'), ((2744, 2765), 'numpy.linalg.inv', 'np.linalg.inv', (['_sigma'], {}), '(_sigma)\n', (2757, 2765), True, 'import numpy as np\n'), ((2826, 2863), 'numpy.linalg.inv', 
'np.linalg.inv', (['(X_uncert[it, :, :] + S)'], {}), '(X_uncert[it, :, :] + S)\n', (2839, 2863), True, 'import numpy as np\n'), ((2920, 2948), 'numpy.dot', 'np.dot', (['inv_uncert', 'X[it, :]'], {}), '(inv_uncert, X[it, :])\n', (2926, 2948), True, 'import numpy as np\n'), ((3777, 3805), 'numpy.zeros', 'np.zeros', (['self.sigma_0.shape'], {}), '(self.sigma_0.shape)\n', (3785, 3805), True, 'import numpy as np\n'), ((3827, 3852), 'numpy.zeros', 'np.zeros', (['self.mu_0.shape'], {}), '(self.mu_0.shape)\n', (3835, 3852), True, 'import numpy as np\n'), ((7895, 7930), 'numpy.dot', 'np.dot', (['cavity_sigma_inv', 'cavity_mu'], {}), '(cavity_sigma_inv, cavity_mu)\n', (7901, 7930), True, 'import numpy as np\n'), ((7951, 7976), 'numpy.dot', 'np.dot', (['uncert_inv', 'datum'], {}), '(uncert_inv, datum)\n', (7957, 7976), True, 'import numpy as np\n'), ((11925, 11950), 'numpy.dot', 'np.dot', (['z_sigma_inv', 'diff'], {}), '(z_sigma_inv, diff)\n', (11931, 11950), True, 'import numpy as np\n'), ((3964, 4006), 'numpy.linalg.inv', 'np.linalg.inv', (['(X_uncert[it, :, :] + self.S)'], {}), '(X_uncert[it, :, :] + self.S)\n', (3977, 4006), True, 'import numpy as np\n'), ((4075, 4103), 'numpy.dot', 'np.dot', (['inv_uncert', 'X[it, :]'], {}), '(inv_uncert, X[it, :])\n', (4081, 4103), True, 'import numpy as np\n'), ((4180, 4209), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['inv_uncert'], {}), '(inv_uncert)\n', (4197, 4209), True, 'import numpy as np\n'), ((4278, 4307), 'numpy.dot', 'np.dot', (['X[it, :]', 'weighted_mu'], {}), '(X[it, :], weighted_mu)\n', (4284, 4307), True, 'import numpy as np\n'), ((4466, 4501), 'numpy.dot', 'np.dot', (['self.sigma_0_inv', 'self.mu_0'], {}), '(self.sigma_0_inv, self.mu_0)\n', (4472, 4501), True, 'import numpy as np\n'), ((6383, 6408), 'numpy.dot', 'np.dot', (['z_sigma_inv', 'diff'], {}), '(z_sigma_inv, diff)\n', (6389, 6408), True, 'import numpy as np\n'), ((3420, 3443), 'numpy.dot', 'np.dot', (['_sigma_inv', '_mu'], {}), '(_sigma_inv, _mu)\n', (3426, 
3443), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Author: tom-hydrogen
# @Date: 2018-03-07 10:51:02
# @Last Modified by: tom-hydrogen
# @Last Modified time: 2018-03-09 16:51:22
""" gp.py
Bayesian optimisation of loss functions.
"""
import numpy as np
from scipy.optimize import minimize
from copy import deepcopy
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
import GPy
from GPy.models import GPRegression, SparseGPRegression
from .core import BaseSampler
from .utils import random_sample, expected_improvement
from ..constants import EPSILON, RANDOM_STATE
class BayesSampler(BaseSampler):
"""Bayesian optimization sampler
Sample next location based on gaussian process
Parameters
----------
space: list(dict)
Define search space. Each element has to the following key
values: 'name', 'type', and 'domain' (,'num_grid' is optional).
init_X: array-like(float), shape=(n_samples, n_dim)
The list of parameters to initizlie sampler
init_y: array-like(float), shape(n_samples,)
The list of score of init_X
r_min: int
The number of random samples before starting using gaussian process
method: str
The name of acquisition functions
kernel: kernel object, optional
is_normalize: bool
If ture, normalized score values are used for optimization
n_restarts_optimizer: int
The number of trial to opimize GP hyperparameters
backend: str (default 'gpy')
Determine which GP package is used. That has to be
either of 'gpy' or 'sklearn'.
optimizer: str
The name of optimizers of hyperparameters of GP, which is valid when
backend='gpy'.
max_iters: int
The maximum number of iteration to optimize hyperparamters of GP,
which is valid when backend='gpy'.
ARD: bool
Wheather to use ARD for kernel, which is valid when backend='gpy'.
sparse: bool
If true, use sparse GP, which is valid when backend='gpy'.
num_inducing: int
The number of inducing inputs for sparse GP, which is valid
when backend='gpy' and sparse is True.
random_state: int
"""
sampler_name = "bayes"
def __init__(self, space, init_X=None, init_y=None, r_min=3, method="EI",
kernel=None, is_normalize=True, n_restarts_optimizer=10,
backend="gpy", optimizer="bfgs", max_iters=1000,
ARD=False, sparse=False,
num_inducing=10, random_state=RANDOM_STATE):
super(BayesSampler, self).__init__(space, init_X, init_y)
self._r_min = r_min
self.model = None
self.acquisition_func = self._get_acquisition_func(method)
self._is_normalize = is_normalize
self._ARD = ARD
self._kernel = kernel
self._sparse = sparse
self._num_inducing = num_inducing
self._optimizer = optimizer
self._max_iters = max_iters
self._backend = backend.lower()
self._n_restarts_optimizer = n_restarts_optimizer
self._random_state = random_state
def _update(self, new_X, new_y, eps=EPSILON):
X, y = self.data
X = deepcopy(X)
y = deepcopy(y)
if len(X) >= self._r_min:
X_vec = np.array([self.params2vec(x) for x in X])
y = np.array(y)
if self._is_normalize:
sig = np.sqrt(np.var(y))
sig = max(sig, eps)
mu = np.mean(y)
y = (y - mu) / sig
if self._backend == "sklearn":
if self._kernel is None:
self._kernel = Matern(nu=2.5)
self.model = GaussianProcessRegressor(
kernel=self._kernel,
n_restarts_optimizer=self._n_restarts_optimizer,
random_state=self._random_state,
normalize_y=False
)
self.model.fit(X_vec, y)
elif self._backend.lower() == "gpy":
y = np.array(y)[:, None]
if self.model is None:
self._create_model(X_vec, y)
else:
self.model.set_XY(X_vec, y)
self.model.optimize_restarts(self._n_restarts_optimizer,
optimizer=self._optimizer,
max_iters=self._max_iters,
messages=False,
verbose=False,
ipython_notebook=False)
def _create_model(self, X, y):
"""
Creates the GPy model given some input data X and Y.
"""
# Define kernel
input_dim = X.shape[1]
if self._kernel is None:
kern = GPy.kern.Matern52(input_dim, variance=1., ARD=self._ARD)
else:
kern = self._kernel
self._kernel = None
# Define model
noise_var = y.var() * 0.01
if not self._sparse:
self.model = GPRegression(X, y, kernel=kern, noise_var=noise_var)
else:
self.model = SparseGPRegression(X, y, kernel=kern,
num_inducing=self._num_inducing)
self.model.Gaussian_noise.constrain_bounded(1e-9, 1e6, warning=False)
def sample(self, num_samples=1, *args, **kwargs):
"""Sample next location to evaluate based on GP
Parameters
---------
num_samples: int
The number of samples
Returns
-------
Xs: list(dict), length is num_samples
"""
_num_data = self.num_data
if _num_data < self._r_min:
Xs = self._random_sample(num_samples)
else:
Xs = self._bayes_sample(num_samples)
return Xs
def _bayes_sample(self, num_samples, num_restarts=25):
num_restarts = max(num_samples, num_restarts)
init_params = self._random_sample(num_restarts)
init_xs = [self.params2vec(param) for param in init_params]
bounds = self.design_space.get_bounds()
if self._backend == "sklearn":
evaluated_loss = np.array(self.model.y_train_)
else:
evaluated_loss = np.array(self.model.Y)[:, 0]
ys = []
xs = []
def minus_ac(x):
return -self.acquisition_func(x, self.model,
evaluated_loss,
mode=self._backend)
for x0 in init_xs:
res = minimize(fun=minus_ac,
x0=x0,
bounds=bounds,
method='L-BFGS-B')
ys.append(-res.fun)
xs.append(res.x)
idx = np.argsort(ys)[::-1][:num_samples]
best_x = np.array(xs)[idx]
best_params = [self.vec2params(x) for x in best_x]
return best_params
def _random_sample(self, num_samples):
Xs = []
for i in range(num_samples):
x = random_sample(self.params_conf)
Xs.append(x)
return list(Xs)
def _get_acquisition_func(self, method):
if method == "EI":
return expected_improvement
else:
raise NotImplementedError(method)
def params2vec(self, params):
# Not include fixed params
vec = []
for conf in self.params_conf:
val = params[conf['name']]
vec.append(val)
vec = self.design_space.objective_to_model([vec])
return np.array(vec)
def vec2params(self, vec):
# Not include fixed params
params = {}
vec = self.design_space.model_to_objective([vec])
for i, val in enumerate(vec):
conf = self.params_conf[i]
params[conf["name"]] = val
return params
| [
"GPy.kern.Matern52",
"copy.deepcopy",
"scipy.optimize.minimize",
"GPy.models.GPRegression",
"GPy.models.SparseGPRegression",
"numpy.var",
"numpy.argsort",
"sklearn.gaussian_process.kernels.Matern",
"numpy.mean",
"numpy.array",
"sklearn.gaussian_process.GaussianProcessRegressor"
] | [((3210, 3221), 'copy.deepcopy', 'deepcopy', (['X'], {}), '(X)\n', (3218, 3221), False, 'from copy import deepcopy\n'), ((3234, 3245), 'copy.deepcopy', 'deepcopy', (['y'], {}), '(y)\n', (3242, 3245), False, 'from copy import deepcopy\n'), ((7661, 7674), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (7669, 7674), True, 'import numpy as np\n'), ((3358, 3369), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3366, 3369), True, 'import numpy as np\n'), ((4882, 4939), 'GPy.kern.Matern52', 'GPy.kern.Matern52', (['input_dim'], {'variance': '(1.0)', 'ARD': 'self._ARD'}), '(input_dim, variance=1.0, ARD=self._ARD)\n', (4899, 4939), False, 'import GPy\n'), ((5130, 5182), 'GPy.models.GPRegression', 'GPRegression', (['X', 'y'], {'kernel': 'kern', 'noise_var': 'noise_var'}), '(X, y, kernel=kern, noise_var=noise_var)\n', (5142, 5182), False, 'from GPy.models import GPRegression, SparseGPRegression\n'), ((5222, 5292), 'GPy.models.SparseGPRegression', 'SparseGPRegression', (['X', 'y'], {'kernel': 'kern', 'num_inducing': 'self._num_inducing'}), '(X, y, kernel=kern, num_inducing=self._num_inducing)\n', (5240, 5292), False, 'from GPy.models import GPRegression, SparseGPRegression\n'), ((6269, 6298), 'numpy.array', 'np.array', (['self.model.y_train_'], {}), '(self.model.y_train_)\n', (6277, 6298), True, 'import numpy as np\n'), ((6653, 6716), 'scipy.optimize.minimize', 'minimize', ([], {'fun': 'minus_ac', 'x0': 'x0', 'bounds': 'bounds', 'method': '"""L-BFGS-B"""'}), "(fun=minus_ac, x0=x0, bounds=bounds, method='L-BFGS-B')\n", (6661, 6716), False, 'from scipy.optimize import minimize\n'), ((6925, 6937), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (6933, 6937), True, 'import numpy as np\n'), ((3503, 3513), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3510, 3513), True, 'import numpy as np\n'), ((3712, 3863), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'self._kernel', 'n_restarts_optimizer': 
'self._n_restarts_optimizer', 'random_state': 'self._random_state', 'normalize_y': '(False)'}), '(kernel=self._kernel, n_restarts_optimizer=self.\n _n_restarts_optimizer, random_state=self._random_state, normalize_y=False)\n', (3736, 3863), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((6342, 6364), 'numpy.array', 'np.array', (['self.model.Y'], {}), '(self.model.Y)\n', (6350, 6364), True, 'import numpy as np\n'), ((6873, 6887), 'numpy.argsort', 'np.argsort', (['ys'], {}), '(ys)\n', (6883, 6887), True, 'import numpy as np\n'), ((3435, 3444), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (3441, 3444), True, 'import numpy as np\n'), ((3668, 3682), 'sklearn.gaussian_process.kernels.Matern', 'Matern', ([], {'nu': '(2.5)'}), '(nu=2.5)\n', (3674, 3682), False, 'from sklearn.gaussian_process.kernels import Matern\n'), ((4067, 4078), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4075, 4078), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(27000, 15000, 10000)
def Mean():
return np.mean(incomes)
def Visualize():
plt.hist(incomes,50)
plt.show()
Mean()
Visualize() #displays graphical window popup
| [
"matplotlib.pyplot.hist",
"numpy.mean",
"matplotlib.pyplot.show",
"numpy.random.normal"
] | [((62, 99), 'numpy.random.normal', 'np.random.normal', (['(27000)', '(15000)', '(10000)'], {}), '(27000, 15000, 10000)\n', (78, 99), True, 'import numpy as np\n'), ((123, 139), 'numpy.mean', 'np.mean', (['incomes'], {}), '(incomes)\n', (130, 139), True, 'import numpy as np\n'), ((162, 183), 'matplotlib.pyplot.hist', 'plt.hist', (['incomes', '(50)'], {}), '(incomes, 50)\n', (170, 183), True, 'import matplotlib.pyplot as plt\n'), ((187, 197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (195, 197), True, 'import matplotlib.pyplot as plt\n')] |
from pathlib import Path
import math
import numpy as np
from PIL import Image
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, WeightedRandomSampler
IMAGE_SIZE = 224
BANNERHEIGHT = 12
ROTATION_ANGLE = 10
NORM = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
l = IMAGE_SIZE / 2
rad = math.radians(ROTATION_ANGLE)
c = math.cos(rad)
s = math.sin(rad)
x = l * c - l * s
y = l * s + l * c
rotpad = math.ceil(max(x, y) - l)
TRAIN_TRANSFORM = transforms.Compose(
[
transforms.Pad((0, 0, 0, -BANNERHEIGHT)), # Crop banner from bottom edge of image
transforms.Resize(IMAGE_SIZE),
transforms.RandomResizedCrop(IMAGE_SIZE),
transforms.RandomHorizontalFlip(),
transforms.Pad(
rotpad, padding_mode="reflect"
), # Mirror boundary to avoid empty corners of rotated image
transforms.RandomRotation(ROTATION_ANGLE),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize(*NORM),
]
)
PREDICT_TRANSFORM = transforms.Compose(
[
transforms.Pad((0, 0, 0, -BANNERHEIGHT)),
transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize(*NORM),
]
)
def class_counts(dataset):
_, counts = np.unique(dataset.targets, return_counts=True)
return counts
def dataset_weights(dataset):
class_weights = 1 / class_counts(dataset)
return class_weights[dataset.targets]
def load_data(data_dir, batch_size, num_workers=4, sample=True):
# TODO: Write docstring
"""[summary]
Args:
data_dir ([type]): [description]
Returns:
[type]: [description]
"""
data_dir = Path(data_dir)
data_transforms = {
"train": TRAIN_TRANSFORM,
"valid": PREDICT_TRANSFORM,
"test": PREDICT_TRANSFORM,
}
image_datasets = {
x: datasets.ImageFolder(data_dir / x, data_transforms[x])
for x in ["train", "valid", "test"]
}
samplers = {
x: WeightedRandomSampler(dataset_weights(image_datasets[x]), len(image_datasets[x]))
if sample
else None
for x in ["train", "valid", "test"]
}
dataloaders = {
x: DataLoader(
image_datasets[x],
sampler=samplers[x],
batch_size=batch_size,
shuffle=(not sample),
num_workers=num_workers,
)
for x in ["train", "valid", "test"]
}
class_names = image_datasets["train"].classes
return class_names, image_datasets, dataloaders
def process_image_file(image_path):
"""[summary]
Args:
image_path ([type]): [description]
Returns:
[type]: [description]
"""
img_pil = Image.open(image_path)
return process_image_data(img_pil)
def process_image_data(image_data):
"""Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
Args:
image_path ([type]): [description]
Returns:
[type]: [description]
"""
adjustments = PREDICT_TRANSFORM
img_tensor = adjustments(image_data)
return img_tensor
def unnormalize_img_tensor(img_tensor):
"""Imshow for Tensor
Args:
img_tensor ([type]): [description]
Returns:
[type]: [description]
"""
img_tensor = img_tensor.cpu().numpy().transpose((1, 2, 0))
mean = np.array(NORM[0])
std = np.array(NORM[1])
img_tensor = std * img_tensor + mean
img_tensor = np.clip(img_tensor, 0, 1)
return img_tensor
| [
"torchvision.transforms.RandomHorizontalFlip",
"torch.utils.data.DataLoader",
"math.radians",
"torchvision.transforms.RandomRotation",
"torchvision.transforms.Normalize",
"math.sin",
"PIL.Image.open",
"numpy.clip",
"torchvision.transforms.ToTensor",
"pathlib.Path",
"torchvision.transforms.Pad",
... | [((324, 352), 'math.radians', 'math.radians', (['ROTATION_ANGLE'], {}), '(ROTATION_ANGLE)\n', (336, 352), False, 'import math\n'), ((357, 370), 'math.cos', 'math.cos', (['rad'], {}), '(rad)\n', (365, 370), False, 'import math\n'), ((375, 388), 'math.sin', 'math.sin', (['rad'], {}), '(rad)\n', (383, 388), False, 'import math\n'), ((1335, 1381), 'numpy.unique', 'np.unique', (['dataset.targets'], {'return_counts': '(True)'}), '(dataset.targets, return_counts=True)\n', (1344, 1381), True, 'import numpy as np\n'), ((1751, 1765), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (1755, 1765), False, 'from pathlib import Path\n'), ((2792, 2814), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (2802, 2814), False, 'from PIL import Image\n'), ((3448, 3465), 'numpy.array', 'np.array', (['NORM[0]'], {}), '(NORM[0])\n', (3456, 3465), True, 'import numpy as np\n'), ((3476, 3493), 'numpy.array', 'np.array', (['NORM[1]'], {}), '(NORM[1])\n', (3484, 3493), True, 'import numpy as np\n'), ((3552, 3577), 'numpy.clip', 'np.clip', (['img_tensor', '(0)', '(1)'], {}), '(img_tensor, 0, 1)\n', (3559, 3577), True, 'import numpy as np\n'), ((513, 553), 'torchvision.transforms.Pad', 'transforms.Pad', (['(0, 0, 0, -BANNERHEIGHT)'], {}), '((0, 0, 0, -BANNERHEIGHT))\n', (527, 553), False, 'from torchvision import datasets, transforms\n'), ((604, 633), 'torchvision.transforms.Resize', 'transforms.Resize', (['IMAGE_SIZE'], {}), '(IMAGE_SIZE)\n', (621, 633), False, 'from torchvision import datasets, transforms\n'), ((643, 683), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['IMAGE_SIZE'], {}), '(IMAGE_SIZE)\n', (671, 683), False, 'from torchvision import datasets, transforms\n'), ((693, 726), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (724, 726), False, 'from torchvision import datasets, transforms\n'), ((736, 782), 'torchvision.transforms.Pad', 'transforms.Pad', 
(['rotpad'], {'padding_mode': '"""reflect"""'}), "(rotpad, padding_mode='reflect')\n", (750, 782), False, 'from torchvision import datasets, transforms\n'), ((873, 914), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['ROTATION_ANGLE'], {}), '(ROTATION_ANGLE)\n', (898, 914), False, 'from torchvision import datasets, transforms\n'), ((924, 957), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['IMAGE_SIZE'], {}), '(IMAGE_SIZE)\n', (945, 957), False, 'from torchvision import datasets, transforms\n'), ((967, 988), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (986, 988), False, 'from torchvision import datasets, transforms\n'), ((998, 1025), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['*NORM'], {}), '(*NORM)\n', (1018, 1025), False, 'from torchvision import datasets, transforms\n'), ((1090, 1130), 'torchvision.transforms.Pad', 'transforms.Pad', (['(0, 0, 0, -BANNERHEIGHT)'], {}), '((0, 0, 0, -BANNERHEIGHT))\n', (1104, 1130), False, 'from torchvision import datasets, transforms\n'), ((1140, 1169), 'torchvision.transforms.Resize', 'transforms.Resize', (['IMAGE_SIZE'], {}), '(IMAGE_SIZE)\n', (1157, 1169), False, 'from torchvision import datasets, transforms\n'), ((1179, 1212), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['IMAGE_SIZE'], {}), '(IMAGE_SIZE)\n', (1200, 1212), False, 'from torchvision import datasets, transforms\n'), ((1222, 1243), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1241, 1243), False, 'from torchvision import datasets, transforms\n'), ((1253, 1280), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['*NORM'], {}), '(*NORM)\n', (1273, 1280), False, 'from torchvision import datasets, transforms\n'), ((1937, 1991), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['(data_dir / x)', 'data_transforms[x]'], {}), '(data_dir / x, data_transforms[x])\n', (1957, 1991), False, 'from torchvision import 
datasets, transforms\n'), ((2271, 2393), 'torch.utils.data.DataLoader', 'DataLoader', (['image_datasets[x]'], {'sampler': 'samplers[x]', 'batch_size': 'batch_size', 'shuffle': '(not sample)', 'num_workers': 'num_workers'}), '(image_datasets[x], sampler=samplers[x], batch_size=batch_size,\n shuffle=not sample, num_workers=num_workers)\n', (2281, 2393), False, 'from torch.utils.data import DataLoader, WeightedRandomSampler\n')] |
import numpy as np
import math
from abc import abstractmethod
from jmetal.core.solution import FloatSolution
import Levenshtein as levenshtein
import sys
class Behavior:
    """Base class for novelty-score behaviors.

    A behavior measures how different an individual is from a population
    under some distance metric and scores novelty as the mean distance to
    the k nearest neighbors.
    """

    def evaluate_novelty(self, individual: FloatSolution, population: [FloatSolution], neighborhood_size: int = 2):
        """Return the novelty score of ``individual`` w.r.t. ``population``."""
        ordered = np.sort(self.calculate_distance(individual, population))
        return self.calculate_knn(distances=ordered, k=neighborhood_size)

    @abstractmethod
    def calculate_distance(self, individual: FloatSolution, population: [FloatSolution]):
        """Distance from ``individual`` to every member of ``population``."""
        pass

    @staticmethod
    def calculate_knn(distances: np.array, k: int) -> float:
        """Mean of the ``k`` smallest entries of the (sorted) distances."""
        return np.mean(distances[:k])
class ProteinBehaviorSS_SASA(Behavior):
    """Behavior distance in (secondary-structure score, SASA) space."""

    def calculate_distance(self, individual: FloatSolution, population: [FloatSolution]):
        """Euclidean distance from the individual's (ss2_score, sasa) point
        to each population member's point, one entry per member."""
        reference = np.array([individual.attributes["ss2_score"],
                              individual.attributes["sasa"]])
        # Euclidean distance via numpy's vector norm.
        return np.array(
            [np.linalg.norm(reference - [sol.attributes["ss2_score"], sol.attributes["sasa"]])
             for sol in population],
            dtype="float")
class ProteinBehaviorSS(Behavior):
    """Behavior distance based on the secondary-structure string (ss2)."""

    def calculate_distance(self, individual: FloatSolution, population: [FloatSolution]):
        """Levenshtein distance between ss2 strings, one entry per member."""
        reference = individual.attributes["ss2"]
        return np.array(
            [levenshtein.distance(reference, sol.attributes["ss2"])
             for sol in population],
            dtype="float")
class GenericBehavior(Behavior):
    """Behavior distance computed directly on the decision variables."""

    def calculate_distance(self, individual: FloatSolution, population: [FloatSolution]) -> np.array:
        """Euclidean distance between variable vectors, one entry per member."""
        reference = np.array(individual.variables)
        return np.array(
            [np.linalg.norm(reference - sol.variables) for sol in population],
            dtype="float")
| [
"numpy.array",
"numpy.linalg.norm",
"Levenshtein.distance",
"numpy.average"
] | [((720, 745), 'numpy.average', 'np.average', (['distances[:k]'], {}), '(distances[:k])\n', (730, 745), True, 'import numpy as np\n'), ((1018, 1047), 'numpy.array', 'np.array', (['[ind_ss2, ind_sasa]'], {}), '([ind_ss2, ind_sasa])\n', (1026, 1047), True, 'import numpy as np\n'), ((1905, 1935), 'numpy.array', 'np.array', (['individual.variables'], {}), '(individual.variables)\n', (1913, 1935), True, 'import numpy as np\n'), ((1267, 1340), 'numpy.linalg.norm', 'np.linalg.norm', (["(x - [sol.attributes['ss2_score'], sol.attributes['sasa']])"], {}), "(x - [sol.attributes['ss2_score'], sol.attributes['sasa']])\n", (1281, 1340), True, 'import numpy as np\n'), ((1676, 1728), 'Levenshtein.distance', 'levenshtein.distance', (['ind_ss2', "sol.attributes['ss2']"], {}), "(ind_ss2, sol.attributes['ss2'])\n", (1696, 1728), True, 'import Levenshtein as levenshtein\n'), ((2070, 2103), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - sol.variables)'], {}), '(x - sol.variables)\n', (2084, 2103), True, 'import numpy as np\n')] |
"""
Plot script of various spectra.
"""
import matplotlib.pyplot as plt
import numpy as np
import os.path
from math import pi
import argparse
import h5py
def plot_spectra_in_file(filename):
    """
    Plot spectra stored in an HDF5 file.

    Reads whichever datasets are present in the file and plots the
    corresponding spectra: PS, XPS, XAS, RIXS (fluorescence yield,
    energy-loss cuts and 2D maps) and NIXS. Sections are skipped when
    their dataset is absent from the file.

    Parameters
    ----------
    filename : str
        Path to the HDF5 file containing the spectra.
    """
    print('Read data from file: ', filename)
    h5f = h5py.File(filename,'r')
    print("data-sets:", list(h5f.keys()))
    # Load energy meshes
    if 'w' in h5f:
        w = np.array(h5f['w'])
    if 'wIn' in h5f:
        wIn = np.array(h5f['wIn'])
    if 'wLoss' in h5f:
        wLoss = np.array(h5f['wLoss'])
    # Load momentum vector information
    if 'qsNIXS' in h5f:
        qs = np.array(h5f['qsNIXS'])
    # Load radial mesh information
    if 'r' in h5f and 'RiNIXS' in h5f and 'RjNIXS' in h5f:
        r = np.array(h5f['r'])
        Ri = np.array(h5f['RiNIXS'])
        Rj = np.array(h5f['RjNIXS'])
    # Load thermally averaged spectra
    if 'PSthermal' in h5f:
        ps = np.array(h5f['PSthermal'])
        print(np.shape(ps))
    if 'XPSthermal' in h5f:
        xps = np.array(h5f['XPSthermal'])
        print(np.shape(xps))
    if 'XASthermal' in h5f:
        xas = np.array(h5f['XASthermal'])
        print(np.shape(xas))
    if 'RIXSthermal' in h5f:
        rixs = np.array(h5f['RIXSthermal'])
        print(np.shape(rixs))
    if 'NIXSthermal' in h5f:
        nixs = np.array(h5f['NIXSthermal'])
        print(np.shape(nixs))
    h5f.close()
    print('Plot spectra...')
    # The "'x' in locals()" tests check which spectra were actually loaded
    # above; each plotting section runs only when its data exists.
    if 'ps' in locals():
        print('Photo-emission spectroscopy (PS) spectrum')
        fig = plt.figure()
        # Sum over spin-orbitals
        plt.plot(w,np.sum(ps,axis=0),'-k',label='photo-emission')
        plt.legend()
        plt.xlabel(r'$\omega$ (eV)')
        plt.ylabel('Intensity')
        #plt.xlim([-8,18])
        #plt.ylim([0,0.25])
        plt.tight_layout()
        plt.show()
    if 'xps' in locals():
        print('X-ray photo-emission spectroscopy (XPS) spectrum')
        fig = plt.figure()
        # Sum over spin-orbitals
        plt.plot(w,np.sum(xps,axis=0),'-k',label='XPS')
        plt.legend()
        plt.xlabel(r'$\omega$ (eV)')
        plt.ylabel('Intensity')
        #plt.xlim([-8,18])
        #plt.ylim([0,0.25])
        plt.tight_layout()
        plt.show()
    if 'xas' in locals():
        print('XAS spectrum')
        fig = plt.figure()
        # Sum over polarizations
        plt.plot(w,np.sum(xas[0:],axis=0),'-k',label='XAS')
        if 'rixs' in locals():
            # Fluorescence yield: integrate RIXS over polarizations and
            # energy loss (axes 0, 1 and 3), leaving the wIn dependence.
            scaleFY = 1./(pi*np.shape(rixs)[0])
            print('Fluorescence yield spectrum')
            plt.plot(wIn,(wLoss[1]-wLoss[0])*np.sum(rixs[0:],axis=(0,1,3))*scaleFY,
                     '-r',label='FY')
            # Quasi-elastic part: restrict the loss integration to
            # energy losses below 0.2 eV.
            mask = wLoss < 0.2
            y = np.sum(rixs[:, :, :, mask], axis=(0, 1, 3))
            plt.plot(wIn, (wLoss[1] - wLoss[0]) * y * scaleFY, "-b", label="quasi-elastic FY")
        plt.legend()
        plt.xlabel(r'$\omega_{in}$ (eV)')
        plt.ylabel('Intensity')
        #plt.xlim([-8,18])
        #plt.ylim([0,0.25])
        plt.tight_layout()
        plt.show()
    if 'nixs' in locals():
        print('NIXS spectrum')
        fig = plt.figure()
        # Label curves by |q| when momentum vectors are available.
        if "qs" in locals():
            labels = ["|q|={:3.1f}".format(np.linalg.norm(q)) + r" A$^{-1}$" for q in qs]
        else:
            labels = [str(i) for i in range(len(nixs))]
        for i in range(len(nixs)):
            plt.plot(wLoss,nixs[i,:], label=labels[i])
        plt.legend()
        plt.xlabel(r'$\omega_{loss}$ (eV)')
        plt.ylabel('Intensity')
        plt.tight_layout()
        plt.show()
    if 'rixs' in locals():
        print('Energy loss spectra')
        fig,axes = plt.subplots(nrows=2,sharex=True)
        # L3-edge energies.
        # Adjust these energies to the current material.
        es = np.arange(-5 , -0 , 0.1)
        plotOffset = 0.1
        print('Chosen L3 energies: ', es)
        print('Chosen plotOffset: ', plotOffset)
        for n,e in enumerate(es[-1::-1]):
            # Pick the wIn mesh point closest to the requested energy e,
            # then plot that loss cut, vertically offset for visibility.
            i = np.argmin(np.abs(wIn-e))
            axes[0].plot(wLoss, plotOffset*(len(es)-1-n) + np.sum(rixs, axis=(0,1))[i,:],
                         label=r'$\omega_{in}$' + '={:3.1f}'.format(e))
        # L2-edge energies.
        # Adjust these energies to the current material.
        es = np.arange( 13 , 17 , 0.1)
        plotOffset = 0.1
        print('Chosen L2 energies: ', es)
        print('Chosen plotOffset: ', plotOffset)
        for n,e in enumerate(es[-1::-1]):
            i = np.argmin(np.abs(wIn-e))
            axes[1].plot(wLoss,plotOffset*(len(es)-1-n) + np.sum(rixs,axis=(0,1))[i,:],
                         label=r'$\omega_{in}$' + '={:3.1f}'.format(e))
        axes[1].set_xlabel(r'$E_{loss}$ (eV)')
        axes[0].set_title(r'$L_3$')
        axes[1].set_title(r'$L_2$')
        for ax in axes:
            ax.legend()
        #plt.tight_layout()
        plt.show()
    if 'rixs' in locals():
        print('RIXS map')
        print('Plot log10 of RIXS intensity for better visibility.')
        print('In-coming photon mesh resolution: {:5.3f} eV'.format(wIn[1]-wIn[0]))
        print('Energy loss mesh resolution: {:5.3f} eV'.format(wLoss[1]-wLoss[0]))
        # Intensities below this cut-off are masked (NaN) so log10 stays finite.
        plotCutOff = 0.001
        # Sum over in and out-going polarizations
        fig = plt.figure()
        tmp = np.sum(rixs,axis=(0,1)).T
        mask = tmp < plotCutOff
        tmp[mask] = np.nan
        # Choose a nice colormap, e.g. 'viridis' or 'Blues'
        cs = plt.contourf(wIn,wLoss,np.log10(tmp),cmap=plt.get_cmap('viridis'))
        #cs2 = plt.contour(cs, levels=cs.levels[::2], cmap=plt.get_cmap('viridis'))
        # Make a colorbar for the ContourSet returned by the contourf call.
        cbar = fig.colorbar(cs)
        cbar.ax.set_ylabel('log RIXS intensity')
        # Add the contour line levels to the colorbar
        #cbar.add_lines(cs2)
        #for e in wIn:
        #    plt.plot([e,e],[wLoss[0],wLoss[-1]],'-k',lw=0.5)
        plt.grid(c='k', ls='-', alpha=0.3)
        plt.xlabel(r'$\omega_{in}$ (eV)')
        plt.ylabel(r'$\omega_{loss}$ (eV)')
        plt.tight_layout()
        plt.show()
        # All polarization combinations In:x,y,z , Out:x,y,z
        # NOTE(review): the first two rixs axes appear to index in/out
        # polarizations (cf. the sums over axis=(0,1) above) -- confirm.
        fig, axes = plt.subplots(nrows=np.shape(rixs)[0], ncols=np.shape(rixs)[1], sharex=True, sharey=True)
        if np.shape(rixs)[:2] == (1, 1):
            # Single polarization combination: plt.subplots returned a
            # single Axes object, not an array of them.
            tmp = np.copy(rixs[0, 0, :, :].T)
            mask = tmp < plotCutOff
            tmp[mask] = np.nan
            # Choose a nice colormap, e.g. 'viridis' or 'Blues'
            cs = axes.contourf(wIn,wLoss,np.log10(tmp),cmap=plt.get_cmap('viridis'))
            #cs2 = plt.contour(cs, levels=cs.levels[::2], cmap=plt.get_cmap('viridis'))
            # Make a colorbar for the ContourSet returned by the contourf call.
            cbar = fig.colorbar(cs, ax=axes)
            cbar.ax.set_ylabel('log RIXS intensity')
            # Add the contour line levels to the colorbar
            #cbar.add_lines(cs2)
            #for e in wIn:
            #    plt.plot([e,e],[wLoss[0],wLoss[-1]],'-k',lw=0.5)
            plt.grid(c='k', ls='-', alpha=0.3)
            axes.set_xlabel(r'$\omega_{in}$ (eV)')
            axes.set_ylabel(r'$\omega_{loss}$ (eV)')
        else:
            # One sub-plot per (in, out) polarization combination.
            for i in range(np.shape(axes)[0]):
                for j in range(np.shape(axes)[1]):
                    tmp = np.copy(rixs[i,j,:,:].T)
                    mask = tmp < plotCutOff
                    tmp[mask] = np.nan
                    # Choose a nice colormap, e.g. 'viridis' or 'Blues'
                    cs = axes[i, j].contourf(wIn, wLoss, np.log10(tmp), cmap=plt.get_cmap("viridis"))
                    # cs2 = plt.contour(cs, levels=cs.levels[::2], cmap=plt.get_cmap('viridis'))
                    # Make a colorbar for the ContourSet returned by the contourf call.
                    cbar = fig.colorbar(cs, ax=axes[i,j])
                    cbar.ax.set_ylabel('log RIXS intensity')
                    # Add the contour line levels to the colorbar
                    #cbar.add_lines(cs2)
                    #for e in wIn:
                    #    plt.plot([e,e],[wLoss[0],wLoss[-1]],'-k',lw=0.5)
                    #plt.grid(c='k', ls='-', alpha=0.3)
                    axes[i,j].set_title('(' + str(i) + str(j) + ')')
            # Only label the outer edges of the grid of sub-plots.
            for ax in axes[-1,:]:
                ax.set_xlabel(r'$\omega_{in}$ (eV)')
            for ax in axes[:,0]:
                ax.set_ylabel(r'$\omega_{loss}$ (eV)')
        plt.tight_layout()
        plt.show()
def _main():
    """Parse command-line arguments and plot the spectra in the given file."""
    arg_parser = argparse.ArgumentParser(description='Plot spectra')
    arg_parser.add_argument('--filename', type=str, default="spectra.h5",
                            help='Filename containing spectra.')
    args = arg_parser.parse_args()
    if not os.path.isfile(args.filename):
        raise Exception('Data file does not exist: ' + args.filename)
    plot_spectra_in_file(args.filename)


if __name__ == "__main__":
    _main()
| [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.abs",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.linalg.norm",
"matplotlib.pyplot.tight_layout",
"numpy.copy",
"numpy.log10",
"matplotlib.pyplot.subplots",
"h5py.File",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cm... | [((343, 367), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (352, 367), False, 'import h5py\n'), ((8545, 8596), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot spectra"""'}), "(description='Plot spectra')\n", (8568, 8596), False, 'import argparse\n'), ((465, 483), 'numpy.array', 'np.array', (["h5f['w']"], {}), "(h5f['w'])\n", (473, 483), True, 'import numpy as np\n'), ((519, 539), 'numpy.array', 'np.array', (["h5f['wIn']"], {}), "(h5f['wIn'])\n", (527, 539), True, 'import numpy as np\n'), ((579, 601), 'numpy.array', 'np.array', (["h5f['wLoss']"], {}), "(h5f['wLoss'])\n", (587, 601), True, 'import numpy as np\n'), ((678, 701), 'numpy.array', 'np.array', (["h5f['qsNIXS']"], {}), "(h5f['qsNIXS'])\n", (686, 701), True, 'import numpy as np\n'), ((808, 826), 'numpy.array', 'np.array', (["h5f['r']"], {}), "(h5f['r'])\n", (816, 826), True, 'import numpy as np\n'), ((840, 863), 'numpy.array', 'np.array', (["h5f['RiNIXS']"], {}), "(h5f['RiNIXS'])\n", (848, 863), True, 'import numpy as np\n'), ((877, 900), 'numpy.array', 'np.array', (["h5f['RjNIXS']"], {}), "(h5f['RjNIXS'])\n", (885, 900), True, 'import numpy as np\n'), ((979, 1005), 'numpy.array', 'np.array', (["h5f['PSthermal']"], {}), "(h5f['PSthermal'])\n", (987, 1005), True, 'import numpy as np\n'), ((1076, 1103), 'numpy.array', 'np.array', (["h5f['XPSthermal']"], {}), "(h5f['XPSthermal'])\n", (1084, 1103), True, 'import numpy as np\n'), ((1175, 1202), 'numpy.array', 'np.array', (["h5f['XASthermal']"], {}), "(h5f['XASthermal'])\n", (1183, 1202), True, 'import numpy as np\n'), ((1276, 1304), 'numpy.array', 'np.array', (["h5f['RIXSthermal']"], {}), "(h5f['RIXSthermal'])\n", (1284, 1304), True, 'import numpy as np\n'), ((1379, 1407), 'numpy.array', 'np.array', (["h5f['NIXSthermal']"], {}), "(h5f['NIXSthermal'])\n", (1387, 1407), True, 'import numpy as np\n'), ((1583, 1595), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), 
'()\n', (1593, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1703, 1715), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1713, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1754), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega$ (eV)"""'], {}), "('$\\\\omega$ (eV)')\n", (1734, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1763, 1786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (1773, 1786), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1868), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1877, 1887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1885, 1887), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2005, 2007), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2117), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2115, 2117), True, 'import matplotlib.pyplot as plt\n'), ((2126, 2156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega$ (eV)"""'], {}), "('$\\\\omega$ (eV)')\n", (2136, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2165, 2188), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (2175, 2188), True, 'import matplotlib.pyplot as plt\n'), ((2252, 2270), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2268, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2287, 2289), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2373), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2371, 2373), True, 'import matplotlib.pyplot as plt\n'), ((2910, 2922), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2920, 2922), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2966), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""$\\\\omega_{in}$ (eV)"""'], {}), "('$\\\\omega_{in}$ (eV)')\n", (2941, 2966), True, 'import matplotlib.pyplot as plt\n'), ((2975, 2998), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (2985, 2998), True, 'import matplotlib.pyplot as plt\n'), ((3062, 3080), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3078, 3080), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3099), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3097, 3099), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3185), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3183, 3185), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3485), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3483, 3485), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3531), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega_{loss}$ (eV)"""'], {}), "('$\\\\omega_{loss}$ (eV)')\n", (3504, 3531), True, 'import matplotlib.pyplot as plt\n'), ((3540, 3563), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (3550, 3563), True, 'import matplotlib.pyplot as plt\n'), ((3572, 3590), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3588, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3607, 3609), True, 'import matplotlib.pyplot as plt\n'), ((3694, 3728), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'sharex': '(True)'}), '(nrows=2, sharex=True)\n', (3706, 3728), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3848), 'numpy.arange', 'np.arange', (['(-5)', '(-0)', '(0.1)'], {}), '(-5, -0, 0.1)\n', (3835, 3848), True, 'import numpy as np\n'), ((4309, 4331), 'numpy.arange', 'np.arange', (['(13)', '(17)', '(0.1)'], {}), '(13, 17, 0.1)\n', (4318, 4331), True, 'import numpy as np\n'), ((4900, 4910), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (4908, 4910), True, 'import matplotlib.pyplot as plt\n'), ((5294, 5306), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5304, 5306), True, 'import matplotlib.pyplot as plt\n'), ((5963, 5997), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'c': '"""k"""', 'ls': '"""-"""', 'alpha': '(0.3)'}), "(c='k', ls='-', alpha=0.3)\n", (5971, 5997), True, 'import matplotlib.pyplot as plt\n'), ((6006, 6041), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega_{in}$ (eV)"""'], {}), "('$\\\\omega_{in}$ (eV)')\n", (6016, 6041), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\omega_{loss}$ (eV)"""'], {}), "('$\\\\omega_{loss}$ (eV)')\n", (6060, 6087), True, 'import matplotlib.pyplot as plt\n'), ((6096, 6114), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6112, 6114), True, 'import matplotlib.pyplot as plt\n'), ((6123, 6133), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6131, 6133), True, 'import matplotlib.pyplot as plt\n'), ((8465, 8483), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8481, 8483), True, 'import matplotlib.pyplot as plt\n'), ((8492, 8502), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8500, 8502), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1032), 'numpy.shape', 'np.shape', (['ps'], {}), '(ps)\n', (1028, 1032), True, 'import numpy as np\n'), ((1118, 1131), 'numpy.shape', 'np.shape', (['xps'], {}), '(xps)\n', (1126, 1131), True, 'import numpy as np\n'), ((1217, 1230), 'numpy.shape', 'np.shape', (['xas'], {}), '(xas)\n', (1225, 1230), True, 'import numpy as np\n'), ((1319, 1333), 'numpy.shape', 'np.shape', (['rixs'], {}), '(rixs)\n', (1327, 1333), True, 'import numpy as np\n'), ((1422, 1436), 'numpy.shape', 'np.shape', (['nixs'], {}), '(nixs)\n', (1430, 1436), True, 'import numpy as np\n'), ((1648, 1666), 'numpy.sum', 'np.sum', (['ps'], {'axis': '(0)'}), '(ps, axis=0)\n', (1654, 1666), True, 
'import numpy as np\n'), ((2060, 2079), 'numpy.sum', 'np.sum', (['xps'], {'axis': '(0)'}), '(xps, axis=0)\n', (2066, 2079), True, 'import numpy as np\n'), ((2426, 2449), 'numpy.sum', 'np.sum', (['xas[0:]'], {'axis': '(0)'}), '(xas[0:], axis=0)\n', (2432, 2449), True, 'import numpy as np\n'), ((2763, 2806), 'numpy.sum', 'np.sum', (['rixs[:, :, :, mask]'], {'axis': '(0, 1, 3)'}), '(rixs[:, :, :, mask], axis=(0, 1, 3))\n', (2769, 2806), True, 'import numpy as np\n'), ((2819, 2906), 'matplotlib.pyplot.plot', 'plt.plot', (['wIn', '((wLoss[1] - wLoss[0]) * y * scaleFY)', '"""-b"""'], {'label': '"""quasi-elastic FY"""'}), "(wIn, (wLoss[1] - wLoss[0]) * y * scaleFY, '-b', label=\n 'quasi-elastic FY')\n", (2827, 2906), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3466), 'matplotlib.pyplot.plot', 'plt.plot', (['wLoss', 'nixs[i, :]'], {'label': 'labels[i]'}), '(wLoss, nixs[i, :], label=labels[i])\n', (3430, 3466), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5346), 'numpy.sum', 'np.sum', (['rixs'], {'axis': '(0, 1)'}), '(rixs, axis=(0, 1))\n', (5327, 5346), True, 'import numpy as np\n'), ((5502, 5515), 'numpy.log10', 'np.log10', (['tmp'], {}), '(tmp)\n', (5510, 5515), True, 'import numpy as np\n'), ((6364, 6391), 'numpy.copy', 'np.copy', (['rixs[0, 0, :, :].T'], {}), '(rixs[0, 0, :, :].T)\n', (6371, 6391), True, 'import numpy as np\n'), ((7070, 7104), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'c': '"""k"""', 'ls': '"""-"""', 'alpha': '(0.3)'}), "(c='k', ls='-', alpha=0.3)\n", (7078, 7104), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4050), 'numpy.abs', 'np.abs', (['(wIn - e)'], {}), '(wIn - e)\n', (4041, 4050), True, 'import numpy as np\n'), ((4521, 4536), 'numpy.abs', 'np.abs', (['(wIn - e)'], {}), '(wIn - e)\n', (4527, 4536), True, 'import numpy as np\n'), ((5521, 5544), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (5533, 5544), True, 'import matplotlib.pyplot as plt\n'), ((6316, 6330), 'numpy.shape', 
'np.shape', (['rixs'], {}), '(rixs)\n', (6324, 6330), True, 'import numpy as np\n'), ((6564, 6577), 'numpy.log10', 'np.log10', (['tmp'], {}), '(tmp)\n', (6572, 6577), True, 'import numpy as np\n'), ((6235, 6249), 'numpy.shape', 'np.shape', (['rixs'], {}), '(rixs)\n', (6243, 6249), True, 'import numpy as np\n'), ((6260, 6274), 'numpy.shape', 'np.shape', (['rixs'], {}), '(rixs)\n', (6268, 6274), True, 'import numpy as np\n'), ((6583, 6606), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (6595, 6606), True, 'import matplotlib.pyplot as plt\n'), ((7254, 7268), 'numpy.shape', 'np.shape', (['axes'], {}), '(axes)\n', (7262, 7268), True, 'import numpy as np\n'), ((7351, 7378), 'numpy.copy', 'np.copy', (['rixs[i, j, :, :].T'], {}), '(rixs[i, j, :, :].T)\n', (7358, 7378), True, 'import numpy as np\n'), ((2527, 2541), 'numpy.shape', 'np.shape', (['rixs'], {}), '(rixs)\n', (2535, 2541), True, 'import numpy as np\n'), ((2640, 2672), 'numpy.sum', 'np.sum', (['rixs[0:]'], {'axis': '(0, 1, 3)'}), '(rixs[0:], axis=(0, 1, 3))\n', (2646, 2672), True, 'import numpy as np\n'), ((3258, 3275), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (3272, 3275), True, 'import numpy as np\n'), ((4109, 4134), 'numpy.sum', 'np.sum', (['rixs'], {'axis': '(0, 1)'}), '(rixs, axis=(0, 1))\n', (4115, 4134), True, 'import numpy as np\n'), ((4594, 4619), 'numpy.sum', 'np.sum', (['rixs'], {'axis': '(0, 1)'}), '(rixs, axis=(0, 1))\n', (4600, 4619), True, 'import numpy as np\n'), ((7305, 7319), 'numpy.shape', 'np.shape', (['axes'], {}), '(axes)\n', (7313, 7319), True, 'import numpy as np\n'), ((7588, 7601), 'numpy.log10', 'np.log10', (['tmp'], {}), '(tmp)\n', (7596, 7601), True, 'import numpy as np\n'), ((7608, 7631), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (7620, 7631), True, 'import matplotlib.pyplot as plt\n')] |
""" 2d array of channels x time series for windowed time series"""
import numpy
import warnings
from pySPACE.resources.data_types import base
class TimeSeries(base.BaseData):
    """ Time Series object

    Represents a finite length time series consisting (potentially) of
    several channels. Objects of this type are called "windows",
    "epochs", or "trials" in other contexts.
    Normally one channel corresponds to one sensor.

    The time series object is a
    2d array of channels times time series amplitudes
    (*mandatory* first argument in constructor)
    with some additional properties.

    The additional properties are:

    * channel_names (*mandatory* second argument in constructor,
      list of strings without underscores)
    * sampling_frequency (*mandatory* third argument in constructor,
      e.g., 5000.0 for 5kHz)
    * start_time (*optional*)
    * end_time (*optional*)
    * marker_name (the name of the marker used to create this object,
      dictionary of included marker names and time stamps,
      *optional*)
    * name & tag (text format of object meta info, *optional*)

    Channels can also be pseudo channels after spatial filtering.

    When creating a TimeSeries object, first the array has to be given to
    the init function and then the other parameters/properties as
    keyword arguments.
    The array can be specified as two dimensional numpy array or in
    list notation. The channels are on the second axes.
    For example using the list ``[[1,2,3],[4,5,6]]`` would result in three
    channels and two time points.

    For accessing the array only without the meta information,
    please use the command

    .. code-block:: python

        x = data.view(numpy.ndarray)

    which hides this information.

    TimeSeries objects are normally organized/collected in a
    :class:`~pySPACE.resources.dataset_defs.time_series.TimeSeriesDataset`.
    This type of dataset can be also used to generate the objects,
    e.g., from csv files.

    For data access in a node chain, data is loaded with a node from the
    :mod:`~pySPACE.missions.nodes.source.time_series_source` module
    as first node
    and saved with the
    :class:`~pySPACE.missions.nodes.sink.time_series_sink.TimeSeriesSinkNode`
    as the last node.

    It is also possible to create time series data from
    not segmented data streams as described in the
    :class:`~pySPACE.resources.dataset_defs.stream.StreamDataset`.

    :Author: Jan Hendrik Metzen
    :Created: 2008/03/05
    :Completely Refactored: 2008/08/18
    :BaseData compatibility: David Feess, 2010/09/27
    """

    def __new__(subtype, input_array, channel_names, sampling_frequency,
                start_time=None, end_time=None, name=None,
                marker_name=None, tag=None):
        """Create a new TimeSeries from an array (or per-channel dict).

        :param input_array: 2d array (time x channels), nested list, or a
            dict mapping channel name -> 1d samples.
        :param channel_names: list of channel name strings (second axis).
        :param sampling_frequency: sampling rate in Hz (stored as float).
        """
        if isinstance(input_array, dict):
            # Dict input: stack the channels in channel_names order.
            input_array = numpy.array(
                [input_array[channel] for channel in channel_names])
        # Input array is an already formed ndarray instance.
        # We first cast to be our class type.
        obj = base.BaseData.__new__(subtype, numpy.atleast_2d(input_array))
        if obj.ndim > 2:
            # Drop the leading singleton-style dimension and warn.
            input_array = obj[0]
            obj = base.BaseData.__new__(subtype, input_array)
            warnings.warn("Too many dimensions for Time Series Object!")
        # add subclasses attributes to the created instance
        obj.channel_names = channel_names
        # Consistency check only: a mismatch is reported but not fatal.
        try:
            assert(len(channel_names) == obj.shape[1]),\
                "Channel names (%s) do not match array dimensions (%s)! Fix this!" \
                % (str(channel_names), str(obj.shape))
        except Exception:
            warnings.warn(
                "Array dimensions (%s) do not match channel names (len: %i, names: %s)! Fix this!"
                % (str(obj.shape), len(channel_names), str(channel_names)))
        obj.sampling_frequency = float(sampling_frequency)
        obj.start_time = start_time
        obj.end_time = end_time
        obj.name = name
        obj.marker_name = marker_name
        if tag is not None:
            obj.tag = tag
        # Finally, we must return the newly created object:
        return obj

    def __array_finalize__(self, obj):
        """Propagate the meta data when numpy creates derived arrays."""
        super(TimeSeries, self).__array_finalize__(obj)
        if obj is not None and type(obj) != numpy.ndarray:
            # Derived from another TimeSeries: inherit its attributes.
            self.channel_names_hash = getattr(obj, 'channel_names_hash', None)
            self.sampling_frequency = getattr(obj, 'sampling_frequency', None)
            self.start_time = getattr(obj, 'start_time', None)
            self.end_time = getattr(obj, 'end_time', None)
            self.name = getattr(obj, 'name', None)
            self.marker_name = getattr(obj, 'marker_name', None)
        else:
            # Created from a plain ndarray: no meta data available yet.
            self.channel_names_hash = None
            self.sampling_frequency = None
            self.start_time = None
            self.end_time = None
            self.name = None
            self.marker_name = None

    def __reduce__(self):
        # Refer to
        # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
        # for infos about pickling ndarray subclasses
        object_state = list(super(TimeSeries, self).__reduce__())
        subclass_state = (self.channel_names, self.sampling_frequency,
                          self.start_time, self.end_time, self.name,
                          self.marker_name)
        object_state[2].append(subclass_state)
        object_state[2] = tuple(object_state[2])
        return tuple(object_state)

    def __setstate__(self, state):
        """Restore state on unpickling (supports the legacy 2-tuple form)."""
        if len(state) == 2:  # For compatibility with old TS implementation
            nd_state, own_state = state
            numpy.ndarray.__setstate__(self, nd_state)
        else:  # len == 3: new BaseData timeseries.
            nd_state, base_state, own_state = state
            super(TimeSeries, self).__setstate__((nd_state, base_state))
        (self.channel_names, self.sampling_frequency, self.start_time,
         self.end_time, self.name, self.marker_name) = own_state

    @staticmethod
    def _generate_tag(obj):
        """generate new tag based on time series attributes start_time,
        end_time and name. The name is usually a sentence, with the last word
        indicating the class. """
        # if no information present: return None
        if getattr(obj, 'name', None) is None and \
                getattr(obj, 'start_time', None) is None and \
                getattr(obj, 'end_time', None) is None:
            return None
        else:
            # If attribute name is provided, the last word should represent class:
            if getattr(obj, 'name', None) is None:
                class_name = 'na'
            else:
                class_name = obj.name.split(' ')[-1]
            if getattr(obj, 'start_time', None) is None:
                start = 'na'
            else:
                start = str(int(obj.start_time))
            if getattr(obj, 'end_time', None) is None:
                end = 'na'
            else:
                end = str(int(obj.end_time))
            return 'Epoch Start: %sms; End: %sms; Class: %s' % \
                (start, end, class_name)

    # In order to reduce the memory footprint, we do not store the channel
    # names once per instance but only once per occurence. Instead we store a
    # unique hash once per instance that allows to retrieve the channel names
    channel_names_dict = {}

    def get_channel_names(self):
        return TimeSeries.channel_names_dict[self.channel_names_hash]

    def set_channel_names(self, channel_names):
        self.channel_names_hash = hash(str(channel_names))
        # NOTE: dict.has_key() was removed in Python 3; use ``in`` instead.
        if self.channel_names_hash not in TimeSeries.channel_names_dict:
            TimeSeries.channel_names_dict[self.channel_names_hash] = channel_names

    def del_channel_names(self):
        pass

    channel_names = property(get_channel_names, set_channel_names,
                             del_channel_names,
                             "The channel_names property.")

    @staticmethod
    def replace_data(old, data, **kwargs):
        """ Create a new time series with the given data but the old metadata.

        A factory method which creates a time series object with the given
        data and the metadata from the old time_series
        """
        data = TimeSeries(data,
                          channel_names=kwargs.get('channel_names',
                                                   old.channel_names),
                          sampling_frequency=kwargs.get('sampling_frequency',
                                                        old.sampling_frequency),
                          start_time=kwargs.get('start_time', old.start_time),
                          end_time=kwargs.get('end_time', old.end_time),
                          name=kwargs.get('name', old.name),
                          marker_name=kwargs.get('marker_name', old.marker_name))
        data.inherit_meta_from(old)
        if "tag" in kwargs:
            data.tag = kwargs["tag"]
        return data

    def get_channel(self, channel_name):
        """ Return the values of the channel with name *channel_name* """
        channel_index = self.channel_names.index(channel_name)
        data = self.view(numpy.ndarray)
        return data[:, channel_index]

    def reorder(self, ordered_channel_list):
        """ Reorder TimeSeries according to ordered_channel_list

        This function takes the list given as argument as list of channel names,
        orders the given TimeSeries object according to this list and returns a
        reordered TimeSeries object.
        """
        for elem in ordered_channel_list:
            assert elem in self.channel_names, \
                "TimeSeries:: Reordering impossible. %s is not present in original data!" % elem
        assert len(ordered_channel_list) == len(self.channel_names), \
            "TimeSeries:: Reordering impossible. Channel list lengths differ!"
        # BUGFIX: the previous pairwise swap used a *view* as the temporary
        # (so the swap duplicated columns instead of exchanging them),
        # compared integer indices with ``is not``, and did not account for
        # columns already moved earlier in the loop. Applying the whole
        # permutation at once via fancy indexing (which copies) is correct.
        permutation = [self.channel_names.index(elem)
                       for elem in ordered_channel_list]
        self[:, :] = self[:, permutation]
        self.channel_names = ordered_channel_list

    def _ms_to_samples(self, ms):
        # Convert a duration in milliseconds to a number of samples.
        return ms / 1000.0 * self.sampling_frequency

    def _samples_to_ms(self, samples):
        # Convert a number of samples to a duration in milliseconds.
        return samples / float(self.sampling_frequency) * 1000

    def __str__(self):
        str_repr = "TimeSeriesObject \nChannel_names: "
        str_repr += str(self.channel_names)
        str_repr += "\n"
        va = self.view(numpy.ndarray)
        for index, channel_name in enumerate(self.channel_names):
            str_repr += "%s : %s \n" % (channel_name, va[:, index])
        str_repr += "\n"
        return str_repr

    def __eq__(self, other):
        """ Same channels (names) and values """
        if not type(self) == type(other):
            return False
        if not set(self.channel_names) == set(other.channel_names):
            return False
        if not self.shape == other.shape:
            return False
        if self.channel_names == other.channel_names:
            return numpy.allclose(self.view(numpy.ndarray), other.view(numpy.ndarray))
        else:
            # Same channels in different order: compare channel by channel.
            # BUGFIX: allclose was called with a single tuple argument
            # (TypeError) and indexed rows; channels live on axis 1
            # (cf. get_channel and reorder).
            for channel in self.channel_names:
                if not numpy.allclose(self[:, self.channel_names.index(channel)],
                                      other[:, other.channel_names.index(channel)]):
                    return False
            return True
| [
"numpy.ndarray.__setstate__",
"pySPACE.resources.data_types.base.BaseData.__new__",
"numpy.array",
"warnings.warn",
"numpy.atleast_2d"
] | [((3052, 3069), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (3063, 3069), False, 'import numpy\n'), ((3220, 3249), 'numpy.atleast_2d', 'numpy.atleast_2d', (['input_array'], {}), '(input_array)\n', (3236, 3249), False, 'import numpy\n'), ((3327, 3370), 'pySPACE.resources.data_types.base.BaseData.__new__', 'base.BaseData.__new__', (['subtype', 'input_array'], {}), '(subtype, input_array)\n', (3348, 3370), False, 'from pySPACE.resources.data_types import base\n'), ((3383, 3442), 'warnings.warn', 'warnings.warn', (['"""To many dimensions for Time Series Object!"""'], {}), "('To many dimensions for Time Series Object!')\n", (3396, 3442), False, 'import warnings\n'), ((5878, 5920), 'numpy.ndarray.__setstate__', 'numpy.ndarray.__setstate__', (['self', 'nd_state'], {}), '(self, nd_state)\n', (5904, 5920), False, 'import numpy\n')] |
"""
Suppose you have a bar with n seats in a row. Unfriendly people arrive
and seat themselves randomly. Since they are unfriendly, they will not
sit next to anyone who is already seated.
What is the expected occupancy fraction when no one else can be seated?
"""
import numpy as np
from fractions import Fraction
EMPTY = 0        # seat is empty and available
UNAVAILABLE = 1  # seat is empty but blocked by an occupied neighbour
OCCUPIED = 2     # seat is occupied

def occupancy_fraction(seat_status):
    """Return the fraction of OCCUPIED entries in seat_status (0 for empty input)."""
    if len(seat_status) == 0:
        return 0
    return (seat_status == OCCUPIED).mean()

def occupancy(seat_status):
    """Return the number of OCCUPIED entries in seat_status."""
    return (seat_status == OCCUPIED).sum()
def seat_people(num_seats):
    """Simulate seating people in num_seats seats.

    Returns a 1xn array of seat statuses (EMPTY, UNAVAILABLE, or OCCUPIED).
    Loops until no EMPTY seat is left; each iteration seats one person in a
    uniformly random EMPTY seat and blocks its neighbours.  Both the loop
    test and the random choice scan all n statuses, so one simulation runs
    in O(n^2) time.
    """
    # Seats are numbered 0 to num_seats-1.
    seats = np.arange(num_seats)
    # Using the fact that EMPTY == 0.  Fix: np.int was removed in numpy 1.24;
    # the builtin int is the exact equivalent dtype.
    seat_status = np.zeros(num_seats, dtype=int)
    # While a seat is available, put the next person in a random seat.
    while any(seat_status == EMPTY):
        # Choose a random seat from those available, and sit there.
        seat = np.random.choice(seats[seat_status == EMPTY])
        seat_status[seat] = OCCUPIED
        # Mark any adjacent EMPTY seats as unavailable.
        if seat > 0 and seat_status[seat - 1] == EMPTY:
            seat_status[seat - 1] = UNAVAILABLE
        if seat < num_seats - 1 and seat_status[seat + 1] == EMPTY:
            seat_status[seat + 1] = UNAVAILABLE
    return seat_status
def seat_people_faster(num_seats):
    """Simulate seating people in num_seats seats using a rejection algorithm.

    Returns a 1xn array of seat statuses (EMPTY, UNAVAILABLE, or OCCUPIED).
    Every person is assigned a unique random seat up front (a permutation);
    each person sits only if their assigned seat is still EMPTY.  One
    simulation runs in O(n) time.
    """
    # Assign each person from 0 to n-1 a unique seat via a random permutation.
    seat_choices = np.random.permutation(num_seats)
    # Using the fact that EMPTY == 0.  Fix: np.int was removed in numpy 1.24;
    # the builtin int is the exact equivalent dtype.
    seat_status = np.zeros(num_seats, dtype=int)
    # Track how many seats can still be taken so we can stop early.
    remaining_seats = num_seats
    for seat in seat_choices:
        # If the seat is available, seat the person; otherwise skip them.
        if seat_status[seat] == EMPTY:
            seat_status[seat] = OCCUPIED
            remaining_seats -= 1
            # Mark any adjacent EMPTY seats as unavailable.
            if seat > 0 and seat_status[seat - 1] == EMPTY:
                seat_status[seat - 1] = UNAVAILABLE
                remaining_seats -= 1
            if seat < num_seats - 1 and seat_status[seat + 1] == EMPTY:
                seat_status[seat + 1] = UNAVAILABLE
                remaining_seats -= 1
            # If there are no seats left, we're done.
            if remaining_seats == 0:
                break
    return seat_status
def seat_people_recursive(num_seats):
    """Simulate seating people in num_seats seats using a recursive algorithm.

    Returns a 1xn array of seat statuses (EMPTY, UNAVAILABLE, or OCCUPIED).
    Runs in O(n) time, assuming numpy.random.randint(k) runs in constant
    time for any k.
    """
    # Using the fact that EMPTY == 0.  Fix: np.int was removed in numpy 1.24;
    # the builtin int is the exact equivalent dtype.
    seat_status = np.zeros(num_seats, dtype=int)
    _seat_subarray(seat_status)
    return seat_status
def _seat_subarray(seat_status):
    """Recursively occupy a random seat, then fill the segments on each side.

    seat_status is assumed to be completely EMPTY.  Relies on numpy slicing
    returning views, so the recursive calls mutate the caller's array in place.
    """
    n = len(seat_status)
    if n == 0:
        return
    # Pick a random seat in this segment and occupy it.
    chosen = np.random.randint(n)
    seat_status[chosen] = OCCUPIED
    if chosen > 0:
        # Block the left neighbour, then seat the segment beyond it.
        seat_status[chosen - 1] = UNAVAILABLE
        _seat_subarray(seat_status[:chosen - 1])
    if chosen < n - 1:
        # Block the right neighbour, then seat the segment beyond it.
        seat_status[chosen + 1] = UNAVAILABLE
        _seat_subarray(seat_status[chosen + 2:])
def estimate_expected_occupancy_fraction(num_seats, num_trials, seating_function=seat_people_faster):
    """Estimate the expected occupancy fraction for num_seats seats by
    averaging num_trials simulations.

    As n goes to infinity, the occupancy fraction appears to converge to
    a value near 43.2%.
    """
    total = sum(occupancy_fraction(seating_function(num_seats))
                for _ in range(num_trials))
    return total / num_trials
def expected_occupancy(n, all=False):
    """Compute the exact expected final occupancy for (or up to) n seats.

    Uses memoized recursion, runs in O(n^2) time.  Returns a Fraction, or
    the whole memo list when all=True.
    The values up to 12 are:
    {0: '0 = 0.0000',
     1: '1 = 1.0000',
     2: '1 = 1.0000',
     3: '5/3 = 1.6667',
     4: '2 = 2.0000',
     5: '37/15 = 2.4667',
     6: '26/9 = 2.8889',
     7: '349/105 = 3.3238',
     8: '169/45 = 3.7556',
     9: '11873/2835 = 4.1880',
     10: '7277/1575 = 4.6203',
     11: '157567/31185 = 5.0527',
     12: '233249/42525 = 5.4850'}
    """
    memo = [-1] * (n + 1)
    _recursive_expected_occupancy(n, memo)
    memo[0] = Fraction(0)
    if not all:
        return memo[n]
    # The value for n-1 seats is skipped by the n-seat recursion (which
    # jumps straight to n-2), so compute it explicitly before returning.
    _recursive_expected_occupancy(n - 1, memo)
    return memo
def _recursive_expected_occupancy(n, memo):
"""memo should have n+1 indices: 0,1,...,n"""
if n <= 0:# or n > len(memo):
return 0
if memo[n] == -1:
numerator = sum([_recursive_expected_occupancy(k-2,memo)+_recursive_expected_occupancy(n-k-1,memo) for k in range(1,n+1)])
memo[n] = 1 + Fraction(numerator, n)
return memo[n]
def expected_occupancy_fraction(n, all=False):
    """Compute the expected final occupancy fraction for (or up to) n seats.

    The values up to 12 are:
    {0: '0 = 0.0000',
     1: '1 = 1.0000',
     2: '1/2 = 0.5000',
     3: '5/9 = 0.5556',
     4: '1/2 = 0.5000',
     5: '37/75 = 0.4933',
     6: '13/27 = 0.4815',
     7: '349/735 = 0.4748',
     8: '169/360 = 0.4694',
     9: '11873/25515 = 0.4653',
     10: '7277/15750 = 0.4620',
     11: '157567/343035 = 0.4593',
     12: '233249/510300 = 0.4571'}
    """
    occ = expected_occupancy(n, all)
    if all:
        # max(k, 1) avoids dividing by zero for the 0-seat entry.
        return [occ[k] / max(k, 1) for k in range(n + 1)]
    return occ / max(n, 1)
def main():
    """Print exact and simulated occupancy statistics for n = 0..12."""
    exact = expected_occupancy(12, True)
    print("Expected occupancies up to n=12:\n",
          {n: f'{str(x)} = {float(x):6.4f}' for n, x in enumerate(exact)})
    fracs = expected_occupancy_fraction(12, True)
    print("\nExpected occupancy fractions up to n=12:\n",
          {n: f'{str(x)} = {float(x):6.4f}' for n, x in enumerate(fracs)})
    estimates = (estimate_expected_occupancy_fraction(n, 1000) for n in range(12 + 1))
    print("\nEstimated expected occupancy fractions up to n=12:\n",
          {n: f'{x:6.4f}' for n, x in enumerate(estimates)})
# Run the demo printouts when executed as a script.
if __name__=="__main__":
    main()
| [
"numpy.zeros",
"numpy.arange",
"numpy.random.choice",
"numpy.random.permutation",
"fractions.Fraction"
] | [((1527, 1547), 'numpy.arange', 'np.arange', (['num_seats'], {}), '(num_seats)\n', (1536, 1547), True, 'import numpy as np\n'), ((1599, 1632), 'numpy.zeros', 'np.zeros', (['num_seats'], {'dtype': 'np.int'}), '(num_seats, dtype=np.int)\n', (1607, 1632), True, 'import numpy as np\n'), ((2917, 2949), 'numpy.random.permutation', 'np.random.permutation', (['num_seats'], {}), '(num_seats)\n', (2938, 2949), True, 'import numpy as np\n'), ((3050, 3083), 'numpy.zeros', 'np.zeros', (['num_seats'], {'dtype': 'np.int'}), '(num_seats, dtype=np.int)\n', (3058, 3083), True, 'import numpy as np\n'), ((4386, 4419), 'numpy.zeros', 'np.zeros', (['num_seats'], {'dtype': 'np.int'}), '(num_seats, dtype=np.int)\n', (4394, 4419), True, 'import numpy as np\n'), ((6394, 6405), 'fractions.Fraction', 'Fraction', (['(0)'], {}), '(0)\n', (6402, 6405), False, 'from fractions import Fraction\n'), ((1819, 1864), 'numpy.random.choice', 'np.random.choice', (['seats[seat_status == EMPTY]'], {}), '(seats[seat_status == EMPTY])\n', (1835, 1864), True, 'import numpy as np\n'), ((6982, 7004), 'fractions.Fraction', 'Fraction', (['numerator', 'n'], {}), '(numerator, n)\n', (6990, 7004), False, 'from fractions import Fraction\n')] |
import ROOT
def _get_outfile(outfile):
    """Return (tfile, created_here) for outfile.

    An already-open ROOT.TFile is reused (and cd()-ed into) with
    created_here False; anything else is treated as a path and opened
    in RECREATE mode with created_here True, so the caller knows
    whether it owns the file and must close it.
    """
    if not isinstance(outfile, ROOT.TFile):
        return ROOT.TFile.Open(outfile, 'RECREATE'), True
    outfile.cd()
    return outfile, False
def _printout(verbose, msg):
if verbose:
print(msg)
_dtypemap = {'int8': 'B',
'uint8': 'b',
'int16': 'S',
'uint16': 's',
'int32': 'I',
'uint32': 'i',
'float': 'F',
'halffloat': 'f',
'double': 'D',
'int64': 'L',
'uint64': 'l',
'bool': 'O'}
def _check_type_in_map(dtype, msg):
if dtype not in _dtypemap:
raise ValueError(msg)
def _setup_branch_scalar(field, tree, numpybufs, stringvars):
    """Create a TTree branch for a scalar (non-list) Arrow field.

    String fields are recorded in stringvars; numeric fields get a
    one-element numpy buffer registered in numpybufs.
    Raises ValueError for unsupported numeric types.
    """
    import numpy
    if field.type == 'string':
        stringvars.add(field.name)
        # Branch creation needs a dummy std::string object.
        dummy = ROOT.std.string()
        tree.Branch(field.name, dummy)
    else:
        _check_type_in_map(
            field.type,
            f'Field {field.name} has type "{field.type}" that is not supported')
        buf = numpy.zeros(shape=[1], dtype=field.type.to_pandas_dtype())
        numpybufs[field.name] = buf
        tree.Branch(field.name, buf, field.name + '/' + _dtypemap[field.type])
def _setup_branch_list(field, tree, vectorlens, stringarrs):
    """Create TTree branches for a list-typed Arrow field.

    String lists get a persistent std::vector<std::string> stored in
    stringarrs; numeric lists get a length-counter branch (int32 buffer in
    vectorlens) plus a variable-length array branch.
    Raises ValueError for unsupported element types.
    """
    import numpy
    if field.type.value_type == 'string':
        # Vector of strings: ROOT handles the std::vector object directly.
        strvec = ROOT.std.vector(ROOT.std.string)()
        stringarrs[field.name] = strvec
        tree.Branch(field.name, strvec)
        return
    _check_type_in_map(
        field.type.value_type,
        f'Field {field.name} is array of type "{field.type.value_type}" that is not supported')
    # Apache Arrow spec allows array lengths to be *signed* 64 bit integers,
    # but ROOT tends to complain (e.g. RDataFrame) if array lengths are
    # longer than 32 bits.
    counter = numpy.zeros(shape=[1], dtype='int32')
    vectorlens[field.name] = counter
    tree.Branch(f'{field.name}_parquet_n', counter, f'{field.name}_parquet_n/I')
    # Temporary array used only to declare the branch layout.
    init = numpy.zeros(shape=[1], dtype=field.type.value_type.to_pandas_dtype())
    tree.Branch(field.name, init,
                f'{field.name}[{field.name}_parquet_n]/{_dtypemap[field.type.value_type]}')
def _do_fill(tree, entry, table, numpyzips, stringvars, vectorlens, stringarrs):
    # Copy one table row (index `entry`) into the branch buffers and call
    # tree.Fill().  `ptrs` holds Python references to every object whose
    # address is handed to ROOT via SetBranchAddress, so they stay alive
    # until Fill() has read them.
    ptrs = []
    # Scalar numeric branches: copy the value into the 1-element buffer
    # registered at branch-creation time.
    for target, source in numpyzips:
        target[0] = source[entry]
    # String branches: build a fresh std::string and re-point the branch.
    for branch in stringvars:
        s0 = ROOT.std.string(table[branch][entry].as_py())
        tree.SetBranchAddress(branch, s0)
        ptrs.append(s0)
    # Variable-length numeric branches: set the length counter, then point
    # the branch at the numpy view of this entry's values.
    for branch, lentarget, source in vectorlens:
        values_arrow = source[entry]
        lentarget[0] = len(values_arrow)
        # Booleans don't work with zero copy but everything else should
        values = values_arrow.values.to_numpy(zero_copy_only=False)
        ptrs.append(values)
        tree.SetBranchAddress(branch, values)
    # Vector-of-string branches: refill the persistent std::vector in place.
    for branch, vec in stringarrs.items():
        vec.clear()
        for string in table[branch][entry].as_py():
            vec.push_back(string)
    tree.Fill()
def normalize_parquet(infiles):
    '''Convert the infiles argument to a list; verify schemas match across files.

    infiles may be a path, a file object, or an iterable of either.
    Returns (lfiles, schema).  Raises ValueError on a schema mismatch.
    '''
    import pyarrow.parquet as pq
    import io
    # Convert to a list.
    if isinstance(infiles, (str, io.IOBase)):
        lfiles = [infiles]
    else:
        try:
            lfiles = list(infiles)
        except TypeError:  # pragma: no cover
            # This really shouldn't be hit, but maybe there's an edge case
            lfiles = [infiles]
    schema = pq.read_schema(lfiles[0])
    for f in lfiles[1:]:
        if pq.read_schema(f) != schema:
            # Fix: use lfiles[0] here — infiles may be a consumed iterator
            # or a non-indexable object, so infiles[0] could itself raise
            # while formatting the error message.
            raise ValueError(f"Mismatched Parquet schemas between {lfiles[0]} and {f}")
    return lfiles, schema
def parquet_to_root_pyroot(infiles, outfile, treename='parquettree',
                           verbose=False):
    """Convert Parquet file(s) into a ROOT TTree using PyROOT.

    infiles  : path/file object, or iterable thereof; all must share a schema
    outfile  : output path, or an already-open ROOT.TFile to write into
    treename : name of the TTree created in the output
    verbose  : if True, print each branch as it is translated

    Raises ValueError for schema mismatches or unsupported field types.
    """
    import pyarrow.parquet as pq
    import pyarrow
    # Interpret files
    infiles, schema = normalize_parquet(infiles)
    fout, local_root_file_creation = _get_outfile(outfile)
    tree = ROOT.TTree(treename, 'Parquet tree')
    tree.SetAutoSave(0)
    # Buffers for primitive types
    numpybufs = {}
    # Buffers for lengths of list types
    vectorlens = {}
    # Strings need to be treated differently due to memory layout
    # These variables are strings
    stringvars = set()
    # Vectors of strings
    stringarrs = {}
    # Declare one branch per schema field before any filling happens.
    _printout(verbose, 'Translating branches:')
    for branch in schema.names:
        field = schema.field(branch)
        _printout(verbose, f'{field.name}, {field.type}')
        if field.type.num_fields == 0:
            # primitive types
            _setup_branch_scalar(field, tree, numpybufs, stringvars)
        elif field.type.num_fields == 1 and isinstance(field.type, pyarrow.lib.ListType):
            # lists of a single type
            _setup_branch_list(field, tree, vectorlens, stringarrs)
        else:
            raise ValueError(f'Cannot translate field "{branch}" of input Parquet schema. Field is described as {field.type}')
    # Fill loop: read each input file as a table and fill entry by entry.
    for infile in infiles:
        table = pq.read_table(infile)
        numpyzips = []
        vectorzips = []
        for branch, numpybuf in numpybufs.items():
            numpyzips.append((numpybuf, table[branch].to_numpy()))
        for branch, lenvec in vectorlens.items():
            vectorzips.append((branch, lenvec, table[branch]))
        for entry in range(len(table)):
            _do_fill(tree, entry, table, numpyzips, stringvars, vectorzips, stringarrs)
    tree.Write()
    # Only close the file if we opened it ourselves.
    if local_root_file_creation:
        fout.Close()
# Command-line entry point: <script> <input.parquet> [treename]
if __name__ == '__main__': # pragma: no cover
    import sys
    import os
    # Output name is the input's basename with a .root suffix, written
    # to the current working directory.
    foutname = os.path.basename(os.path.splitext(sys.argv[1])[0])+'.root'
    parquet_to_root_pyroot(sys.argv[1], foutname,
                           'parquettree' if len(sys.argv) < 3 else sys.argv[2],
                           True)
| [
"pyarrow.parquet.read_schema",
"numpy.zeros",
"ROOT.std.string",
"os.path.splitext",
"ROOT.TFile.Open",
"pyarrow.parquet.read_table",
"ROOT.TTree",
"ROOT.std.vector"
] | [((3667, 3692), 'pyarrow.parquet.read_schema', 'pq.read_schema', (['lfiles[0]'], {}), '(lfiles[0])\n', (3681, 3692), True, 'import pyarrow.parquet as pq\n'), ((4209, 4245), 'ROOT.TTree', 'ROOT.TTree', (['treename', '"""Parquet tree"""'], {}), "(treename, 'Parquet tree')\n", (4219, 4245), False, 'import ROOT\n'), ((157, 193), 'ROOT.TFile.Open', 'ROOT.TFile.Open', (['outfile', '"""RECREATE"""'], {}), "(outfile, 'RECREATE')\n", (172, 193), False, 'import ROOT\n'), ((900, 917), 'ROOT.std.string', 'ROOT.std.string', ([], {}), '()\n', (915, 917), False, 'import ROOT\n'), ((1941, 1978), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '[1]', 'dtype': '"""int32"""'}), "(shape=[1], dtype='int32')\n", (1952, 1978), False, 'import numpy\n'), ((3736, 3753), 'pyarrow.parquet.read_schema', 'pq.read_schema', (['f'], {}), '(f)\n', (3750, 3753), True, 'import pyarrow.parquet as pq\n'), ((5263, 5284), 'pyarrow.parquet.read_table', 'pq.read_table', (['infile'], {}), '(infile)\n', (5276, 5284), True, 'import pyarrow.parquet as pq\n'), ((1447, 1479), 'ROOT.std.vector', 'ROOT.std.vector', (['ROOT.std.string'], {}), '(ROOT.std.string)\n', (1462, 1479), False, 'import ROOT\n'), ((5872, 5901), 'os.path.splitext', 'os.path.splitext', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (5888, 5901), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PRA second-order code -- algorithms only
Modified on Tuesday June 15, 2021
@authors: <NAME> and <NAME>
"""
import numpy as np
import scipy
import scipy.linalg
from scipy.sparse import csr_matrix
class Cone():
    """
    Product of second order cones.

    dim is an integer array; dim[i] is the dimension of the i-th cone.
    Attributes:
        dim     : array of block dimensions
        r       : number of blocks
        sdim    : starting index of each block within a stacked vector
        barcomp : sparse 0/1 matrix; barcomp @ v sums, per block, the
                  entries of v excluding the block's leading entry
                  (the "bar" components)
    """
    def __init__(self, dim):
        self.dim = dim
        self.r = len(dim)
        sumdim = np.insert(self.dim.cumsum(), 0, 0)
        barcomp = np.ndarray((0, sumdim[-1]))
        self.sdim = np.delete(sumdim, len(sumdim) - 1)
        for i in range(self.r):
            indx = np.arange(self.dim[i]) + self.sdim[i]
            rowi = np.zeros(sum(self.dim))
            rowi[indx[1:]] = 1
            barcomp = np.vstack((barcomp, rowi))
        # matrix to extract the "bar" components of a vector in the cone
        self.barcomp = csr_matrix(barcomp)
    def eigenvalues(self, x):
        """Spectral decomposition of x with respect to the product cone.

        For each block (x0, xbar) the two eigenvalues are x0 -/+ ||xbar||,
        so eigval has 2*r entries ordered [x0-||xbar||, x0+||xbar||] per
        block.  Returns (eigval, eigvec).

        Raises ValueError on a dimension mismatch.  (The original printed
        a message and returned None, which made callers fail later with
        an obscure unpacking error.)
        """
        if len(x) != np.sum(self.dim):
            raise ValueError('mismatch dimensions: len(x) != sum(self.dim)')
        norms = (self.barcomp @ (x ** 2)) ** 0.5  # norms of the bar components
        eigval = np.kron(x[self.sdim], [1, 1]) + np.kron(norms, [-1, 1])
        normalizer = self.barcomp.T @ norms
        normalizer[normalizer > 0] = 1 / normalizer[normalizer > 0]
        eigvec = x * normalizer
        eigvec[self.sdim] = 1
        return eigval, eigvec
    def Umu(self, g, mu, x0):
        """Return argmin g'x + 0.5*mu*||x - x0||^2 over the secondplex."""
        gg = g / mu - x0
        eigval, eigvec = self.eigenvalues(gg)
        ll = softmax(-eigval)
        u = np.diag(self.barcomp.T @ (np.kron(np.eye(self.r), [-1, 1]) @ ll)) @ eigvec
        u[self.sdim] = np.kron(np.eye(self.r), [1, 1]) @ ll
        return u / 2
def PRA(A, AA, K, z0, aggressive = True, RescalingLimit = 50):
    """ Projection and Rescaling Algorithm
    *Inputs:*
    A and AA : Matrices such that L = null(A) and Lperp = null(AA)
    K : Direct product of second order cones
    z0 : Initial point in K
    aggressive : Boolean variable to enable aggressive rescaling heuristic
    RescalingLimit : Upper bound on the number of rescaling rounds
    *Outputs:*
    feas : Feasibility status of the problem identified by the algorithm.
           feas = 1 when found an interior point xL in L cap K
           feas = 2 when found an interior point xLperp in Lperp cap K
           feas = -1 when reached RescalingLimit without solving either problem
    xL : Solution found in L cap K
    xLperp : Solution found in Lperp cap K
    k : Number of rescaling rounds
    Total : Total number of iterations
    """
    # Orthogonal projectors onto L = null(A) and Lperp = null(AA),
    # built from QR factorizations of the transposed matrices.
    Q,R = np.linalg.qr(AA.T)
    P = Q@Q.T
    QQ, R = np.linalg.qr(A.T)
    PP = QQ@QQ.T
    # Initialization*
    Total = 0; k = 0; feas = 0
    zz0 = z0
    # D and DD accumulate the primal/dual rescalings applied so far;
    # solutions in the rescaled space are mapped back with np.linalg.solve.
    D = np.eye(len(z0)); DD = D
    while (feas == 0) and (k <= RescalingLimit):
        # ** Basic procedure phase (smooth perceptron algorithm)
        # Primal
        y, z, totalitrsmooth, feasprimal = basic(K, P, z0, 0.2)
        Total += totalitrsmooth
        # Dual
        yLperp, zLperp, totalitrsmooth,feasdual = basic(K, PP, zz0, 0.2)
        Total += totalitrsmooth
        # ** Check if basic procedure succeeded
        if feasprimal:
            feas = 1
            xL = np.linalg.solve(D,y)
            xLperp = xL*0
            return feas,xL,xLperp,k,Total
        if feasdual:
            feas = 2
            xLperp = np.linalg.solve(DD,yLperp)
            xL = xLperp*0
            return feas,xL,xLperp,k,Total
        # ** Rescaling phase -- primal
        B = rescale(K,P,z,aggressive)
        D = B@D
        # Update the projection matrix after rescaling
        Q,R = np.linalg.qr(D@AA.T)
        P = Q@Q.T
        # ** Rescaling phase -- dual
        B = rescale(K,PP,zLperp,aggressive)
        DD = B@DD
        # Update the projection matrix after rescaling
        QQ,R = np.linalg.qr(DD@A.T)
        PP = QQ@QQ.T
        k = k+1
    if (k > RescalingLimit):
        print('Could not finish within '+str(RescalingLimit)+' iterations')
        feas = -1 ; xL = 0*z0; xLperp = 0*z0
    return feas,xL,xLperp,k,Total
def rescale(K, P, z, aggressive):
    """Build the block-diagonal rescaling matrix for the current iterate.

    Blocks whose rescaling condition eps < 1 holds get a Bblock; the
    others are left untouched (identity).  With aggressive=True, eps is
    further divided by the number of blocks.
    """
    Pz_plus, _ = K.eigenvalues(P @ z)
    Pz_plus = Pz_plus * (Pz_plus > 0)
    total_plus = np.sum(Pz_plus)
    B = np.ndarray((0, 0))
    for i in range(K.r):
        head = K.sdim[i]  # index of block i's leading entry
        eps = total_plus / z[head]
        if eps < 1:
            if aggressive:  # rescale aggressively
                eps = eps / K.r
            zblock = z[np.arange(K.dim[i]) + head] / z[head]
            B = scipy.linalg.block_diag(B, Bblock(zblock, eps))
        else:
            B = scipy.linalg.block_diag(B, np.eye(K.dim[i]))
    return B
def Bblock(z, eps):
    """Construct one diagonal block of the rescaling matrix from the
    normalized block z and the rescaling parameter eps."""
    a = ((2 / eps - 1) ** 0.5 - 1) / 2
    v = a * z
    v[0] += 1
    v = v / (1 + 2 * a * (a + 1) * eps) ** 0.5
    # Reflection-like matrix: +1 on the leading entry, -1 elsewhere.
    R = -np.eye(len(v))
    R[0, 0] = 1
    return 2 * np.tensordot(v, v, axes=0) - (v[0] ** 2 - np.dot(v[1:], v[1:])) * R
def basic(K,P,u0,epsilon=0.5):
    """
    Smooth perceptron for a second-order conic system L cap K
    *Inputs:*
    P : The projection matrix onto L
    u0 : Initial solution in Delta(K)
    epsilon : An upper bound on the rescaling condition ||(Pz)+||_1/max(z) <= epsilon
    *Output:*
    y : Pu
    z : A solution satisfying either Pz in int(K) or the rescaling condition
        sum(max(Pz,0)) <= epsilon*lmax(z)
    k : Number of iterations taken by the smooth perceptron algorithm
    feas : binary variable to indicate if y is in the cone
    """
    # Initialization*
    k = 0 ; mu = 2; u = u0 ;
    # z is the smoothed response to y; w = Pz is used for the stopping test.
    y = P@u; z = K.Umu(y,mu,u0); w = P@z
    lw,ew = K.eigenvalues(w) ; lz,ew = K.eigenvalues(z) ; ly,ey = K.eigenvalues(y)
    # Smooth perceptron updates: iterate while the rescaling condition
    # fails AND y is still outside the cone (some eigenvalue negative).
    while (np.sum(lw*(lw>0))/np.max(lz) > epsilon) and (np.sum(ly < 0) > 0):
        # Step size and smoothing parameter shrink as the iteration count grows.
        theta = 2/(k+3);
        u = (1-theta)*(u+theta*z) + (theta**2)*K.Umu(y,mu,u0)
        mu = (1-theta)*mu
        y = P@u
        z = (1-theta)*z + theta*K.Umu(y,mu,u0)
        w = P@z
        lw,ew = K.eigenvalues(w) ; lz,ew = K.eigenvalues(z) ; ly,ey = K.eigenvalues(y)
        k = k+1
    if k > 0:
        k = k-1
    # y is feasible when all its eigenvalues are nonnegative.
    feas = (np.sum(ly<0) == 0)
    return y,z,k,feas
def softmax(g):
    """
    Find argmin 0.5*||x||^2 - g'x over the standard simplex.

    Think of x as a 'softmax' of g: x = (g - lmin)^+ with lmin chosen so
    that sum(x) == 1.  The result concentrates on a single component when
    that component of g is sufficiently larger than all the others.
    """
    def _raise(lo):
        # Move the threshold so that sum((g - lo)^+) steps toward 1 from above.
        return lo + (np.sum((g > lo) * (g - lo)) - 1) / np.sum(g > lo)
    def _lower(hi):
        # Move the threshold so that sum((g - hi)^+) steps toward 1 from below.
        return hi - (1 - np.sum((g > hi) * (g - hi))) / np.sum(g > hi)
    lmin = np.max(g) - 1
    lmax = _raise(lmin)
    while lmax > lmin + 1e-10:
        lmin = _lower(lmax)
        lmax = _raise(lmin)
    return np.where(g > lmin, g - lmin, 0)
def NaiveInstance(m, n):
    """
    Naively generate random A and AA such that null(A) and null(AA) are
    orthogonal complements: split the Q factor of a random n x n matrix
    into its first m rows (A) and remaining n-m rows (AA).
    """
    q, _ = np.linalg.qr(np.random.randn(n, n))
    return q[0:m, :], q[m:n, :]
def ControlledInstance(m, x, K):
    """
    Generate random instances with controlled condition measures.

    Given x in the interior of K (with ||x||_inf = 1), generate A such that
    x is the most interior solution to null(A) cap K, and B such that
    null(B) is the orthogonal complement of null(A).
    *Inputs:*
    m : Number of rows
    x : The most interior solution; must lie in the interior of K
    K : Product of second-order cones
    *Outputs:*
    A : random balanced matrix such that Ax = 0
    B : balanced matrix such that null(B) is the orthogonal complement of null(A)

    Raises ValueError if x is not in the interior of the cone.
    """
    n = len(x)
    r = K.r
    A = np.random.randn(m-1,n)
    A = A - np.tensordot(A@x,x,axes=0)/(x.T@x)
    lx,ev = K.eigenvalues(x)
    if (np.min(lx)<=0):
        # Fix: the original printed a message and returned None, leaving
        # callers to crash while unpacking; fail fast instead.
        raise ValueError('provided x is not in the cone')
    # compute norms of blocks
    nx = np.kron(np.eye(r),np.ones(2))@lx**2
    lu = nx*0
    indx = (nx==np.max(nx)).nonzero()[0]
    lu[indx] = np.random.rand(len(indx))
    lu = K.r*lu/sum(abs(lu))
    u = np.zeros(n)
    # xinv is the Jordan-algebra inverse of x, built block by block.
    xinv = -x
    xinv[K.sdim]=x[K.sdim]
    for i in range(K.r):
        indx = np.arange(K.dim[i])+K.sdim[i]
        u[indx] = x[indx]*lu[i]
        xinv[indx]=xinv[indx]/(xinv[indx[0]]**2-np.dot(xinv[indx[1:]],xinv[indx[1:]]))
    A = np.vstack((A,u.T-xinv.T))
    Q,R = np.linalg.qr(A.T,mode='complete')
    A = Q[:,0:m].T
    B = Q[:,m:n].T
    return A,B
| [
"numpy.sum",
"numpy.eye",
"numpy.random.randn",
"numpy.tensordot",
"scipy.linalg.block_diag",
"numpy.linalg.qr",
"numpy.zeros",
"numpy.ones",
"numpy.max",
"numpy.where",
"scipy.sparse.csr_matrix",
"numpy.min",
"numpy.arange",
"numpy.kron",
"numpy.dot",
"numpy.linalg.solve",
"numpy.nd... | [((2720, 2738), 'numpy.linalg.qr', 'np.linalg.qr', (['AA.T'], {}), '(AA.T)\n', (2732, 2738), True, 'import numpy as np\n'), ((2767, 2784), 'numpy.linalg.qr', 'np.linalg.qr', (['A.T'], {}), '(A.T)\n', (2779, 2784), True, 'import numpy as np\n'), ((4431, 4445), 'numpy.sum', 'np.sum', (['Pzplus'], {}), '(Pzplus)\n', (4437, 4445), True, 'import numpy as np\n'), ((4452, 4470), 'numpy.ndarray', 'np.ndarray', (['(0, 0)'], {}), '((0, 0))\n', (4462, 4470), True, 'import numpy as np\n'), ((7148, 7179), 'numpy.where', 'np.where', (['(g > lmin)', '(g - lmin)', '(0)'], {}), '(g > lmin, g - lmin, 0)\n', (7156, 7179), True, 'import numpy as np\n'), ((7350, 7371), 'numpy.random.randn', 'np.random.randn', (['n', 'n'], {}), '(n, n)\n', (7365, 7371), True, 'import numpy as np\n'), ((7381, 7396), 'numpy.linalg.qr', 'np.linalg.qr', (['M'], {}), '(M)\n', (7393, 7396), True, 'import numpy as np\n'), ((8098, 8123), 'numpy.random.randn', 'np.random.randn', (['(m - 1)', 'n'], {}), '(m - 1, n)\n', (8113, 8123), True, 'import numpy as np\n'), ((8496, 8507), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8504, 8507), True, 'import numpy as np\n'), ((8751, 8779), 'numpy.vstack', 'np.vstack', (['(A, u.T - xinv.T)'], {}), '((A, u.T - xinv.T))\n', (8760, 8779), True, 'import numpy as np\n'), ((8787, 8821), 'numpy.linalg.qr', 'np.linalg.qr', (['A.T'], {'mode': '"""complete"""'}), "(A.T, mode='complete')\n", (8799, 8821), True, 'import numpy as np\n'), ((458, 485), 'numpy.ndarray', 'np.ndarray', (['(0, sumdim[-1])'], {}), '((0, sumdim[-1]))\n', (468, 485), True, 'import numpy as np\n'), ((772, 791), 'scipy.sparse.csr_matrix', 'csr_matrix', (['barcomp'], {}), '(barcomp)\n', (782, 791), False, 'from scipy.sparse import csr_matrix\n'), ((3815, 3837), 'numpy.linalg.qr', 'np.linalg.qr', (['(D @ AA.T)'], {}), '(D @ AA.T)\n', (3827, 3837), True, 'import numpy as np\n'), ((4032, 4054), 'numpy.linalg.qr', 'np.linalg.qr', (['(DD @ A.T)'], {}), '(DD @ A.T)\n', (4044, 4054), True, 'import 
numpy as np\n'), ((6491, 6505), 'numpy.sum', 'np.sum', (['(ly < 0)'], {}), '(ly < 0)\n', (6497, 6505), True, 'import numpy as np\n'), ((6901, 6910), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (6907, 6910), True, 'import numpy as np\n'), ((8208, 8218), 'numpy.min', 'np.min', (['lx'], {}), '(lx)\n', (8214, 8218), True, 'import numpy as np\n'), ((723, 749), 'numpy.vstack', 'np.vstack', (['(barcomp, rowi)'], {}), '((barcomp, rowi))\n', (732, 749), True, 'import numpy as np\n'), ((907, 923), 'numpy.sum', 'np.sum', (['self.dim'], {}), '(self.dim)\n', (913, 923), True, 'import numpy as np\n'), ((3379, 3400), 'numpy.linalg.solve', 'np.linalg.solve', (['D', 'y'], {}), '(D, y)\n', (3394, 3400), True, 'import numpy as np\n'), ((3532, 3559), 'numpy.linalg.solve', 'np.linalg.solve', (['DD', 'yLperp'], {}), '(DD, yLperp)\n', (3547, 3559), True, 'import numpy as np\n'), ((4562, 4581), 'numpy.arange', 'np.arange', (['K.dim[i]'], {}), '(K.dim[i])\n', (4571, 4581), True, 'import numpy as np\n'), ((4832, 4864), 'scipy.linalg.block_diag', 'scipy.linalg.block_diag', (['B', 'Bblk'], {}), '(B, Bblk)\n', (4855, 4864), False, 'import scipy\n'), ((5189, 5215), 'numpy.tensordot', 'np.tensordot', (['v', 'v'], {'axes': '(0)'}), '(v, v, axes=0)\n', (5201, 5215), True, 'import numpy as np\n'), ((6122, 6136), 'numpy.sum', 'np.sum', (['(ly < 0)'], {}), '(ly < 0)\n', (6128, 6136), True, 'import numpy as np\n'), ((6963, 6979), 'numpy.sum', 'np.sum', (['(g > lmin)'], {}), '(g > lmin)\n', (6969, 6979), True, 'import numpy as np\n'), ((8134, 8164), 'numpy.tensordot', 'np.tensordot', (['(A @ x)', 'x'], {'axes': '(0)'}), '(A @ x, x, axes=0)\n', (8146, 8164), True, 'import numpy as np\n'), ((8333, 8342), 'numpy.eye', 'np.eye', (['r'], {}), '(r)\n', (8339, 8342), True, 'import numpy as np\n'), ((8343, 8353), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (8350, 8353), True, 'import numpy as np\n'), ((8590, 8609), 'numpy.arange', 'np.arange', (['K.dim[i]'], {}), '(K.dim[i])\n', (8599, 8609), True, 
'import numpy as np\n'), ((588, 610), 'numpy.arange', 'np.arange', (['self.dim[i]'], {}), '(self.dim[i])\n', (597, 610), True, 'import numpy as np\n'), ((1098, 1127), 'numpy.kron', 'np.kron', (['x[self.sdim]', '[1, 1]'], {}), '(x[self.sdim], [1, 1])\n', (1105, 1127), True, 'import numpy as np\n'), ((1126, 1149), 'numpy.kron', 'np.kron', (['norms', '[-1, 1]'], {}), '(norms, [-1, 1])\n', (1133, 1149), True, 'import numpy as np\n'), ((1663, 1677), 'numpy.eye', 'np.eye', (['self.r'], {}), '(self.r)\n', (1669, 1677), True, 'import numpy as np\n'), ((4920, 4936), 'numpy.eye', 'np.eye', (['K.dim[i]'], {}), '(K.dim[i])\n', (4926, 4936), True, 'import numpy as np\n'), ((5225, 5245), 'numpy.dot', 'np.dot', (['v[1:]', 'v[1:]'], {}), '(v[1:], v[1:])\n', (5231, 5245), True, 'import numpy as np\n'), ((6077, 6098), 'numpy.sum', 'np.sum', (['(lw * (lw > 0))'], {}), '(lw * (lw > 0))\n', (6083, 6098), True, 'import numpy as np\n'), ((6095, 6105), 'numpy.max', 'np.max', (['lz'], {}), '(lz)\n', (6101, 6105), True, 'import numpy as np\n'), ((6934, 6965), 'numpy.sum', 'np.sum', (['((g > lmin) * (g - lmin))'], {}), '((g > lmin) * (g - lmin))\n', (6940, 6965), True, 'import numpy as np\n'), ((7060, 7076), 'numpy.sum', 'np.sum', (['(g > lmax)'], {}), '(g > lmax)\n', (7066, 7076), True, 'import numpy as np\n'), ((7127, 7143), 'numpy.sum', 'np.sum', (['(g > lmin)'], {}), '(g > lmin)\n', (7133, 7143), True, 'import numpy as np\n'), ((8700, 8738), 'numpy.dot', 'np.dot', (['xinv[indx[1:]]', 'xinv[indx[1:]]'], {}), '(xinv[indx[1:]], xinv[indx[1:]])\n', (8706, 8738), True, 'import numpy as np\n'), ((7033, 7064), 'numpy.sum', 'np.sum', (['((g > lmax) * (g - lmax))'], {}), '((g > lmax) * (g - lmax))\n', (7039, 7064), True, 'import numpy as np\n'), ((7098, 7129), 'numpy.sum', 'np.sum', (['((g > lmin) * (g - lmin))'], {}), '((g > lmin) * (g - lmin))\n', (7104, 7129), True, 'import numpy as np\n'), ((8391, 8401), 'numpy.max', 'np.max', (['nx'], {}), '(nx)\n', (8397, 8401), True, 'import numpy as 
np\n'), ((1597, 1611), 'numpy.eye', 'np.eye', (['self.r'], {}), '(self.r)\n', (1603, 1611), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import random
import time
def bubble_sort(items):
    """Sort items in place in ascending order using bubble sort."""
    n = len(items)
    for done in range(n):
        # After `done` passes, the last `done` entries are already sorted.
        for k in range(n - 1 - done):
            if items[k] > items[k + 1]:
                items[k], items[k + 1] = items[k + 1], items[k]
def selection_sort(items):
    """Sort items in place in ascending order using selection sort.

    For each boundary from the end of the list down to 1, find the
    position of the maximum element in items[0..boundary] and swap it
    into the boundary position.

    Bug fix: the original compared items[i] (the fixed boundary element)
    instead of items[j] inside the inner loop and assigned posOfMax = i,
    so it never located the maximum and left the list unsorted.
    """
    for boundary in range(len(items) - 1, 0, -1):
        pos_of_max = 0
        for j in range(1, boundary + 1):
            if items[j] > items[pos_of_max]:
                pos_of_max = j
        items[boundary], items[pos_of_max] = items[pos_of_max], items[boundary]
def insertion_sort(items):
    """Sort items in place in ascending order using insertion sort."""
    for end in range(1, len(items)):
        # Sink items[end] leftward into the sorted prefix items[:end].
        pos = end
        while pos > 0 and items[pos] < items[pos - 1]:
            items[pos - 1], items[pos] = items[pos], items[pos - 1]
            pos -= 1
def quick_sort(items):
    """Sort items in place in ascending order using quicksort
    (pivot = middle element; duplicates of the pivot go right)."""
    if len(items) <= 1:
        return
    pivot_index = int(len(items) / 2)
    pivot = items[pivot_index]
    below, above = [], []
    for pos, val in enumerate(items):
        if pos == pivot_index:
            continue
        (below if val < pivot else above).append(val)
    quick_sort(below)
    quick_sort(above)
    items[:] = below + [pivot] + above
if __name__ == '__main__':
    # Input sizes for the benchmark.
    x = np.array([10, 20, 30, 40])
    circle = {
        '1': random.sample(range(-500, 500), k=x[0]),
        '2': random.sample(range(-5000, 5000), k=x[1]),
        '3': random.sample(range(-50000, 50000), k=x[2]),
        '4': random.sample(range(-500000, 500000), k=x[3]),
    }

    # Reference complexity curves.
    plt.plot(x, x)
    plt.plot(x, x * np.log(x))
    plt.plot(x, x ** 2)
    plt.xticks(x)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend(['n', 'nlogn', 'n2'], loc='upper left')
    plt.savefig('func.png', dpi=250)

    times = {}
    algorithms = {'b': bubble_sort, 's': selection_sort,
                  'i': insertion_sort, 'q': quick_sort}
    for key, data in circle.items():
        print('timing input set', key)
        for prefix, sort_fn in algorithms.items():
            # Fix: time each algorithm on a fresh copy.  The original
            # mutated the shared list, so every algorithm after the first
            # was timed on already-sorted input.
            work = list(data)
            start = time.time()
            sort_fn(work)
            times[prefix + key] = time.time() - start

    bubble = np.array([times['b' + k] for k in '1234'])
    selection = np.array([times['s' + k] for k in '1234'])
    insertion = np.array([times['i' + k] for k in '1234'])
    # Fix: the original built this array from the insertion-sort keys
    # ('i1'..'i4'), so the quicksort curve duplicated insertion sort.
    quick = np.array([times['q' + k] for k in '1234'])

    # Fix: start a new figure so sort.png does not also contain the
    # reference curves drawn above for func.png.
    plt.figure()
    plt.plot(x, bubble)
    plt.plot(x, selection)
    plt.plot(x, insertion)
    plt.plot(x, quick)
    plt.xticks(x)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend(['bubble', 'selection', 'insertion', 'quick'], loc='upper left')
    plt.savefig('sort.png', dpi=250)
| [
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"time.time",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] | [((1333, 1359), 'numpy.array', 'np.array', (['[10, 20, 30, 40]'], {}), '([10, 20, 30, 40])\n', (1341, 1359), True, 'import numpy as np\n'), ((1585, 1599), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {}), '(x, x)\n', (1593, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1633, 1652), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(x ** 2)'], {}), '(x, x ** 2)\n', (1641, 1652), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1669), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x'], {}), '(x)\n', (1666, 1669), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1689), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1684, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1709), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1704, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1764), 'matplotlib.pyplot.legend', 'plt.legend', (["['n', 'nlogn', 'n2']"], {'loc': '"""upper left"""'}), "(['n', 'nlogn', 'n2'], loc='upper left')\n", (1724, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1801), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func.png"""'], {'dpi': '(250)'}), "('func.png', dpi=250)\n", (1780, 1801), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2542), 'numpy.array', 'np.array', (["[times['b1'], times['b2'], times['b3'], times['b4']]"], {}), "([times['b1'], times['b2'], times['b3'], times['b4']])\n", (2488, 2542), True, 'import numpy as np\n'), ((2628, 2690), 'numpy.array', 'np.array', (["[times['s1'], times['s2'], times['s3'], times['s4']]"], {}), "([times['s1'], times['s2'], times['s3'], times['s4']])\n", (2636, 2690), True, 'import numpy as np\n'), ((2785, 2847), 'numpy.array', 'np.array', (["[times['i1'], times['i2'], times['i3'], times['i4']]"], {}), "([times['i1'], times['i2'], times['i3'], times['i4']])\n", (2793, 2847), True, 'import numpy as np\n'), ((2938, 3000), 'numpy.array', 'np.array', (["[times['i1'], times['i2'], times['i3'], 
times['i4']]"], {}), "([times['i1'], times['i2'], times['i3'], times['i4']])\n", (2946, 3000), True, 'import numpy as np\n'), ((3072, 3091), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'bubble'], {}), '(x, bubble)\n', (3080, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3118), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'selection'], {}), '(x, selection)\n', (3104, 3118), True, 'import matplotlib.pyplot as plt\n'), ((3123, 3145), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'insertion'], {}), '(x, insertion)\n', (3131, 3145), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3168), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'quick'], {}), '(x, quick)\n', (3158, 3168), True, 'import matplotlib.pyplot as plt\n'), ((3174, 3187), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x'], {}), '(x)\n', (3184, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3207), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3202, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3222, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3232, 3307), 'matplotlib.pyplot.legend', 'plt.legend', (["['bubble', 'selection', 'insertion', 'quick']"], {'loc': '"""upper left"""'}), "(['bubble', 'selection', 'insertion', 'quick'], loc='upper left')\n", (3242, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sort.png"""'], {'dpi': '(250)'}), "('sort.png', dpi=250)\n", (3323, 3344), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2053), 'time.time', 'time.time', ([], {}), '()\n', (2051, 2053), False, 'import time\n'), ((2151, 2162), 'time.time', 'time.time', ([], {}), '()\n', (2160, 2162), False, 'import time\n'), ((2263, 2274), 'time.time', 'time.time', ([], {}), '()\n', (2272, 2274), False, 'import time\n'), ((2375, 2386), 'time.time', 'time.time', ([], {}), '()\n', (2384, 2386), False, 
'import time\n'), ((1618, 1627), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1624, 1627), True, 'import numpy as np\n'), ((1993, 2004), 'time.time', 'time.time', ([], {}), '()\n', (2002, 2004), False, 'import time\n'), ((2114, 2125), 'time.time', 'time.time', ([], {}), '()\n', (2123, 2125), False, 'import time\n'), ((2226, 2237), 'time.time', 'time.time', ([], {}), '()\n', (2235, 2237), False, 'import time\n'), ((2338, 2349), 'time.time', 'time.time', ([], {}), '()\n', (2347, 2349), False, 'import time\n'), ((2446, 2457), 'time.time', 'time.time', ([], {}), '()\n', (2455, 2457), False, 'import time\n')] |
import pytest
import numpy as np
import wagedyn as wd
from scipy.misc import derivative
# content of test_sample.py
def test_utility():
    """Utility, its inverse, and both analytic derivatives must agree."""
    params = wd.Parameters()
    params.tax_lambda = 0.9
    params.tax_tau = 1.1
    prefs = wd.Preferences(params)
    wages = np.linspace(0.01, 10, 1000)
    utils = prefs.utility(wages)
    # Applying the inverse to the utility values must recover the wages.
    roundtrip = prefs.inv_utility(utils)
    assert np.allclose(roundtrip, wages), \
        "Utility function and its inverse utility are not compatible"
    # Analytic derivative vs a central finite difference.
    numeric_slope = derivative(prefs.utility, wages, dx=1e-7)
    assert np.allclose(prefs.utility_1d(wages), numeric_slope), \
        "Utility function and its derivative are not compatible"
    numeric_inv_slope = derivative(prefs.inv_utility, utils, dx=1e-7)
    assert np.allclose(prefs.inv_utility_1d(utils), numeric_inv_slope), \
        "Inverse utility function and its derivative are not compatible"
def test_effort():
    """Round-trip the effort-cost derivative through its declared inverse."""
    params = wd.Parameters()
    prefs = wd.Preferences(params)
    grid = np.linspace(params.efcost_sep, 1, 100)
    # Finite-difference marginal cost, then invert it back onto the grid.
    marginal_cost = derivative(prefs.effort_cost, grid, dx=1e-7)
    recovered = prefs.inv_effort_cost_1d(marginal_cost)
    assert np.allclose(grid, recovered), \
        "Inverse of derivative of effort cost effort cost are not compatible"
def test_taxes():
    """Tax parameters should enter utility as lambda * w ** tax_lambda scaling."""
    base = wd.Parameters()
    taxed = wd.Parameters()
    taxed.tax_lambda = 0.9
    taxed.tax_tau = 1.2
    prefs = wd.Preferences(taxed)
    wages = np.linspace(0.01, 10, 1000)
    # Closed-form CRRA utility of the after-tax wage, using untaxed params.
    after_tax = 1.2 * np.power(wages, 0.9)
    expected = (base.u_a * np.power(after_tax, 1.0 - base.u_rho) - base.u_b) / (1 - base.u_rho)
    assert np.allclose(prefs.utility(wages), expected), \
        "applying tax parameter does not work"
if __name__ == '__main__':
unittest.main() | [
"scipy.misc.derivative",
"numpy.power",
"numpy.allclose",
"wagedyn.Parameters",
"wagedyn.Preferences",
"numpy.linspace"
] | [((150, 165), 'wagedyn.Parameters', 'wd.Parameters', ([], {}), '()\n', (163, 165), True, 'import wagedyn as wd\n'), ((233, 250), 'wagedyn.Preferences', 'wd.Preferences', (['p'], {}), '(p)\n', (247, 250), True, 'import wagedyn as wd\n'), ((270, 297), 'numpy.linspace', 'np.linspace', (['(0.01)', '(10)', '(1000)'], {}), '(0.01, 10, 1000)\n', (281, 297), True, 'import numpy as np\n'), ((1014, 1029), 'wagedyn.Parameters', 'wd.Parameters', ([], {}), '()\n', (1027, 1029), True, 'import wagedyn as wd\n'), ((1045, 1062), 'wagedyn.Preferences', 'wd.Preferences', (['p'], {}), '(p)\n', (1059, 1062), True, 'import wagedyn as wd\n'), ((1085, 1118), 'numpy.linspace', 'np.linspace', (['p.efcost_sep', '(1)', '(100)'], {}), '(p.efcost_sep, 1, 100)\n', (1096, 1118), True, 'import numpy as np\n'), ((1135, 1185), 'scipy.misc.derivative', 'derivative', (['pref.effort_cost', 'delta_grid'], {'dx': '(1e-07)'}), '(pref.effort_cost, delta_grid, dx=1e-07)\n', (1145, 1185), False, 'from scipy.misc import derivative\n'), ((1249, 1283), 'numpy.allclose', 'np.allclose', (['delta_grid', 'delta_hat'], {}), '(delta_grid, delta_hat)\n', (1260, 1283), True, 'import numpy as np\n'), ((1412, 1427), 'wagedyn.Parameters', 'wd.Parameters', ([], {}), '()\n', (1425, 1427), True, 'import wagedyn as wd\n'), ((1441, 1456), 'wagedyn.Parameters', 'wd.Parameters', ([], {}), '()\n', (1454, 1456), True, 'import wagedyn as wd\n'), ((1525, 1543), 'wagedyn.Preferences', 'wd.Preferences', (['p2'], {}), '(p2)\n', (1539, 1543), True, 'import wagedyn as wd\n'), ((1562, 1589), 'numpy.linspace', 'np.linspace', (['(0.01)', '(10)', '(1000)'], {}), '(0.01, 10, 1000)\n', (1573, 1589), True, 'import numpy as np\n'), ((626, 669), 'scipy.misc.derivative', 'derivative', (['pref.utility', 'input_w'], {'dx': '(1e-07)'}), '(pref.utility, input_w, dx=1e-07)\n', (636, 669), False, 'from scipy.misc import derivative\n'), ((844, 891), 'scipy.misc.derivative', 'derivative', (['pref.inv_utility', 'input_u'], {'dx': '(1e-07)'}), 
'(pref.inv_utility, input_u, dx=1e-07)\n', (854, 891), False, 'from scipy.misc import derivative\n'), ((1702, 1724), 'numpy.power', 'np.power', (['input_w', '(0.9)'], {}), '(input_w, 0.9)\n', (1710, 1724), True, 'import numpy as np\n')] |
import os
import re
import pandas
import string
import numpy as np
import gensim.models.keyedvectors as word2vec
from mlxtend.preprocessing import one_hot
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
# English stopword list consumed by clean_str() when filtering tokens.
stop_words = stopwords.words('english')
# Global word -> vector map, filled in place by load_glove / load_word2vec /
# load_fasttext and read by prepare_x().
embeddings_index = dict()
# Per-dataset side length of the square sentence matrix built by prepare_x()
# (also the PCA target dimensionality for the word vectors).
dimensions = {
    "IMDB": 52,
    "ProcCons": 28,
    'MR': 28,
    'SST-1': 28,
    'SST-2': 28,
    'SUBJ': 28,
    'TREC': 28
}
def clean_str(s):
    """Normalize a raw sentence into lowercase, stopword-free alphabetic tokens.

    Applies the Yoon-Kim style regex substitutions, tokenizes with NLTK,
    lowercases, strips punctuation, and drops non-alphabetic and stop words.
    Returns a list of token strings.
    """
    # (pattern, replacement) pairs applied in order; order matters because
    # later rules pad the punctuation the first rule preserved.
    substitutions = [
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    ]
    for pattern, replacement in substitutions:
        s = re.sub(pattern, replacement, s)
    # NOTE: these two strip() results are discarded in the original as well;
    # kept as no-ops to preserve behavior exactly.
    s.strip('\"')
    s.strip('\'')
    tokens = word_tokenize(s)
    lowered = [t.lower() for t in tokens]
    # Remove punctuation characters from each token.
    punct_table = str.maketrans('', '', string.punctuation)
    depunct = [t.translate(punct_table) for t in lowered]
    # Keep purely alphabetic tokens, then drop stop words.
    alphabetic = [t for t in depunct if t.isalpha()]
    return [t for t in alphabetic if t not in stop_words]
def prepare_x(x_text, dimension):
    """Turn raw texts into (N, 1, dimension, dimension) embedding matrices.

    Builds a vocabulary over `x_text`, looks every vocabulary word up in the
    global `embeddings_index` (300-d vectors, per the zeros allocation below),
    PCA-reduces those vectors to `dimension`, then stacks the first
    `dimension` token vectors of each text into a square matrix.

    Returns (X, total_empty) where `total_empty` counts vocabulary words with
    no pretrained embedding (their rows stay zero).
    """
    vec = CountVectorizer(tokenizer=clean_str)
    vec.fit_transform(x_text)
    total_empty = 0
    vocab_names = vec.get_feature_names()
    vocab_values = np.zeros((len(vocab_names), 300))
    for i in range(len(vocab_names)):
        embedding_vector = embeddings_index.get(vocab_names[i])
        if embedding_vector is not None:
            vocab_values[i] = embedding_vector
        else:
            total_empty += 1
    # Compress the 300-d pretrained vectors down to `dimension` components so
    # each sentence becomes a square dimension x dimension image.
    pca = PCA(n_components=dimension)
    vocab = pca.fit_transform(vocab_values)
    x_final = [clean_str(sent) for sent in x_text]
    X = np.zeros((len(x_final), 1, dimension, dimension))
    for i in range(len(x_final)):
        x = x_final[i]
        text = np.zeros((dimension, dimension))
        for j in range(dimension):
            if j < len(x):
                # NOTE(review): vocab_names.index() is a linear scan per
                # token; a dict lookup would be much faster on large corpora.
                if x[j] in vocab_names:
                    text[j] = vocab[vocab_names.index(x[j])]
            else:
                break
        X[i][0] = text
    return X, total_empty
def load_imdb(folder, output, dimension=dimensions['IMDB']):
    """Load the IMDB review folders, embed them, and save X/Y under `output`.

    Parameters
    ----------
    folder : str
        Dataset root containing test/{pos,neg} and train/{pos,neg} folders,
        one review per file.
    output : str
        Directory where X.npy, Y.npy and the empty_words count are written.
    dimension : int
        Side length of the square per-review embedding matrix.
    """
    x_text = list()
    y_text = list()
    # The original four copy-pasted loops differed only in sub-folder and
    # label; iterate them uniformly, preserving the original read order.
    for subdir, label in (('/test/pos', 1), ('/test/neg', 0),
                          ('/train/pos', 1), ('/train/neg', 0)):
        for file in os.listdir(folder + subdir):
            # `with` guarantees the handle is closed even if readline() raises
            # (the original leaked the handle on error).
            with open(folder + subdir + '/' + file, 'r', encoding='utf-8') as review_file:
                x_text.append(review_file.readline())
            y_text.append(label)
    # Generate X
    X, total_empty = prepare_x(x_text, dimension)
    Y = y_text
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    with open(output + '/empty_words', 'w+') as f:
        f.write(str(total_empty))
    print('imdb done')
def load_pc(pos, neg, output, dimension=dimensions['ProcCons']):
    """Prepare the Pros/Cons corpus and save embedded X/Y arrays to `output`."""
    pos_lines = [row.strip() for row in open(pos, encoding='utf-8').readlines()]
    neg_lines = [row.strip() for row in open(neg, encoding='utf-8').readlines()]
    # Negatives come first so they align with the 0-labels below.
    x_text = neg_lines + pos_lines
    X, total_empty = prepare_x(x_text, dimension)
    # Label vector: one 0 per negative example, one 1 per positive example.
    Y = np.concatenate([[0] * len(neg_lines), [1] * len(pos_lines)], 0)
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    f = open(output + '/empty_words', 'w+')
    f.write(str(total_empty))
    f.close()
    print('cr done')
def load_mr(pos, neg, output, dimension=dimensions['MR']):
    """Prepare the Movie Review polarity corpus and save X/Y to `output`.

    `pos`/`neg` are latin-1 encoded files with one sentence per line;
    negatives are listed first to match the concatenated label vector.
    """
    # NOTE(review): the file handles opened here are never explicitly closed;
    # they are only released by garbage collection.
    positive_examples = list(open(pos, encoding='latin-1').readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open(neg, encoding='latin-1').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = negative_examples + positive_examples
    # Generate X
    X, total_empty = prepare_x(x_text, dimension)
    # Generate labels
    negative_labels = [0 for _ in negative_examples]
    positive_labels = [1 for _ in positive_examples]
    Y = np.concatenate([negative_labels, positive_labels], 0)
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    f = open(output + '/empty_words', 'w+')
    f.write(str(total_empty))
    f.close()
    print('mr done')
def load_sst1(train, dev, test, output, dimension=dimensions['SST-1']):
    """Prepare the SST-1 fine-grained sentiment splits and save X/Y arrays."""
    x_text = list()
    y_text = list()
    # Each CSV row is "<label>,<sentence>"; labels are shifted to start at 0.
    # The three splits are concatenated in train, dev, test order.
    for path in (train, dev, test):
        for row in open(path, encoding='utf-8').readlines():
            label, sentence = row.split(',', 1)
            y_text.append(int(label) - 1)
            x_text.append(sentence)
    X, total_empty = prepare_x(x_text, dimension)
    Y = y_text
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    f = open(output + '/empty_words', 'w+')
    f.write(str(total_empty))
    f.close()
    print('sst1 done')
def load_sst2(train, dev, test, output, dimension=dimensions['SST-2']):
    """Prepare the SST-2 binary sentiment splits and save X/Y to `output`.

    Each CSV row is "<label>,<sentence>"; labels are shifted down by one so
    they start at 0. The three splits are concatenated train, dev, test.
    """
    x_text = list()
    y_text = list()
    # Split by words
    for line in [line.split(',', 1) for line in open(train, encoding='utf-8').readlines()]:
        y_text.append(int(line[0])-1)
        x_text.append(line[1])
    for line in [line.split(',', 1) for line in open(dev, encoding='utf-8').readlines()]:
        y_text.append(int(line[0])-1)
        x_text.append(line[1])
    for line in [line.split(',', 1) for line in open(test, encoding='utf-8').readlines()]:
        y_text.append(int(line[0])-1)
        x_text.append(line[1])
    # Generate X
    X, total_empty = prepare_x(x_text, dimension)
    # Generate labels
    Y = y_text
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    f = open(output + '/empty_words', 'w+')
    f.write(str(total_empty))
    f.close()
    print('sst2 done')
def load_subj(pos, neg, output, dimension=dimensions['SUBJ']):
    """Prepare the subjectivity corpus (plot vs quote) and save X/Y arrays.

    `pos`/`neg` are latin-1 files, one sentence per line; negatives first,
    matching the concatenated 0/1 label vector.
    """
    positive_examples = list(open(pos, encoding='latin-1').readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open(neg, encoding='latin-1').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = negative_examples + positive_examples
    # Generate X
    X, total_empty = prepare_x(x_text, dimension)
    # Generate labels
    negative_labels = [0 for _ in negative_examples]
    positive_labels = [1 for _ in positive_examples]
    Y = np.concatenate([negative_labels, positive_labels], 0)
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    f = open(output + '/empty_words', 'w+')
    f.write(str(total_empty))
    f.close()
    print('subj done')
def load_trec(dev, test, output, dimension=dimensions['TREC']):
    """Prepare the TREC question-classification splits and save X/Y arrays."""
    categories = {'ABBR':0, 'ENTY':1, 'DESC':2, 'HUM':3, 'LOC':4, 'NUM':5}
    x_text = list()
    y_text = list()
    # Each line is "<COARSE:fine> <question>"; only the coarse class is kept.
    # The two files are concatenated in dev, test order.
    for path in (dev, test):
        for row in open(path, encoding='utf-8').readlines():
            tag, question = row.split(' ', 1)
            coarse = tag.split(':')[0]
            y_text.append(categories[coarse])
            x_text.append(question)
    X, total_empty = prepare_x(x_text, dimension)
    Y = y_text
    np.save(output + '/X', X)
    np.save(output + '/Y', Y)
    f = open(output + '/empty_words', 'w+')
    f.write(str(total_empty))
    f.close()
    print('trec done')
def load_glove(file_name):
    """Populate the global `embeddings_index` from a GloVe text file.

    Each line is '<word> <floats...>'; vectors are stored as float32 arrays
    keyed by word.
    """
    # Context manager closes the file even if a malformed line raises
    # (the original could leak the handle on error).
    with open(file_name, encoding='utf-8') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Loaded %s word vectors.' % len(embeddings_index))
def load_word2vec(file_name):
    """Populate the global `embeddings_index` from a binary word2vec file."""
    word2vecDict = word2vec.KeyedVectors.load_word2vec_format(file_name, binary=True)
    # NOTE(review): `.wv.vocab` is the gensim < 4.0 API; newer gensim renamed
    # it (key_to_index) — confirm the pinned gensim version.
    for word in word2vecDict.wv.vocab:
        embeddings_index[word] = word2vecDict.word_vec(word)
    print('Loaded %s word vectors.' % len(embeddings_index))
def load_fasttext(file_name):
    """Populate the global `embeddings_index` from a fastText .vec text file.

    Each line is '<word> <floats...>'; vectors are stored as float32 arrays
    keyed by word.
    """
    # Context manager closes the file even if a malformed line raises
    # (the original could leak the handle on error).
    with open(file_name, encoding='utf-8') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Loaded %s word vectors.' % len(embeddings_index))
def data_prepare(name, input='./datasets', output='./datasets_prepared'):
    """Load the chosen embedding family, then prepare every dataset with it.

    Parameters
    ----------
    name : str
        One of 'glove', 'word2vec', 'fasttext'; selects which pretrained
        vectors fill the global `embeddings_index`.
    input, output : str
        Dataset root and destination root; results land in
        <output>/<DATASET>/<name>.

    NOTE(review): `input` shadows the builtin of the same name (harmless here
    but worth renaming if the signature is ever revised).
    """
    print(name)
    if name == 'glove':
        load_glove('embeddings/glove.6B.300d.txt')
    elif name == 'word2vec':
        load_word2vec('embeddings/GoogleNews-vectors-negative300.bin')
    elif name == 'fasttext':
        load_fasttext('embeddings/wiki-news-300d-1M.vec')
    load_imdb(input + '/IMDB/IMDB', output + '/IMDB' + '/' + name)
    load_pc(input + '/ProcCons/ProcCons/IntegratedPros.txt', input + '/ProcCons/ProcCons/IntegratedCons.txt', output + '/ProcCons' + '/' + name)
    load_mr(input + '/MR/MR/rt-polarity.pos', input + '/MR/MR/rt-polarity.neg', output + '/MR' + '/' + name)
    load_sst1(input + '/SST-1/train.csv', input + '/SST-1/dev.csv', input + '/SST-1/test.csv', output + '/SST-1' + '/' + name)
    load_sst2(input + '/SST-2/train.csv', input + '/SST-2/dev.csv', input + '/SST-2/test.csv', output + '/SST-2' + '/' + name)
    load_subj(input + '/SUBJ/Subj/plot.tok.gt9.5000', input + '/SUBJ/Subj/quote.tok.gt9.5000', output + '/SUBJ' + '/' + name)
    load_trec(input + '/TREC/TREC/train_5500.label.txt', input + '/TREC/TREC/TREC_10.label.txt', output + '/TREC' + '/' + name)
# Run the full preparation pipeline once per embedding family; each call
# rebuilds embeddings_index and regenerates every dataset's X/Y arrays.
data_prepare('glove')
data_prepare('word2vec')
data_prepare('fasttext')
| [
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.save",
"numpy.concatenate",
"numpy.asarray",
"numpy.zeros",
"gensim.models.keyedvectors.KeyedVectors.load_word2vec_format",
"sklearn.decomposition.PCA",
"nltk.corpus.stopwords.words",
"re.sub",
"os.listdir",
"nltk.tokenize.word_tokenize"
... | [((341, 367), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (356, 367), False, 'from nltk.corpus import stopwords\n'), ((556, 597), 're.sub', 're.sub', (['"""[^A-Za-z0-9(),!?\\\\\'\\\\`]"""', '""" """', 's'], {}), '("[^A-Za-z0-9(),!?\\\\\'\\\\`]", \' \', s)\n', (562, 597), False, 'import re\n'), ((605, 629), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 's'], {}), '("\\\\\'s", " \'s", s)\n', (611, 629), False, 'import re\n'), ((639, 665), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 's'], {}), '("\\\\\'ve", " \'ve", s)\n', (645, 665), False, 'import re\n'), ((675, 701), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 's'], {}), '("n\\\\\'t", " n\'t", s)\n', (681, 701), False, 'import re\n'), ((711, 737), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 's'], {}), '("\\\\\'re", " \'re", s)\n', (717, 737), False, 'import re\n'), ((747, 771), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 's'], {}), '("\\\\\'d", " \'d", s)\n', (753, 771), False, 'import re\n'), ((781, 807), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 's'], {}), '("\\\\\'ll", " \'ll", s)\n', (787, 807), False, 'import re\n'), ((817, 838), 're.sub', 're.sub', (['""","""', '""" , """', 's'], {}), "(',', ' , ', s)\n", (823, 838), False, 'import re\n'), ((848, 869), 're.sub', 're.sub', (['"""!"""', '""" ! """', 's'], {}), "('!', ' ! ', s)\n", (854, 869), False, 'import re\n'), ((879, 904), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 's'], {}), "('\\\\(', ' \\\\( ', s)\n", (885, 904), False, 'import re\n'), ((912, 937), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 's'], {}), "('\\\\)', ' \\\\) ', s)\n", (918, 937), False, 'import re\n'), ((945, 970), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 's'], {}), "('\\\\?', ' \\\\? 
', s)\n", (951, 970), False, 'import re\n'), ((978, 1003), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 's'], {}), "('\\\\s{2,}', ' ', s)\n", (984, 1003), False, 'import re\n'), ((1077, 1093), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['s'], {}), '(s)\n', (1090, 1093), False, 'from nltk.tokenize import word_tokenize\n'), ((1552, 1588), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'tokenizer': 'clean_str'}), '(tokenizer=clean_str)\n', (1567, 1588), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1981, 2008), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'dimension'}), '(n_components=dimension)\n', (1984, 2008), False, 'from sklearn.decomposition import PCA\n'), ((2644, 2676), 'os.listdir', 'os.listdir', (["(folder + '/test/pos')"], {}), "(folder + '/test/pos')\n", (2654, 2676), False, 'import os\n'), ((2874, 2906), 'os.listdir', 'os.listdir', (["(folder + '/test/neg')"], {}), "(folder + '/test/neg')\n", (2884, 2906), False, 'import os\n'), ((3104, 3137), 'os.listdir', 'os.listdir', (["(folder + '/train/pos')"], {}), "(folder + '/train/pos')\n", (3114, 3137), False, 'import os\n'), ((3336, 3369), 'os.listdir', 'os.listdir', (["(folder + '/train/neg')"], {}), "(folder + '/train/neg')\n", (3346, 3369), False, 'import os\n'), ((3640, 3665), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (3647, 3665), True, 'import numpy as np\n'), ((3670, 3695), 'numpy.save', 'np.save', (["(output + '/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (3677, 3695), True, 'import numpy as np\n'), ((4420, 4473), 'numpy.concatenate', 'np.concatenate', (['[negative_labels, positive_labels]', '(0)'], {}), '([negative_labels, positive_labels], 0)\n', (4434, 4473), True, 'import numpy as np\n'), ((4479, 4504), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (4486, 4504), True, 'import numpy as np\n'), ((4509, 4534), 'numpy.save', 'np.save', (["(output + 
'/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (4516, 4534), True, 'import numpy as np\n'), ((5254, 5307), 'numpy.concatenate', 'np.concatenate', (['[negative_labels, positive_labels]', '(0)'], {}), '([negative_labels, positive_labels], 0)\n', (5268, 5307), True, 'import numpy as np\n'), ((5313, 5338), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (5320, 5338), True, 'import numpy as np\n'), ((5343, 5368), 'numpy.save', 'np.save', (["(output + '/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (5350, 5368), True, 'import numpy as np\n'), ((6208, 6233), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (6215, 6233), True, 'import numpy as np\n'), ((6238, 6263), 'numpy.save', 'np.save', (["(output + '/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (6245, 6263), True, 'import numpy as np\n'), ((7105, 7130), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (7112, 7130), True, 'import numpy as np\n'), ((7135, 7160), 'numpy.save', 'np.save', (["(output + '/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (7142, 7160), True, 'import numpy as np\n'), ((7886, 7939), 'numpy.concatenate', 'np.concatenate', (['[negative_labels, positive_labels]', '(0)'], {}), '([negative_labels, positive_labels], 0)\n', (7900, 7939), True, 'import numpy as np\n'), ((7945, 7970), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (7952, 7970), True, 'import numpy as np\n'), ((7975, 8000), 'numpy.save', 'np.save', (["(output + '/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (7982, 8000), True, 'import numpy as np\n'), ((8814, 8839), 'numpy.save', 'np.save', (["(output + '/X')", 'X'], {}), "(output + '/X', X)\n", (8821, 8839), True, 'import numpy as np\n'), ((8844, 8869), 'numpy.save', 'np.save', (["(output + '/Y')", 'Y'], {}), "(output + '/Y', Y)\n", (8851, 8869), True, 'import numpy as np\n'), ((9348, 9414), 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 
'word2vec.KeyedVectors.load_word2vec_format', (['file_name'], {'binary': '(True)'}), '(file_name, binary=True)\n', (9390, 9414), True, 'import gensim.models.keyedvectors as word2vec\n'), ((2236, 2268), 'numpy.zeros', 'np.zeros', (['(dimension, dimension)'], {}), '((dimension, dimension))\n', (2244, 2268), True, 'import numpy as np\n'), ((9143, 9182), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (9153, 9182), True, 'import numpy as np\n'), ((9741, 9780), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (9751, 9780), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
from numba import jit
import numpy as np
import pandas as pd
from datetime import datetime as dt
import os
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import lightgbm as lgb
import xgboost as xgb
import time
import datetime
from tqdm import tqdm_notebook as tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn import metrics
from itertools import combinations
import gc
import pickle
import warnings
warnings.filterwarnings("ignore")
import os
from functools import wraps
from timeit import default_timer as timer
import os
print(os.listdir("./"))
# Explicit per-column dtypes for pd.read_csv. 'category' marks string-coded
# columns; float types are used for numeric columns, int types where the
# column has no missing values (floats elsewhere presumably accommodate NaN).
dtypes = {
    'MachineIdentifier': 'category',
    'ProductName': 'category',
    'EngineVersion': 'category',
    'AppVersion': 'category',
    'AvSigVersion': 'category',
    'IsBeta': 'int8',
    'RtpStateBitfield': 'float16',
    'IsSxsPassiveMode': 'int8',
    'DefaultBrowsersIdentifier': 'float16',
    'AVProductStatesIdentifier': 'float32',
    'AVProductsInstalled': 'float16',
    'AVProductsEnabled': 'float16',
    'HasTpm': 'int8',
    'CountryIdentifier': 'int16',
    'CityIdentifier': 'float32',
    'OrganizationIdentifier': 'float16',
    'GeoNameIdentifier': 'float16',
    'LocaleEnglishNameIdentifier': 'int8',
    'Platform': 'category',
    'Processor': 'category',
    'OsVer': 'category',
    'OsBuild': 'int16',
    'OsSuite': 'int16',
    'OsPlatformSubRelease': 'category',
    'OsBuildLab': 'category',
    'SkuEdition': 'category',
    'IsProtected': 'float16',
    'AutoSampleOptIn': 'int8',
    'PuaMode': 'category',
    'SMode': 'float16',
    'IeVerIdentifier': 'float16',
    'SmartScreen': 'category',
    'Firewall': 'float16',
    'UacLuaenable': 'float32',
    'Census_MDC2FormFactor': 'category',
    'Census_DeviceFamily': 'category',
    'Census_OEMNameIdentifier': 'float16',
    'Census_OEMModelIdentifier': 'float32',
    'Census_ProcessorCoreCount': 'float16',
    'Census_ProcessorManufacturerIdentifier': 'float16',
    'Census_ProcessorModelIdentifier': 'float16',
    'Census_ProcessorClass': 'category',
    'Census_PrimaryDiskTotalCapacity': 'float32',
    'Census_PrimaryDiskTypeName': 'category',
    'Census_SystemVolumeTotalCapacity': 'float32',
    'Census_HasOpticalDiskDrive': 'int8',
    'Census_TotalPhysicalRAM': 'float32',
    'Census_ChassisTypeName': 'category',
    'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
    'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
    'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
    'Census_PowerPlatformRoleName': 'category',
    'Census_InternalBatteryType': 'category',
    'Census_InternalBatteryNumberOfCharges': 'float32',
    'Census_OSVersion': 'category',
    'Census_OSArchitecture': 'category',
    'Census_OSBranch': 'category',
    'Census_OSBuildNumber': 'int16',
    'Census_OSBuildRevision': 'int32',
    'Census_OSEdition': 'category',
    'Census_OSSkuName': 'category',
    'Census_OSInstallTypeName': 'category',
    'Census_OSInstallLanguageIdentifier': 'float16',
    'Census_OSUILocaleIdentifier': 'int16',
    'Census_OSWUAutoUpdateOptionsName': 'category',
    'Census_IsPortableOperatingSystem': 'int8',
    'Census_GenuineStateName': 'category',
    'Census_ActivationChannel': 'category',
    'Census_IsFlightingInternal': 'float16',
    'Census_IsFlightsDisabled': 'float16',
    'Census_FlightRing': 'category',
    'Census_ThresholdOptIn': 'float16',
    'Census_FirmwareManufacturerIdentifier': 'float16',
    'Census_FirmwareVersionIdentifier': 'float32',
    'Census_IsSecureBootEnabled': 'int8',
    'Census_IsWIMBootEnabled': 'float16',
    'Census_IsVirtualDevice': 'float16',
    'Census_IsTouchEnabled': 'int8',
    'Census_IsPenCapable': 'int8',
    'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
    'Wdft_IsGamer': 'float16',
    'Wdft_RegionIdentifier': 'float16',
    'HasDetections': 'int8'
}
def reduce_mem_usage(df, verbose=True):
    """Downcast numeric columns of `df` in place to the smallest dtype that
    strictly contains their value range, and return the frame.

    Only columns whose current dtype is one of the listed numeric dtypes are
    touched (int8 is already minimal, so it is not in the list). When
    `verbose` is true, the memory saving is printed.
    """
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage(deep=True).sum() / 1024**2
    int_targets = [np.int8, np.int16, np.int32, np.int64]
    float_targets = [np.float16, np.float32]
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type not in numeric_dtypes:
            continue
        lo = df[col].min()
        hi = df[col].max()
        if str(col_type)[:3] == 'int':
            # Smallest integer type whose limits strictly bracket the range.
            for target in int_targets:
                limits = np.iinfo(target)
                if lo > limits.min and hi < limits.max:
                    df[col] = df[col].astype(target)
                    break
        else:
            # Smallest float type that brackets the range, else float64.
            for target in float_targets:
                limits = np.finfo(target)
                if lo > limits.min and hi < limits.max:
                    df[col] = df[col].astype(target)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage(deep=True).sum() / 1024**2
    if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    print('Thanks!!')
    return df
def add_count(df, c):
    """Frequency-encode column `c`: add 'fe_count_<c>' holding, for every
    row, how many rows share that row's value of `c`. Mutates `df` in place.
    """
    df['fe_count_' + c] = df.groupby(c)[c].transform('count')
# In[ ]:

# Split the declared dtypes into numeric vs categorical column lists.
numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerical_columns = [c for c,v in dtypes.items() if v in numerics]
categorical_columns = [c for c,v in dtypes.items() if v not in numerics]

# In[ ]:

train = pd.read_csv('../input/train.csv', dtype=dtypes)
train_y = train['HasDetections']
test = pd.read_csv('../input/test.csv', dtype=dtypes)

# display section
train['fe_aspect_ratio'] = train['Census_InternalPrimaryDisplayResolutionHorizontal']/ train['Census_InternalPrimaryDisplayResolutionVertical']
# BUG FIX: the test-set aspect ratio previously divided by the *train* set's
# vertical-resolution column, silently misaligning rows (and producing NaN
# past len(train)); it must use test's own column.
test['fe_aspect_ratio'] = test['Census_InternalPrimaryDisplayResolutionHorizontal']/ test['Census_InternalPrimaryDisplayResolutionVertical']
# Upcast to float64 so the squared terms in the dpi formula below do not
# overflow the float16 storage dtype.
train['Census_InternalPrimaryDisplayResolutionHorizontal'] = train['Census_InternalPrimaryDisplayResolutionHorizontal'].astype(np.float64)
test['Census_InternalPrimaryDisplayResolutionHorizontal'] = test['Census_InternalPrimaryDisplayResolutionHorizontal'].astype(np.float64)
train['Census_InternalPrimaryDisplayResolutionVertical'] = train['Census_InternalPrimaryDisplayResolutionVertical'].astype(np.float64)
test['Census_InternalPrimaryDisplayResolutionVertical'] = test['Census_InternalPrimaryDisplayResolutionVertical'].astype(np.float64)
# Pixel density: diagonal pixel count over physical diagonal size in inches.
train['fe_dpi'] = ((train['Census_InternalPrimaryDisplayResolutionHorizontal']**2 + train['Census_InternalPrimaryDisplayResolutionVertical']**2)**.5)/(train['Census_InternalPrimaryDiagonalDisplaySizeInInches'])
test['fe_dpi'] = ((test['Census_InternalPrimaryDisplayResolutionHorizontal']**2 + test['Census_InternalPrimaryDisplayResolutionVertical']**2)**.5)/(test['Census_InternalPrimaryDiagonalDisplaySizeInInches'])
train['fe_MegaPixels'] = (train['Census_InternalPrimaryDisplayResolutionHorizontal'] * train['Census_InternalPrimaryDisplayResolutionVertical'])/1e6
test['fe_MegaPixels'] = (test['Census_InternalPrimaryDisplayResolutionHorizontal'] * test['Census_InternalPrimaryDisplayResolutionVertical'])/1e6
print('Done Display Features\n')
def encode_categorical_columns(x_train, x_test, columns, sort=True):
    """Label-encode `columns` consistently across train and test in place.

    Each column is concatenated across both frames, factorized to int32 codes
    (sorted codes when `sort` is true), then split back so train and test
    share one encoding. 'MachineIdentifier' and 'HasDetections' are skipped.
    Returns (x_train, x_test).
    """
    train_length = x_train.shape[0]
    for col in tqdm(columns):
        if col == 'MachineIdentifier' or col == 'HasDetections':
            continue
        combined_data = pd.concat([x_train[col], x_test[col]])
        # factorize maps missing values to -1, so codes contain no NaN.
        combined_data, _ = pd.factorize(combined_data, sort=sort)
        combined_data = pd.Series(combined_data).astype('int32')
        x_train[col] = combined_data.iloc[:train_length].values
        x_test[col] = combined_data.iloc[train_length:].values
        # NOTE(review): these fillna(0) calls look like dead code — the int32
        # codes above cannot hold NaN; confirm before removing.
        x_train[col] = x_train[col].fillna(0)
        x_test[col] = x_test[col].fillna(0)
        del combined_data
        gc.collect()
    return x_train, x_test
# In[ ]:
def fe(df):
    """Feature-engineer the malware frame in place and return it downcast.

    Adds version-derived, hardware-ratio, pair-frequency, and membership-flag
    columns, then shrinks dtypes via reduce_mem_usage.
    """
    print(gc.collect())
    print('Cooking Pointless Things....')
    # Version components: third field of EngineVersion, second/third fields
    # of AvSigVersion (the malformed '1.23.1144.0' value is patched first).
    df['fe_EngineVersion_2'] = df['EngineVersion'].apply(lambda x: x.split('.')[2]).astype('category')
    df['fe_one_less_AVproductInstalled'] = df['AVProductsInstalled'] - 1
    df['fe_AvSigVersion_minor'] = df['AvSigVersion'].apply(lambda x: x.split('.')[1]).astype('category')
    df['fe_AvSigVersion_build'] = df['AvSigVersion'].apply(lambda x: x.split('.')[2]).astype('category')
    df['fe_AvSigVersion_minor_build'] = df['AvSigVersion'].str.replace('1.23.1144.0','1.273.1144.0').apply(lambda x: float((x.split('.')[1]) +'.'+(x.split('.')[2]))).astype('float32')
    df['fe_AvSigVersion_sum'] = df['AvSigVersion'].str.replace('1.23.1144.0','1.273.1144.0').apply(lambda x: float(x.split('.')[1]) + float(x.split('.')[2])).astype(int).values
    df['AvSigVersion'] = df['AvSigVersion'].astype('category')
    # Flag rows whose AV-state id is among the 20 most frequent values.
    top_20 = df['AVProductStatesIdentifier'].value_counts(dropna=False, normalize=True).cumsum().index[:20]
    df['fe_magic_4'] = 0
    df.loc[df['AVProductStatesIdentifier'].isin(top_20) == True, 'fe_magic_4'] = 1
    del top_20
    gc.collect()
    # Disk / RAM / CPU ratios.
    df['fe_primary_drive_c_ratio'] = df['Census_SystemVolumeTotalCapacity']/ df['Census_PrimaryDiskTotalCapacity']
    df['fe_Census_SystemVolumeTotalCapacity_GB'] = df['Census_SystemVolumeTotalCapacity']/1024.
    df['fe_non_primary_drive_MB'] = df['Census_PrimaryDiskTotalCapacity'] - df['Census_SystemVolumeTotalCapacity']
    df['fe_ram_per_processor'] = df['Census_TotalPhysicalRAM']/ df['Census_ProcessorCoreCount']
    df['fe_physical_cores'] = df['Census_ProcessorCoreCount'] / 2
    print("Preparing ratios")
    #faster thanks to cpmp
    # Pair-frequency encodings: share of rows carrying each value pair.
    nrows = df.shape[0]
    df['fe_avsig_gamer_freq'] = df.groupby(['AvSigVersion','Wdft_IsGamer'])['OsBuild'].transform('count') / nrows
    df['fe_cpucores_region_freq'] = df.groupby(['Census_ProcessorCoreCount','Wdft_RegionIdentifier'])['OsBuild'].transform('count') / nrows
    df['fe_cpucores_oemname_freq'] = df.groupby(['Census_ProcessorCoreCount','Census_OEMNameIdentifier'])['OsBuild'].transform('count') / nrows
    df['fe_geoname_oemname_freq'] = df.groupby(['GeoNameIdentifier','Census_OEMNameIdentifier'])['OsBuild'].transform('count') / nrows
    df['fe_cntiden_oemname_freq'] = df.groupby(['CountryIdentifier','Census_OEMNameIdentifier'])['OsBuild'].transform('count') / nrows
    ##### testing feats
    # Hard-coded membership flags; presumably value sets correlated with the
    # target, taken from offline analysis — provenance not visible here.
    df['fe_hghdec_cnt1'] = 0
    df.loc[df['CountryIdentifier'].isin([214,89,195,4,141,158,43,201,41,9,29,203,171,60,93,142,66,149,207,97,107,68,5,35,160]) == True, 'fe_hghdec_cnt1'] = 1
    df['fe_hghdec_cnt_2'] = 0
    df.loc[df['EngineVersion'].isin(['1.1.15100.1', '1.1.15200.1', '1.1.14600.4', '1.1.15000.2', '1.1.14901.4']) == True, 'fe_hghdec_cnt_2'] = 1
    df['fe_hghdec_cnt_3'] = 0
    df.loc[df['AppVersion'].isin(['4.18.1807.18075', '4.18.1806.18062', '4.12.16299.15', '4.10.209.0', '4.13.17134.1', '4.16.17656.18052']) == True, 'fe_hghdec_cnt_3'] = 1
    df['fe_hghdec_cnt_8'] = 0
    df.loc[df['Census_ProcessorModelIdentifier'].isin([2696.,1998.,2660.,2372.,1992.,2382.,2640.,2524.,1985.,2096.]) == True, 'fe_hghdec_cnt_8'] = 1
    df['fe_hghdec_cnt_9'] = 0
    df.loc[df['Census_OSInstallTypeName'].isin(['UUPUgrade','IBSClean', 'Update', 'Upgrade', 'Other','Reset']) == True, 'fe_hghdec_cnt_9'] = 1
    df['fe_hghdec_cnt_10'] = 0
    df.loc[df['Census_FirmwareManufacturerIdentifier'].isin([142.,628.,554.,355.,556.,500.,93.,807.,513.]) == True, 'fe_hghdec_cnt_10'] = 1
    df = reduce_mem_usage(df)
    gc.collect()
    return df
# In[ ]:


# In[ ]:

# Apply feature engineering to both frames.
train = fe(train)
test = fe(test)


# In[ ]:

# Recompute the numeric column list from actual dtypes (fe() added columns),
# and register the three string-valued engineered features as categoricals.
numerical_columns = list(train.select_dtypes(include=numerics).columns)
categorical_columns = categorical_columns + ['fe_EngineVersion_2', 'fe_AvSigVersion_minor', 'fe_AvSigVersion_build']
train, test = encode_categorical_columns(train, test, categorical_columns)
print(train.shape, test.shape)


# In[ ]:

train = reduce_mem_usage(train)
test = reduce_mem_usage(test)


# In[ ]:

numerical_columns.remove('HasDetections')
# NOTE(review): per the dtypes mapping, 'PuaMode' is a category column and so
# should not appear in the numeric list built above — confirm this remove()
# does not raise ValueError at runtime.
numerical_columns.remove('PuaMode')
numerical_columns.remove('DefaultBrowsersIdentifier')
# High-cardinality / redundant categoricals dropped from the model input.
remove = ['MachineIdentifier','Census_ChassisTypeName','Census_OSEdition','Census_OSArchitecture',
          'OsPlatformSubRelease','OsVer', 'Census_DeviceFamily']
for col in remove:categorical_columns.remove(col)
train = train[numerical_columns+categorical_columns]
test = test[numerical_columns+categorical_columns]


# In[ ]:

# Distinct values per categorical column, used to size the embeddings below.
col_vals_dict = {c: list(train[c].unique()) for c in categorical_columns if c not in ['MachineIdentifier', 'ProductName']}


# In[ ]:

nb_numeric = len(train.columns) - len(col_vals_dict)
nb_categoric = len(col_vals_dict)
print('Number of Numerical features:', nb_numeric)
print('Number of Categorical features:', nb_categoric)


# ### Create the network
# In order to create our embedding model we need to have a look at the spatiality of the cat features. We choose here to use Embedding only on cat features that present more than 2 outcomes otherwise it is count as a numeric value (0 or 1).

# In[ ]:

# Columns with more than two distinct values get an embedding layer; binary
# columns are fed in with the numeric block instead.
embed_cols = []
len_embed_cols = []
for c in col_vals_dict:
    if len(col_vals_dict[c])>2:
        embed_cols.append(c)
        len_embed_cols.append((c, len(col_vals_dict[c])))
        print(c + ': %d values' % len(col_vals_dict[c])) #look at value counts to know the embedding dimensions
print('\n Number of embed features :', len(embed_cols))


# - We are including 24 features out of 33 categorical features into our Embedding.
# - **embedding size = min(50, number of categories/2)**

# In[ ]:

print(len_embed_cols)


# In[ ]:
def build_embedding_network(len_embed_cols):
    """Build a Keras binary classifier with one embedding branch per categorical feature.

    Each (feature_name, cardinality) pair in ``len_embed_cols`` gets an
    int32 input, an Embedding of size cardinality//2 + 1, dropout and a
    flattening Reshape. The branches are concatenated with one float32
    input holding the remaining numeric columns, then fed through two
    dense/relu/dropout stages and a sigmoid output.

    Relies on the module-level ``numerical_columns`` and
    ``categorical_columns`` lists to size the numeric input.
    Returns the compiled Model (binary crossentropy, Adam).
    """
    inputs = []
    branches = []
    for feat_name, cardinality in len_embed_cols:
        emb_size = cardinality // 2 + 1  # extra slot for unknowns
        cat_input = Input(shape=(1,), dtype='int32')
        branch = Embedding(cardinality, emb_size, input_length=1, name=feat_name)(cat_input)
        branch = Dropout(0.2)(branch)
        branch = Reshape((emb_size,))(branch)
        inputs.append(cat_input)
        branches.append(branch)
    # Everything that is not embedded arrives as a single numeric vector.
    n_plain = len(numerical_columns) + len(categorical_columns) - len(len_embed_cols)
    num_input = Input(shape=(n_plain,), dtype='float32')
    x = Concatenate(axis=1)(branches + [num_input])
    x = Dense(128)(x)
    x = Activation('relu')(x)
    x = Dropout(.2)(x)
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dropout(.15)(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    model = Model(inputs + [num_input], x)
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
# In[ ]:
def preproc(X_train, X_val, X_test):
    """Turn the three dataframes into the Keras multi-input list format.

    For every column in the module-level ``embed_cols`` list, values are
    mapped (based on the *training* split only) to dense integer codes in
    [0, n_values); categories unseen in training fall back to code 0 in
    the validation/test splits. All remaining columns are appended as one
    numeric array.

    Returns (input_list_train, input_list_val, input_list_test), each a
    list with one array per embedded column plus one trailing numeric
    array.
    """
    input_list_train = []
    input_list_val = []
    input_list_test = []
    # the cols to be embedded: rescaling to range [0, # values)
    for c in embed_cols:
        # Idiomatic replacement of the manual index loop over raw values.
        val_map = {val: i for i, val in enumerate(np.unique(X_train[c]))}
        input_list_train.append(X_train[c].map(val_map).values)
        # unseen categories map to NaN -> filled with code 0
        input_list_val.append(X_val[c].map(val_map).fillna(0).values)
        input_list_test.append(X_test[c].map(val_map).fillna(0).values)
    # the rest of the columns go through as a single numeric block
    other_cols = [c for c in X_train.columns if c not in embed_cols + ['HasDetections', 'MachineIdentifier']]
    input_list_train.append(X_train[other_cols].values)
    input_list_val.append(X_val[other_cols].values)
    input_list_test.append(X_test[other_cols].values)
    return input_list_train, input_list_val, input_list_test
# In[ ]:
# Impute missing values in order to scale
from sklearn.preprocessing import MinMaxScaler
train[numerical_columns] = train[numerical_columns].fillna(value = 0)
test[numerical_columns] = test[numerical_columns].fillna(value = 0)
# Fit the scaler only on train data
scaler = MinMaxScaler().fit(train[numerical_columns])
train.loc[:,numerical_columns] = scaler.transform(train[numerical_columns])
test.loc[:,numerical_columns] = scaler.transform(test[numerical_columns])
# In[ ]:
print ('neural network....')
from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape
from keras.layers import BatchNormalization, SpatialDropout1D, Concatenate, Activation
from keras.callbacks import Callback, EarlyStopping
from keras.models import Model
from keras.optimizers import Adam
# In[ ]:
# Cross-validation / training hyperparameters.
K = 5                # number of stratified folds
runs_per_fold = 1    # networks averaged per fold
n_epochs = 10
patience = 5         # early-stopping patience (epochs)
models = []          # one trained network kept per fold
cv_aucs = []         # per-fold validation AUC scores
# Out-of-fold predictions for every training row, and per-fold test predictions.
full_val_preds = np.zeros(np.shape(train)[0])
y_preds = np.zeros((np.shape(test)[0], K))
# NOTE(review): StratifiedKFold is expected to be imported earlier in the notebook.
kfold = StratifiedKFold(n_splits = K, shuffle = True, random_state=2**10)
# ---------------------------------------------------------------------------
# Stratified K-fold cross-validation: train one embedding NN per fold,
# accumulate out-of-fold validation predictions and per-fold test predictions.
# ---------------------------------------------------------------------------
for i, (f_ind, outf_ind) in enumerate(kfold.split(train, train_y)):
    train_f, X_val_f = train.loc[f_ind].copy(), train.loc[outf_ind].copy()
    train_y_f, y_val_f = train_y[f_ind], train_y[outf_ind]
    print('Shapes Are', train_f.shape, X_val_f.shape)
    test_f = test.copy()
    # Shuffle data
    idx = np.arange(len(train_f))
    np.random.shuffle(idx)
    train_f = train_f.iloc[idx]
    train_y_f = train_y_f.iloc[idx]
    #preprocessing
    print('Preprocessing........!!!')
    proc_train_f, proc_X_val_f, proc_test_f = preproc(train_f, X_val_f, test_f)
    #track oof prediction for cv scores
    val_preds = 0
    for j in range(runs_per_fold):
        print('Build_embedding_network........!!!')
        NN = build_embedding_network(len_embed_cols)
        # Set callback functions to early stop training and save the best model so far
        callbacks = [EarlyStopping(monitor='val_loss', patience=patience)]
        print(len(proc_train_f))
        NN.fit(proc_train_f, train_y_f.values,
               epochs=n_epochs, batch_size= 2**15,
               verbose=1,callbacks=callbacks,
               validation_data=(proc_X_val_f, y_val_f))
        print('OOF Val Predictions........!!!')
        # Average the runs of this fold into a single OOF prediction.
        val_preds += NN.predict(proc_X_val_f)[:,0] / runs_per_fold
        print('Test Predictions........!!!')
        #need to fix the y_preds over batches...
        y_preds[:,i] += NN.predict(proc_test_f)[:,0] / runs_per_fold
    # BUGFIX: the fold inputs used to be `del`-eted inside the runs loop,
    # which would raise NameError on the second run when runs_per_fold > 1;
    # free them once per fold instead.
    del proc_train_f, train_y_f, proc_X_val_f, callbacks
    gc.collect()
    models.append(NN)
    full_val_preds[outf_ind] += val_preds
    cv_auc = roc_auc_score(y_val_f.values, val_preds)
    cv_aucs.append(cv_auc)
    print ('\nFold %i prediction cv AUC: %.5f\n' %(i,cv_auc))
# BUGFIX: was np.mean(cv_auc) — the mean of the *last* fold's scalar score.
# Average over all folds instead.
print('Mean out of fold AUC: %.5f' % np.mean(cv_aucs))
print('Full validation AUC: %.5f' % roc_auc_score(train_y.values, full_val_preds))
print('Saving OOF Train.......')
np.save('train_oof_nn_with_embedddings.npy', full_val_preds)
print('Saved\n')
print('Saving Predictions.......')
np.save('test_nn_with_embedddings.npy', y_preds)
print('Saved\n')
# In[ ]:
print(models[0].summary())
# In[ ]:
# Extract the learned embedding weight matrices (one per embedded column,
# taken from the first fold's model) and pickle them for reuse.
print('Saving Embeddings....')
save_embeddings = True
saved_embeddings_fname = "embeddings.pickle"
all_emb = []
if save_embeddings:
    model_ = models[0]
    for cols in embed_cols:
        # The embedding layer was named after its column, so it can be
        # looked up by that name; weights[0] is the embedding matrix.
        emb = model_.get_layer(cols).get_weights()[0]
        all_emb.append(emb)
    with open(saved_embeddings_fname , 'wb') as f:
        pickle.dump(all_emb, f, -1)
# In[ ]:
from keras.models import load_model
print('Saving Models...')
# NOTE(review): fold 2 (models[2]) and fold 4 are never saved here — TODO confirm
# this selection of folds 0, 1 and 3 is intentional.
models[0].save('my_model_0.h5')
models[1].save('my_model_1.h5')
models[3].save('my_model_3.h5')
del models # deletes the existing models
# In[ ]:
print(embed_cols)
# In[ ]:
# Print the positional index of every embedded column in the final frame.
for idx, col in enumerate(train.columns):
    if col in embed_cols:
        print(idx, col)
##mapping is left for the User to Give it a Go On Purpose:) | [
"pickle.dump",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"numpy.iinfo",
"keras.models.Model",
"numpy.shape",
"gc.collect",
"matplotlib.pyplot.style.use",
"numpy.mean",
"keras.layers.Input",
"keras.layers.Reshape",
"numpy.unique",
"tqdm.tqdm_notebook",
"numpy.finfo",
"panda... | [((193, 216), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (206, 216), True, 'import matplotlib.pyplot as plt\n'), ((617, 650), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (640, 650), False, 'import warnings\n'), ((8937, 8984), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {'dtype': 'dtypes'}), "('../input/train.csv', dtype=dtypes)\n", (8948, 8984), True, 'import pandas as pd\n'), ((9026, 9072), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {'dtype': 'dtypes'}), "('../input/test.csv', dtype=dtypes)\n", (9037, 9072), True, 'import pandas as pd\n'), ((20160, 20223), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'K', 'shuffle': '(True)', 'random_state': '(2 ** 10)'}), '(n_splits=K, shuffle=True, random_state=2 ** 10)\n', (20175, 20223), False, 'from sklearn.model_selection import StratifiedKFold, KFold, TimeSeriesSplit\n'), ((22256, 22316), 'numpy.save', 'np.save', (['"""train_oof_nn_with_embedddings.npy"""', 'full_val_preds'], {}), "('train_oof_nn_with_embedddings.npy', full_val_preds)\n", (22263, 22316), True, 'import numpy as np\n'), ((22370, 22418), 'numpy.save', 'np.save', (['"""test_nn_with_embedddings.npy"""', 'y_preds'], {}), "('test_nn_with_embedddings.npy', y_preds)\n", (22377, 22418), True, 'import numpy as np\n'), ((748, 764), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (758, 764), False, 'import os\n'), ((10798, 10811), 'tqdm.tqdm_notebook', 'tqdm', (['columns'], {}), '(columns)\n', (10802, 10811), True, 'from tqdm import tqdm_notebook as tqdm\n'), ((12567, 12579), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12577, 12579), False, 'import gc\n'), ((14996, 15008), 'gc.collect', 'gc.collect', ([], {}), '()\n', (15006, 15008), False, 'import gc\n'), ((18082, 18120), 'keras.models.Model', 'Model', (['[*model_in, input_num]', 'outputs'], {}), '([*model_in, input_num], 
outputs)\n', (18087, 18120), False, 'from keras.models import Model\n'), ((20576, 20598), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (20593, 20598), True, 'import numpy as np\n'), ((21950, 21990), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_val_f.values', 'val_preds'], {}), '(y_val_f.values, val_preds)\n', (21963, 21990), False, 'from sklearn.metrics import mean_squared_error, roc_auc_score\n'), ((10936, 10974), 'pandas.concat', 'pd.concat', (['[x_train[col], x_test[col]]'], {}), '([x_train[col], x_test[col]])\n', (10945, 10974), True, 'import pandas as pd\n'), ((11002, 11040), 'pandas.factorize', 'pd.factorize', (['combined_data'], {'sort': 'sort'}), '(combined_data, sort=sort)\n', (11014, 11040), True, 'import pandas as pd\n'), ((11357, 11369), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11367, 11369), False, 'import gc\n'), ((11442, 11454), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11452, 11454), False, 'import gc\n'), ((17254, 17286), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""'}), "(shape=(1,), dtype='int32')\n", (17259, 17286), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17698, 17717), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (17709, 17717), False, 'from keras.layers import BatchNormalization, SpatialDropout1D, Concatenate, Activation\n'), ((17763, 17773), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (17768, 17773), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17800, 17818), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17810, 17818), False, 'from keras.layers import BatchNormalization, SpatialDropout1D, Concatenate, Activation\n'), ((17844, 17856), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (17851, 17856), False, 'from keras.layers import Input, Embedding, 
Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17881, 17890), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (17886, 17890), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17917, 17935), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17927, 17935), False, 'from keras.layers import BatchNormalization, SpatialDropout1D, Concatenate, Activation\n'), ((17961, 17974), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (17968, 17974), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17999, 18007), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (18004, 18007), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((18033, 18054), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (18043, 18054), False, 'from keras.layers import BatchNormalization, SpatialDropout1D, Concatenate, Activation\n'), ((18446, 18467), 'numpy.unique', 'np.unique', (['X_train[c]'], {}), '(X_train[c])\n', (18455, 18467), True, 'import numpy as np\n'), ((19441, 19455), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (19453, 19455), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((20088, 20103), 'numpy.shape', 'np.shape', (['train'], {}), '(train)\n', (20096, 20103), True, 'import numpy as np\n'), ((21503, 21515), 'gc.collect', 'gc.collect', ([], {}), '()\n', (21513, 21515), False, 'import gc\n'), ((21664, 21676), 'gc.collect', 'gc.collect', ([], {}), '()\n', (21674, 21676), False, 'import gc\n'), ((22122, 22137), 'numpy.mean', 'np.mean', (['cv_auc'], {}), '(cv_auc)\n', (22129, 22137), True, 'import numpy as np\n'), ((22175, 22220), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['train_y.values', 'full_val_preds'], {}), '(train_y.values, full_val_preds)\n', (22188, 22220), False, 'from sklearn.metrics 
import mean_squared_error, roc_auc_score\n'), ((22820, 22847), 'pickle.dump', 'pickle.dump', (['all_emb', 'f', '(-1)'], {}), '(all_emb, f, -1)\n', (22831, 22847), False, 'import pickle\n'), ((17307, 17362), 'keras.layers.Embedding', 'Embedding', (['dim', '(dim // 2 + 1)'], {'input_length': '(1)', 'name': 'name'}), '(dim, dim // 2 + 1, input_length=1, name=name)\n', (17316, 17362), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17409, 17421), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (17416, 17421), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((17453, 17477), 'keras.layers.Reshape', 'Reshape', (['(dim // 2 + 1,)'], {}), '((dim // 2 + 1,))\n', (17460, 17477), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate, Reshape\n'), ((20128, 20142), 'numpy.shape', 'np.shape', (['test'], {}), '(test)\n', (20136, 20142), True, 'import numpy as np\n'), ((21140, 21192), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'patience'}), "(monitor='val_loss', patience=patience)\n", (21153, 21192), False, 'from keras.callbacks import Callback, EarlyStopping\n'), ((11065, 11089), 'pandas.Series', 'pd.Series', (['combined_data'], {}), '(combined_data)\n', (11074, 11089), True, 'import pandas as pd\n'), ((7409, 7426), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (7417, 7426), True, 'import numpy as np\n'), ((7443, 7460), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (7451, 7460), True, 'import numpy as np\n'), ((7996, 8016), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (8004, 8016), True, 'import numpy as np\n'), ((8033, 8053), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (8041, 8053), True, 'import numpy as np\n'), ((7549, 7567), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (7557, 
7567), True, 'import numpy as np\n'), ((7584, 7602), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (7592, 7602), True, 'import numpy as np\n'), ((8145, 8165), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (8153, 8165), True, 'import numpy as np\n'), ((8182, 8202), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (8190, 8202), True, 'import numpy as np\n'), ((7692, 7710), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (7700, 7710), True, 'import numpy as np\n'), ((7727, 7745), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (7735, 7745), True, 'import numpy as np\n'), ((7835, 7853), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (7843, 7853), True, 'import numpy as np\n'), ((7870, 7888), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (7878, 7888), True, 'import numpy as np\n')] |
# *******************************************************************************
# Copyright (C) 2021 INAF
#
# This software is distributed under the terms of the BSD-3-Clause license
#
# Authors:
# <NAME> <<EMAIL>>
# *******************************************************************************
import os
import sys
import argparse
import numpy as np
from time import time
from shutil import copy
from astropy.io import fits
from multiprocessing import Pool
from os.path import isdir, join, isfile
from RTAscience.cfg.Config import Config
from RTAscience.lib.RTAManageXml import ManageXml
from RTAscience.lib.RTACtoolsSimulation import RTACtoolsSimulation, make_obslist
from RTAscience.lib.RTACtoolsAnalysis import RTACtoolsAnalysis
from RTAscience.lib.RTAUtils import get_alert_pointing_gw, get_mergermap, get_pointing, str2bool
def main(args):
    """Drive the GRB simulation pipeline for every runid in the configuration.

    For each runid: resolve its merger map and alert pointing, create the
    output folder under <data>/obs/<runid>, dump the configuration once,
    then run ``trials`` simulations either sequentially or via a process
    pool (``args.mp_enabled``/``args.mp_threads``).

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options (cfgfile, merge, remove, print, mp_enabled,
        mp_threads).

    Raises
    ------
    ValueError
        If the data path is invalid, the extracted-data folder or the
        time-slices table for a runid is missing.
    """
    cfg = Config(args.cfgfile)
    # GRB ---!
    if cfg.get('runid') == 'all':
        runids = [f.replace('.fits', '') for f in os.listdir(cfg.get('catalog')) if isfile(join(cfg.get('catalog'), f))]
    elif isinstance(cfg.get('runid'), str):
        runids = [cfg.get('runid')]
    else:
        runids = cfg.get('runid')
    runids = sorted(runids)
    # general ---!
    trials = cfg.get('trials')
    tmax = cfg.get('tobs')-cfg.get('onset')+cfg.get('delay')
    # paths ---!
    datapath = cfg.get('data')
    if not isdir(datapath):  # main data folder
        raise ValueError('Please specify a valid path')
    if not isdir(join(datapath, 'obs')):  # obs parent folder
        os.mkdir(join(datapath, 'obs'))
    # background model ---!
    bkg_model = cfg.get('bkg')
    # ------------------------------------------------------- loop runid --- !!!
    for runid in runids:
        print(f"{'-'*50} #\nProcessing runid: {runid}")
        # grb path ---!
        grbpath = join(datapath, 'obs', runid)  # folder that will host the phlist
        if not isdir(grbpath):
            os.mkdir(grbpath)
        modelpath = join(datapath, f'extracted_data/{runid}')  # bin model folder
        if not isdir(modelpath):
            raise ValueError(f'Folder {runid} not found in {modelpath}')
        tcsv = join(datapath, f'extracted_data/{runid}/time_slices.csv')  # times table
        if not isfile(tcsv):
            raise ValueError(f'Data from {runid} have not been correctly extracted.')
        mergerpath = os.path.expandvars(cfg.get('merger'))
        mergermap = get_mergermap(runid, mergerpath)
        # BUGFIX: identity comparison with None (was `== None`).
        if mergermap is None:
            print(f'Skip runid {runid}. ')
            continue
        # get alert pointing
        if isinstance(cfg.get('offset'), str) and cfg.get('offset').lower() == 'gw':
            pointing = get_alert_pointing_gw(mergermap)
        else:
            pointing = list(get_pointing(f"{os.path.expandvars(cfg.get('catalog'))}/{runid}.fits"))
            # Apply the configured declination offset away from the source.
            if pointing[1] < 0:
                pointing[0] += 0.0
                pointing[1] += -cfg.get('offset')
            else:
                pointing[0] += 0.0
                pointing[1] += cfg.get('offset')
        # Dumping the Conf object to txt file (once per runid)
        dumpedConfig = os.path.join(grbpath, "config.yaml")
        if not os.path.isfile(dumpedConfig):
            copy(args.cfgfile, str(dumpedConfig))
        # ---------------------------------------------------- loop trials ---!!!
        if args.mp_enabled:
            with Pool(args.mp_threads) as p:
                times = p.map(simulateTrial, [(i, cfg, pointing, tmax, datapath, runid, tcsv, grbpath, bkg_model) for i in range(trials)])
        else:
            # BUGFIX: previously only the last trial's (count, elapsed) tuple
            # was kept, so the "mean" below averaged count with elapsed time.
            times = [simulateTrial((i, cfg, pointing, tmax, datapath, runid, tcsv, grbpath, bkg_model)) for i in range(trials)]
        # time ---!
        if args.print:
            # simulateTrial returns (count, elapsed_t); average elapsed only.
            elapsed = [t[1] for t in times]
            if len(elapsed) > 1:
                print(f"Trial elapsed time (mean): {np.array(elapsed).mean()}")
            else:
                print(f"Trial elapsed time: {elapsed[0]}")
    print('\n... done.\n')
def simulateTrial(trial_args):
    """Run one simulation trial of a GRB template plus optional background.

    Parameters
    ----------
    trial_args : tuple
        Packed as (i, cfg, pointing, tmax, datapath, runid, tcsv, grbpath,
        bkg_model) so the function can be used with ``Pool.map``.

    Returns
    -------
    tuple
        (count, elapsed_t): the trial's seed/counter and its wall-clock time.

    NOTE(review): reads the module-level ``args`` for the print/merge/remove
    flags — it is only safe to call after argument parsing in ``__main__``.
    """
    start_t = time()
    i=trial_args[0]
    cfg=trial_args[1]
    pointing=trial_args[2]
    tmax=trial_args[3]
    datapath=trial_args[4]
    runid=trial_args[5]
    tcsv=trial_args[6]
    grbpath=trial_args[7]
    bkg_model=trial_args[8]
    # initialise ---!
    count = cfg.get('start_count') + i + 1
    name = f'ebl{count:06d}'
    # setup ---!
    sim = RTACtoolsSimulation()
    if type(cfg.get('caldb')) == list:
        sim.caldb = cfg.get('caldb')[0]
    else:
        sim.caldb = cfg.get('caldb')
    if type(cfg.get('irf')) == list:
        sim.irf = cfg.get('irf')[0]
    else:
        sim.irf = cfg.get('irf')
    sim.fov = cfg.get('roi')
    sim.e = [cfg.get('emin'), cfg.get('emax')]
    sim.seed = count  # deterministic per-trial seed
    sim.set_ebl = cfg.get('set_ebl')
    sim.pointing = pointing
    if args.print:
        print(f'Pointing = {sim.pointing} s')
    sim.tmax = tmax
    # get time grid ---!
    sim.template = join(os.path.expandvars(cfg.get('catalog')).replace(cfg.get('data'), datapath), f'{runid}.fits')
    event_bins = []
    sim.table = tcsv
    tgrid, tbin_start, tbin_stop = sim.getTimeSlices(GTI=(cfg.get('delay'), tmax), return_bins=True)
    # -------------------------------------------------------- simulate ---!!!
    print(f'Simulate template seed={sim.seed}')
    for j in range(tbin_stop-tbin_start-1):
        sim.t = [tgrid[j]+cfg.get('onset'), tgrid[j + 1]+cfg.get('onset')]
        if args.print:
            print(f'GTI (bin) = {sim.t} s')
        sim.model = join(datapath, f'extracted_data/{runid}/{runid}_tbin{tbin_start+j:02d}.xml')
        event = join(grbpath, f'{name}_tbin{tbin_start+j:02d}.fits')
        event_bins.append(event)
        sim.output = event
        sim.run_simulation()
    # -------------------------------------------- shift time --- !!!
    if cfg.get('onset') != 0:
        if cfg.get('delay') != 0:
            raise ValueError('Bad configuration. Either "onset" or "delay" must be equal to 0.')
        # ------------------------------------ add background --- !!!
        print('Simulate bkg to append before the burst')
        bkg = os.path.join(grbpath, f'bkg{count:06d}.fits')
        event_bins.insert(0, bkg)
        sim.t = [0, cfg.get('onset')]
        if args.print:
            print(f"GTI (bkg) = {sim.t} s")
        sim.model = bkg_model
        sim.output = bkg
        sim.run_simulation()
    # ---------------------------------------- gather bins ---!!!
    if args.merge:
        print('Merge in photon-list')
        phlist = join(grbpath, f'{name}.fits')
        sim.input = event_bins
        sim.output = phlist
        sim.appendEventsSinglePhList(GTI=[cfg.get('delay'), cfg.get('delay')+cfg.get('tobs')])
        if args.print:
            h = fits.open(phlist)
            print('Check GTI and EVENTS time range:')
            print('************')
            print(h[2].data)
            print(h[1].data.field('TIME').min(), h[1].data.field('TIME').max())
            print('************')
            h.close()
    else:
        # observation list ---!
        obslist = join(grbpath, f'{name}.xml')
        if os.path.isfile(obslist):
            os.remove(obslist)
        make_obslist(obslist=obslist, items=event_bins, names=name)
        # BUGFIX: was `sim.input = phlist`, but phlist is only defined in the
        # merge branch above — that raised NameError here.
        sim.input = obslist
        sim.sortObsEvents()
    del sim
    # selections ---!
    """for texp in cfg.get('exposure'):
        selphlist = phlist.replace(f'{name}', f'texp{texp}s_{name}')
        grb = RTACtoolsAnalysis()
        grb.caldb = cfg.get('caldb')
        grb.irf = cfg.get('irf')
        grb.roi = cfg.get('roi')
        grb.e = [cfg.get('emin'), cfg.get('emax')]
        grb.t = [cfg.get('delay'), cfg.get('delay')+texp]
        if args.print:
            print(f"Selection t = {grb.t} s")
        grb.input = phlist
        grb.output = selphlist
        if args.merge:
            grb.run_selection()
        else:
            prefix = join(grbpath, f'texp{texp}s_')
            grb.run_selection(prefix=prefix) """
    # remove files ---!
    if args.remove and args.merge:
        # remove bins ---!
        os.system('rm ' + join(grbpath, f'{name}*tbin*'))
        if cfg.get('onset') != 0:
            # remove bkg ---!
            os.system('rm ' + join(grbpath, f'{name.replace("ebl", "bkg")}*'))
    # time ---!
    elapsed_t = time()-start_t
    if args.print:
        print(f"Trial {count} took {elapsed_t} seconds")
    return (count, elapsed_t)
if __name__=='__main__':
    # CLI entry point: parse options, validate the merge/remove combination,
    # then run the simulation pipeline.
    parser = argparse.ArgumentParser(description='ADD SCRIPT DESCRIPTION HERE')
    parser.add_argument('-f', '--cfgfile', type=str, required=True, help="Path to the yaml configuration file")
    parser.add_argument('--merge', type=str2bool, default=True, help='Merge in single phlist (true) or use observation library (false)')
    parser.add_argument('--remove', type=str2bool, default=True, help='Keep only outputs')
    parser.add_argument('--print', type=str2bool, default=False, help='Print out results')
    parser.add_argument('-mp', '--mp-enabled', type=str2bool, default=False, help='To parallelize trials loop')
    parser.add_argument('-mpt', '--mp-threads', type=int, default=4, help='The size of the threads pool')
    args = parser.parse_args()
    # Removing intermediate files only makes sense when they were merged first.
    if args.remove and not args.merge:
        raise ValueError('Keyword "remove" cannot be True if keyword "merge" is False.')
    main(args)
| [
"os.mkdir",
"os.remove",
"RTAscience.lib.RTACtoolsSimulation.RTACtoolsSimulation",
"argparse.ArgumentParser",
"os.path.isdir",
"RTAscience.lib.RTAUtils.get_mergermap",
"RTAscience.cfg.Config.Config",
"time.time",
"os.path.isfile",
"numpy.array",
"astropy.io.fits.open",
"multiprocessing.Pool",
... | [((863, 883), 'RTAscience.cfg.Config.Config', 'Config', (['args.cfgfile'], {}), '(args.cfgfile)\n', (869, 883), False, 'from RTAscience.cfg.Config import Config\n'), ((4038, 4044), 'time.time', 'time', ([], {}), '()\n', (4042, 4044), False, 'from time import time\n'), ((4386, 4407), 'RTAscience.lib.RTACtoolsSimulation.RTACtoolsSimulation', 'RTACtoolsSimulation', ([], {}), '()\n', (4405, 4407), False, 'from RTAscience.lib.RTACtoolsSimulation import RTACtoolsSimulation, make_obslist\n'), ((8471, 8537), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ADD SCRIPT DESCRIPTION HERE"""'}), "(description='ADD SCRIPT DESCRIPTION HERE')\n", (8494, 8537), False, 'import argparse\n'), ((1374, 1389), 'os.path.isdir', 'isdir', (['datapath'], {}), '(datapath)\n', (1379, 1389), False, 'from os.path import isdir, join, isfile\n'), ((1833, 1861), 'os.path.join', 'join', (['datapath', '"""obs"""', 'runid'], {}), "(datapath, 'obs', runid)\n", (1837, 1861), False, 'from os.path import isdir, join, isfile\n'), ((1980, 2021), 'os.path.join', 'join', (['datapath', 'f"""extracted_data/{runid}"""'], {}), "(datapath, f'extracted_data/{runid}')\n", (1984, 2021), False, 'from os.path import isdir, join, isfile\n'), ((2163, 2220), 'os.path.join', 'join', (['datapath', 'f"""extracted_data/{runid}/time_slices.csv"""'], {}), "(datapath, f'extracted_data/{runid}/time_slices.csv')\n", (2167, 2220), False, 'from os.path import isdir, join, isfile\n'), ((2431, 2463), 'RTAscience.lib.RTAUtils.get_mergermap', 'get_mergermap', (['runid', 'mergerpath'], {}), '(runid, mergerpath)\n', (2444, 2463), False, 'from RTAscience.lib.RTAUtils import get_alert_pointing_gw, get_mergermap, get_pointing, str2bool\n'), ((3128, 3164), 'os.path.join', 'os.path.join', (['grbpath', '"""config.yaml"""'], {}), "(grbpath, 'config.yaml')\n", (3140, 3164), False, 'import os\n'), ((5516, 5594), 'os.path.join', 'join', (['datapath', 'f"""extracted_data/{runid}/{runid}_tbin{tbin_start + 
j:02d}.xml"""'], {}), "(datapath, f'extracted_data/{runid}/{runid}_tbin{tbin_start + j:02d}.xml')\n", (5520, 5594), False, 'from os.path import isdir, join, isfile\n'), ((5609, 5663), 'os.path.join', 'join', (['grbpath', 'f"""{name}_tbin{tbin_start + j:02d}.fits"""'], {}), "(grbpath, f'{name}_tbin{tbin_start + j:02d}.fits')\n", (5613, 5663), False, 'from os.path import isdir, join, isfile\n'), ((6123, 6168), 'os.path.join', 'os.path.join', (['grbpath', 'f"""bkg{count:06d}.fits"""'], {}), "(grbpath, f'bkg{count:06d}.fits')\n", (6135, 6168), False, 'import os\n'), ((6533, 6562), 'os.path.join', 'join', (['grbpath', 'f"""{name}.fits"""'], {}), "(grbpath, f'{name}.fits')\n", (6537, 6562), False, 'from os.path import isdir, join, isfile\n'), ((7087, 7115), 'os.path.join', 'join', (['grbpath', 'f"""{name}.xml"""'], {}), "(grbpath, f'{name}.xml')\n", (7091, 7115), False, 'from os.path import isdir, join, isfile\n'), ((7127, 7150), 'os.path.isfile', 'os.path.isfile', (['obslist'], {}), '(obslist)\n', (7141, 7150), False, 'import os\n'), ((7191, 7250), 'RTAscience.lib.RTACtoolsSimulation.make_obslist', 'make_obslist', ([], {'obslist': 'obslist', 'items': 'event_bins', 'names': 'name'}), '(obslist=obslist, items=event_bins, names=name)\n', (7203, 7250), False, 'from RTAscience.lib.RTACtoolsSimulation import RTACtoolsSimulation, make_obslist\n'), ((8310, 8316), 'time.time', 'time', ([], {}), '()\n', (8314, 8316), False, 'from time import time\n'), ((1484, 1505), 'os.path.join', 'join', (['datapath', '"""obs"""'], {}), "(datapath, 'obs')\n", (1488, 1505), False, 'from os.path import isdir, join, isfile\n'), ((1546, 1567), 'os.path.join', 'join', (['datapath', '"""obs"""'], {}), "(datapath, 'obs')\n", (1550, 1567), False, 'from os.path import isdir, join, isfile\n'), ((1914, 1928), 'os.path.isdir', 'isdir', (['grbpath'], {}), '(grbpath)\n', (1919, 1928), False, 'from os.path import isdir, join, isfile\n'), ((1942, 1959), 'os.mkdir', 'os.mkdir', (['grbpath'], {}), '(grbpath)\n', 
(1950, 1959), False, 'import os\n'), ((2057, 2073), 'os.path.isdir', 'isdir', (['modelpath'], {}), '(modelpath)\n', (2062, 2073), False, 'from os.path import isdir, join, isfile\n'), ((2252, 2264), 'os.path.isfile', 'isfile', (['tcsv'], {}), '(tcsv)\n', (2258, 2264), False, 'from os.path import isdir, join, isfile\n'), ((2692, 2724), 'RTAscience.lib.RTAUtils.get_alert_pointing_gw', 'get_alert_pointing_gw', (['mergermap'], {}), '(mergermap)\n', (2713, 2724), False, 'from RTAscience.lib.RTAUtils import get_alert_pointing_gw, get_mergermap, get_pointing, str2bool\n'), ((3180, 3208), 'os.path.isfile', 'os.path.isfile', (['dumpedConfig'], {}), '(dumpedConfig)\n', (3194, 3208), False, 'import os\n'), ((6756, 6773), 'astropy.io.fits.open', 'fits.open', (['phlist'], {}), '(phlist)\n', (6765, 6773), False, 'from astropy.io import fits\n'), ((7164, 7182), 'os.remove', 'os.remove', (['obslist'], {}), '(obslist)\n', (7173, 7182), False, 'import os\n'), ((3404, 3425), 'multiprocessing.Pool', 'Pool', (['args.mp_threads'], {}), '(args.mp_threads)\n', (3408, 3425), False, 'from multiprocessing import Pool\n'), ((8099, 8129), 'os.path.join', 'join', (['grbpath', 'f"""{name}*tbin*"""'], {}), "(grbpath, f'{name}*tbin*')\n", (8103, 8129), False, 'from os.path import isdir, join, isfile\n'), ((3855, 3870), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (3863, 3870), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import gym
from gym import spaces
class GridworldCoexistenceGym(gym.Env):
def __init__(self, headless=True, gridworld_size=11, max_steps=20000, kill_reward=0, step_reward=1, window_size=5):
self.action_space = spaces.Discrete(4)
self.observation_space = spaces.Box(low=-10000000, high=100000000, dtype=np.float, shape=(window_size, window_size, 2))
self.headless = headless
self.gridworld_size = gridworld_size
self.window_size = window_size
self.max_steps = max_steps
self.kill_reward = kill_reward
self.step_reward = step_reward
self.reset()
def reset(self):
self.agent_position = [int(self.gridworld_size/2), int(self.gridworld_size/2)]
self.enemy_positions = [[0,0]]
self.coin_positions = []
self.coins_collected = 0
self.enemy_coins_collected = 0
self.steps = 0
self.agent_can_start = np.random.choice([0, 1])
self.get_observation()
return self.observation
    def get_observation(self):
        """Build the (window, window, 2) egocentric observation.

        Channel 0 is a window around the agent, channel 1 a window around the
        closest enemy (all-zero if there is no enemy). Positions wrap around
        the grid edges via numpy's 'wrap' take mode.
        """
        # Paint the full grid: coins as -1, agent and enemies as
        # 5 * (coins collected + 1) so the value encodes score as well.
        self.observation = np.zeros((self.gridworld_size, self.gridworld_size))
        for coin in self.coin_positions:
            self.observation[coin[0], coin[1]] = -1
        self.observation[max(0, min(self.agent_position[0], self.gridworld_size-1)), max(0, min(self.agent_position[1], self.gridworld_size-1))] = 5*(self.coins_collected + 1)
        # NOTE(review): this first window extraction is recomputed below when an
        # enemy exists (after enemies are painted onto the grid) — TODO confirm
        # the duplication is intentional.
        window = [range(self.agent_position[0] - int(self.window_size / 2),
                        self.agent_position[0] + int(self.window_size / 2) + 1),
                  range(self.agent_position[1] - int(self.window_size / 2),
                        self.agent_position[1] + int(self.window_size / 2) + 1)]
        agent_observation = self.observation.take(window[0], axis=0, mode='wrap')
        agent_observation = agent_observation.take(window[1], axis=1, mode='wrap')
        # Paint enemies and find the one closest (Euclidean) to the agent.
        closest_enemy = None
        closest_distance = np.inf
        for pos in self.enemy_positions:
            self.observation[max(0, min(pos[0], self.gridworld_size-1)), max(0, min(pos[1], self.gridworld_size-1))] = 5*(self.enemy_coins_collected + 1)
            distance = np.linalg.norm(np.array(pos)-np.array(self.agent_position))
            if distance < closest_distance:
                closest_distance = distance
                closest_enemy = pos
        if closest_enemy:
            self.observation[max(0, min(closest_enemy[0], self.gridworld_size - 1)), max(0, min(closest_enemy[1], self.gridworld_size - 1))] = \
                5 * (self.enemy_coins_collected + 1)
            # Re-extract the agent window now that enemies are on the grid.
            window = [range(self.agent_position[0] - int(self.window_size / 2),
                            self.agent_position[0] + int(self.window_size / 2) + 1),
                      range(self.agent_position[1] - int(self.window_size / 2),
                            self.agent_position[1] + int(self.window_size / 2) + 1)]
            agent_observation = self.observation.take(window[0], axis=0, mode='wrap')
            agent_observation = agent_observation.take(window[1], axis=1, mode='wrap')
            # Window centred on the closest enemy, same wrap-around semantics.
            enemy_window = [range(closest_enemy[0] - int(self.window_size/2), closest_enemy[0] + int(self.window_size/2) + 1), range(closest_enemy[1] - int(self.window_size/2), closest_enemy[1] + int(self.window_size/2) + 1)]
            enemy_observation = self.observation.take(enemy_window[0], axis=0, mode='wrap')
            enemy_observation = enemy_observation.take(enemy_window[1], axis=1, mode='wrap')
        else:
            enemy_observation = np.zeros((self.window_size, self.window_size))
        # Stack agent and enemy views into the two observation channels.
        self.observation = np.dstack([agent_observation, enemy_observation])
        return self.observation
def plot_env(self):
self.observation = self.get_observation()
env = np.zeros((self.gridworld_size, self.gridworld_size))
env[max(0, min(self.agent_position[0], self.gridworld_size-1)), max(0, min(self.agent_position[1], self.gridworld_size-1))] = 1
for pos in self.enemy_positions:
env[max(0, min(pos[0], self.gridworld_size-1)), max(0, min(pos[1], self.gridworld_size-1))] = -1
if not self.headless:
plt.matshow(env, 1, cmap='gray')
plt.draw()
plt.pause(0.01)
    def step(self, action_num):
        """Advance the environment by one tick.

        action_num in {0,1,2,3} moves the agent +row/-row/+col/-col on the
        toroidal grid; enemies then take uniformly random moves (including
        standing still). Returns (observation, reward, done, info).
        """
        #increase counter
        self.steps += 1
        #move agent
        if action_num == 0:
            self.agent_position = [(self.agent_position[0] + 1) % self.gridworld_size, self.agent_position[1]]
        elif action_num == 1:
            self.agent_position = [(self.agent_position[0] - 1) % self.gridworld_size, self.agent_position[1]]
        elif action_num == 2:
            self.agent_position = [self.agent_position[0], (self.agent_position[1] + 1) % self.gridworld_size]
        elif action_num == 3:
            self.agent_position = [self.agent_position[0], (self.agent_position[1] - 1) % self.gridworld_size]
        #move enemies
        for i, pos in enumerate(self.enemy_positions):
            # NOTE(review): both branches draw the same uniform action over 5
            # choices (action 4 = stay put) — the steps % 7 special case looks
            # vestigial; TODO confirm.
            if self.steps % 7 == 0:
                action = np.random.choice(range(5))
            else:
                action = np.random.choice(range(5))
            if action == 0:
                pos = [(pos[0] + 1) % self.gridworld_size, pos[1]]
            elif action == 1:
                pos = [(pos[0] - 1) % self.gridworld_size, pos[1]]
            elif action == 2:
                pos = [pos[0], (pos[1] + 1) % self.gridworld_size]
            elif action == 3:
                pos = [pos[0], (pos[1] - 1) % self.gridworld_size]
            self.enemy_positions[i] = pos
        # update deads
        # The agent dies if it sits directly below/right of an enemy, or shares
        # a cell after moving up/left (actions 1 and 3).
        dead = (self.agent_position in [[x[0] + 1, x[1]] for x in self.enemy_positions]) or (
                self.agent_position in [[x[0], x[1] + 1] for x in self.enemy_positions]) or (
                self.agent_position in [[x[0], x[1]] for x in self.enemy_positions] and action_num not in [0,2])
        if dead:
            num_enemies_killed = 0
        else:
            # NOTE(review): enemies_to_be_killed() is defined elsewhere in the class.
            num_enemies_killed = self.enemies_to_be_killed()
        if not self.headless:
            self.plot_env()
        # Reward = survival bonus plus a bonus per killed enemy.
        reward = self.step_reward + self.kill_reward*num_enemies_killed
        info = {'got_killed': dead, 'num_killed': num_enemies_killed, 'coins_collected': 0, 'enemy_coins_collected': 0, 'enemy_reward': 0, 'total_reward': 1, 'total_enemy_reward': 1}
        self.observation = self.get_observation()
        # Blank the corresponding observation channel on a kill/death.
        if num_enemies_killed > 0:
            self.observation[:, :, 1] = np.zeros((self.window_size,self.window_size))
        if dead:
            self.observation[:, :, 0] = np.zeros((self.window_size, self.window_size))
            self.kill_enemies()
        restart = dead or self.steps > self.max_steps
        if restart:
            self.reset()
        return self.observation, reward, restart, info
def kill_enemies(self):
enemies_to_be_killed = []
for enemy_pos in self.enemy_positions:
if enemy_pos == [self.agent_position[0] +1, self.agent_position[1]] or enemy_pos == [self.agent_position[0], self.agent_position[1] + 1]:
enemies_to_be_killed.append(enemy_pos)
self.enemy_positions = [x for x in self.enemy_positions if x not in enemies_to_be_killed]
return len(enemies_to_be_killed)
def enemies_to_be_killed(self):
enemies_to_be_killed = []
for enemy_pos in self.enemy_positions:
if enemy_pos == [self.agent_position[0] +1, self.agent_position[1]] or enemy_pos == [self.agent_position[0], self.agent_position[1] + 1]:
enemies_to_be_killed.append(enemy_pos)
return len(enemies_to_be_killed)
    def render(self, mode='human', close=False):
        """Gym rendering hook; intentionally a no-op (drawing is done in plot_env)."""
        pass
if __name__ == "__main__":
    # Smoke-run: open a small 7x7 world with rendering enabled and drive
    # the agent with random actions forever (terminate with Ctrl-C).
    env = GridworldCoexistenceGym(headless=False, gridworld_size=7, window_size=5)
    while True:
        action = np.random.choice(range(5))
        env.step(action)
| [
"numpy.dstack",
"numpy.zeros",
"gym.spaces.Discrete",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.draw",
"numpy.array",
"gym.spaces.Box",
"numpy.random.choice",
"matplotlib.pyplot.pause"
] | [((276, 294), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (291, 294), False, 'from gym import spaces\n'), ((328, 427), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-10000000)', 'high': '(100000000)', 'dtype': 'np.float', 'shape': '(window_size, window_size, 2)'}), '(low=-10000000, high=100000000, dtype=np.float, shape=(\n window_size, window_size, 2))\n', (338, 427), False, 'from gym import spaces\n'), ((981, 1005), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {}), '([0, 1])\n', (997, 1005), True, 'import numpy as np\n'), ((1129, 1181), 'numpy.zeros', 'np.zeros', (['(self.gridworld_size, self.gridworld_size)'], {}), '((self.gridworld_size, self.gridworld_size))\n', (1137, 1181), True, 'import numpy as np\n'), ((3664, 3713), 'numpy.dstack', 'np.dstack', (['[agent_observation, enemy_observation]'], {}), '([agent_observation, enemy_observation])\n', (3673, 3713), True, 'import numpy as np\n'), ((3836, 3888), 'numpy.zeros', 'np.zeros', (['(self.gridworld_size, self.gridworld_size)'], {}), '((self.gridworld_size, self.gridworld_size))\n', (3844, 3888), True, 'import numpy as np\n'), ((3589, 3635), 'numpy.zeros', 'np.zeros', (['(self.window_size, self.window_size)'], {}), '((self.window_size, self.window_size))\n', (3597, 3635), True, 'import numpy as np\n'), ((4219, 4251), 'matplotlib.pyplot.matshow', 'plt.matshow', (['env', '(1)'], {'cmap': '"""gray"""'}), "(env, 1, cmap='gray')\n", (4230, 4251), True, 'import matplotlib.pyplot as plt\n'), ((4264, 4274), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4272, 4274), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4302), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (4296, 4302), True, 'import matplotlib.pyplot as plt\n'), ((6531, 6577), 'numpy.zeros', 'np.zeros', (['(self.window_size, self.window_size)'], {}), '((self.window_size, self.window_size))\n', (6539, 6577), True, 'import numpy as np\n'), ((6635, 6681), 'numpy.zeros', 'np.zeros', 
(['(self.window_size, self.window_size)'], {}), '((self.window_size, self.window_size))\n', (6643, 6681), True, 'import numpy as np\n'), ((2230, 2243), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (2238, 2243), True, 'import numpy as np\n'), ((2244, 2273), 'numpy.array', 'np.array', (['self.agent_position'], {}), '(self.agent_position)\n', (2252, 2273), True, 'import numpy as np\n')] |
import allel
import numpy as np
import pandas as pd
import time
import sys
def process_vit(vit_file):
    """Read a Viterbi (.vit) ancestry file.

    Each line is split on tabs; the first field and the last field are
    discarded, the remaining codes form one row.  Returns the stacked
    rows transposed: one row per code position, one column per line.
    """
    rows = []
    with open(vit_file) as handle:
        for line in handle:
            fields = line.replace('\n', '').split('\t')
            rows.append(np.array(fields[1:-1]))
    return np.stack(rows, axis=0).T
def process_fbk(fbk_file, num_ancestries, prob_thresh):
    """Decode an .fbk probability file into per-haplotype ancestry codes.

    The file holds space-separated probabilities, `num_ancestries`
    interleaved columns per haplotype (the trailing column produced by the
    line-ending separator is dropped).  For each haplotype the ancestry
    whose probability exceeds `prob_thresh` yields a code '1'..'K'.
    Returns a string matrix of shape (snps, haplotypes).
    """
    fbk_matrix = pd.read_csv(fbk_file, sep=" ", header=None).values[:, :-1]
    n_haplotypes = int(fbk_matrix.shape[1] / num_ancestries)
    ancestry_matrix = np.zeros((fbk_matrix.shape[0], n_haplotypes), dtype=np.int8)
    for anc_index in range(num_ancestries):
        hits = fbk_matrix[:, anc_index::num_ancestries] > prob_thresh
        ancestry_matrix += hits * 1 * (anc_index + 1)
    return ancestry_matrix.astype(str)
def process_tsv_fb(tsv_file, num_ancestries, prob_thresh, positions, gt_matrix):
    """Build a per-SNP ancestry matrix from an fb.tsv probability file.

    Marker-level probability rows are carried forward to every SNP
    position at or after the marker, thresholded at `prob_thresh`, and
    encoded per column group as 0-based string codes.  Returns
    (ancestry_matrix, gt_matrix) with gt_matrix trimmed to the SNP range
    covered by the tsv markers.
    """
    df_tsv = pd.read_csv(tsv_file, sep="\t", skiprows=1)
    tsv_positions = df_tsv['physical_position'].tolist()
    df_tsv.drop(columns = ['physical_position', 'chromosome', 'genetic_position', 'genetic_marker_index'], inplace=True)
    tsv_matrix = df_tsv.values
    # Trim gt_matrix/positions to the span covered by the tsv markers.
    # NOTE(review): positions.index() raises ValueError if the first/last
    # tsv position is absent from `positions` -- assumed to always match.
    i_start = positions.index(tsv_positions[0])
    i_end = positions.index(tsv_positions[-1]) + 1
    gt_matrix = gt_matrix[i_start:i_end, :]
    positions = positions[i_start:i_end]
    # Expand marker-level rows to one row per SNP by carrying the latest
    # marker row forward until the next marker position is reached.
    prob_matrix = np.zeros((len(positions), tsv_matrix.shape[1]), dtype=np.float32)
    i_tsv = -1
    next_pos_tsv = tsv_positions[i_tsv+1]
    for i in range(len(positions)):
        pos = positions[i]
        if pos >= next_pos_tsv and i_tsv + 1 < tsv_matrix.shape[0]:
            i_tsv += 1
            probs = tsv_matrix[i_tsv, :]
            if i_tsv + 1 < tsv_matrix.shape[0]:
                next_pos_tsv = tsv_positions[i_tsv+1]
        prob_matrix[i, :] = probs
    tsv_matrix = prob_matrix
    # Threshold each ancestry's interleaved probability columns; summing
    # the scaled indicators yields one code per column group, then shift
    # to 0-based codes before stringifying.
    ancestry_matrix = np.zeros((tsv_matrix.shape[0], int(tsv_matrix.shape[1] / num_ancestries)), dtype=np.int8)
    for i in range(num_ancestries):
        ancestry = i+1
        ancestry_matrix += (tsv_matrix[:, i::num_ancestries] > prob_thresh) * 1 * ancestry
    ancestry_matrix -= 1
    ancestry_matrix = ancestry_matrix.astype(str)
    return ancestry_matrix, gt_matrix
def process_tsv_msp(tsv_file, positions, gt_matrix):
    """Build a per-SNP ancestry matrix from an msp.tsv segment file.

    Each tsv row carries ancestry codes that apply from its start
    position (spos) onward; codes are carried forward to every SNP
    position until the next segment starts.  Returns
    (ancestry_matrix, gt_matrix) trimmed to the covered position range.
    """
    df_tsv = pd.read_csv(tsv_file, sep="\t", skiprows=1)
    tsv_spos = df_tsv['spos'].tolist()
    tsv_epos = df_tsv['epos'].tolist()
    df_tsv.drop(columns = ['#chm', 'spos', 'epos', 'sgpos', 'egpos', 'n snps'], inplace=True)
    tsv_matrix = df_tsv.values
    # Trim to the range covered by the tsv segments.
    # NOTE(review): unlike process_tsv_fb there is no +1 on i_end, so the
    # SNP at the final epos is excluded -- confirm this is intentional.
    i_start = positions.index(tsv_spos[0])
    i_end = positions.index(tsv_epos[-1])
    gt_matrix = gt_matrix[i_start:i_end, :]
    positions = positions[i_start:i_end]
    # Carry each segment's ancestry codes forward over its SNP positions.
    ancestry_matrix = np.zeros((len(positions), tsv_matrix.shape[1]), dtype=np.int8)
    i_tsv = -1
    next_pos_tsv = tsv_spos[i_tsv+1]
    for i in range(len(positions)):
        pos = positions[i]
        if pos >= next_pos_tsv and i_tsv + 1 < tsv_matrix.shape[0]:
            i_tsv += 1
            ancs = tsv_matrix[i_tsv, :]
            if i_tsv + 1 < tsv_matrix.shape[0]:
                next_pos_tsv = tsv_spos[i_tsv+1]
        ancestry_matrix[i, :] = ancs
    ancestry_matrix = ancestry_matrix.astype(str)
    return ancestry_matrix, gt_matrix
def process_beagle(beagle_file):
    """Parse a beagle genotype file.

    The first line is a header whose fields from index 2 onward are the
    individual IDs.  Every following line holds two ID fields and then one
    genotype per individual; genotypes are encoded against the first
    genotype of the line (0 if equal, 1 otherwise; column 0 stays 0).
    Marker IDs of the form 'rsNNN' become ints, 'A_B' forms become
    np.float64('A.B').  Returns (gt_matrix, ind_IDs, rs_IDs).
    """
    rs_IDs = []
    genotype_rows = []
    with open(beagle_file) as handle:
        header = handle.readline().replace('\n', '').split('\t')
        ind_IDs = np.array(header[2:])
        for raw_line in handle:
            fields = raw_line.replace('\n', '').split('\t')
            marker = fields[1]
            if marker[:2] == 'rs':
                rs_IDs.append(int(marker[2:]))
            else:
                marker_parts = marker.split('_')
                rs_IDs.append(np.float64(marker_parts[0] + '.' + marker_parts[1]))
            genotype_rows.append(fields[2:])
    start_time = time.time()
    gt_matrix = np.zeros((len(genotype_rows), len(genotype_rows[0])), dtype=np.float32)
    for row_idx, row in enumerate(genotype_rows):
        ref = row[0]
        for col_idx in range(1, len(row)):
            gt_matrix[row_idx, col_idx] = (row[col_idx] != ref)*1
    print("Beagle Encoding Time: --- %s seconds ---" % (time.time() - start_time))
    start_time = time.time()
    return gt_matrix, ind_IDs, rs_IDs
def process_vcf(vcf_file):
    """Read a VCF via scikit-allel.

    Flattens the (variants, samples, ploidy) genotype tensor into a
    float32 matrix of shape (variants, samples * ploidy) with negative
    calls replaced by NaN.  Sample IDs are expanded into '_A'/'_B'
    haplotype IDs.  Returns (gt_matrix, rs_IDs, ind_IDs, positions).
    """
    vcf = allel.read_vcf(vcf_file)
    gt = vcf['calldata/GT']
    n_variants, n_samples, ploidy = gt.shape
    gt_matrix = gt.reshape(n_variants, n_samples * ploidy).astype(np.float32)
    # Negative entries mark missing calls; turn them into NaN.
    np.place(gt_matrix, gt_matrix < 0, np.nan)
    rs_IDs = [int(variant_id[2:]) for variant_id in vcf['variants/ID']]
    ind_IDs = np.array([sample + suffix
                        for sample in vcf['samples']
                        for suffix in ('_A', '_B')])
    positions = vcf['variants/POS'].tolist()
    return gt_matrix, rs_IDs, ind_IDs, positions
def mask(ancestry_matrix, gt_matrix, unique_ancestries):
    """For every ancestry code, build a float32 copy of gt_matrix where
    entries with a different ancestry are NaN.

    Returns a dict mapping ancestry code -> masked matrix with the same
    shape as ancestry_matrix.  Prints per-ancestry timing.
    """
    start_time = time.time()
    masked_matrices = {}
    flat_anc = ancestry_matrix.reshape(-1)
    flat_gt = gt_matrix.reshape(-1)
    for ancestry in unique_ancestries:
        masked = np.full(flat_anc.shape[0], np.nan, dtype=np.float32)
        hits = flat_anc == ancestry
        masked[hits] = flat_gt[hits]
        masked_matrices[ancestry] = masked.reshape(ancestry_matrix.shape)
        print("Masking for ancestry --- %s seconds ---" % (time.time() - start_time))
        start_time = time.time()
    return masked_matrices
def average_parent_snps(masked_matrix):
    """Collapse row pairs (2i, 2i+1) into their NaN-ignoring mean.

    Input shape is (haplotypes, snps) with an even row count; the result
    has shape (haplotypes / 2, snps), dtype float32.
    """
    n_haplotypes, n_snps = masked_matrix.shape
    n_individuals = int(n_haplotypes / 2)
    averaged = np.zeros((n_individuals, n_snps), dtype = np.float32)
    for row in range(n_individuals):
        pair = masked_matrix[2 * row:2 * row + 2, :]
        averaged[row, :] = np.nanmean(pair, axis=0, dtype = np.float32)
    return averaged
def remove_AB_indIDs(ind_IDs):
    """Collapse haplotype ID pairs to one ID per individual by taking every
    second entry and stripping its two-character suffix (e.g. '_A')."""
    return np.array([ind_IDs[2 * i][:-2] for i in range(int(len(ind_IDs) / 2))])
def add_AB_indIDs(ind_IDs):
    """Expand each individual ID into its '_A'/'_B' haplotype pair,
    preserving order (ID0_A, ID0_B, ID1_A, ...)."""
    expanded = []
    for ind_id in ind_IDs:
        expanded.extend([str(ind_id) + '_A', str(ind_id) + '_B'])
    return np.array(expanded)
def get_masked_matrix(beagle_filename, vcf_filename, beagle_or_vcf, is_masked, vit_filename, fbk_filename, tsv_filename, vit_or_fbk_or_tsv, fb_or_msp, num_ancestries, ancestry, average_parents, prob_thresh):
    """Load genotypes and optionally mask them to a single ancestry.

    beagle_or_vcf selects the genotype source (1: beagle, 2: vcf);
    vit_or_fbk_or_tsv selects the ancestry source (1: vit, 2: fbk, 3: tsv,
    with fb_or_msp choosing the tsv flavour).  Illegal selector values
    terminate via sys.exit.  Returns (masked_matrix, ind_IDs, rs_IDs)
    with samples as rows.
    """
    if beagle_or_vcf == 1:
        gt_matrix, ind_IDs, rs_IDs = process_beagle(beagle_filename)
    elif beagle_or_vcf == 2:
        gt_matrix, ind_IDs, rs_IDs, positions = process_vcf(vcf_filename)
    else:
        sys.exit("Illegal value for beagle_or_vcf. Choose 1 for beagle file or 2 for vcf file.")
    if is_masked:
        if vit_or_fbk_or_tsv == 1:
            ancestry_matrix = process_vit(vit_filename)
        elif vit_or_fbk_or_tsv == 2:
            ancestry_matrix = process_fbk(fbk_filename, num_ancestries, prob_thresh)
        elif vit_or_fbk_or_tsv == 3:
            # NOTE(review): `positions` is only bound on the vcf path
            # (beagle_or_vcf == 2); combining beagle input with a tsv
            # ancestry file raises NameError here -- confirm intended usage.
            if fb_or_msp == 1:
                ancestry_matrix, gt_matrix = process_tsv_fb(tsv_filename, num_ancestries, prob_thresh, positions, gt_matrix)
            elif fb_or_msp == 2:
                ancestry_matrix, gt_matrix = process_tsv_msp(tsv_filename, positions, gt_matrix)
            else:
                sys.exit("Illegal value for fb_or_msp. Choose 1 for fb.tsv file or 2 for msp.tsv file.")
        else:
            sys.exit("Illegal value for vit_or_fbk_or_tsv. Choose 1 for vit file or 2 for fbk file or 3 for tsv file.")
        # vit/fbk encode ancestries as '1'..'K'; the tsv path is 0-based.
        if vit_or_fbk_or_tsv == 1 or vit_or_fbk_or_tsv == 2:
            unique_ancestries = [str(i) for i in np.arange(1, num_ancestries+1)]
        else:
            unique_ancestries = [str(i) for i in np.arange(0, num_ancestries)]
        masked_matrices = mask(ancestry_matrix, gt_matrix, unique_ancestries)
        # Keep only the requested ancestry; transpose to samples x SNPs.
        masked_matrix = masked_matrices[str(ancestry)].T
    else:
        masked_matrix = gt_matrix.T
    if average_parents:
        # Collapse the two haplotype rows of each individual into one.
        masked_matrix = average_parent_snps(masked_matrix)
        ind_IDs = remove_AB_indIDs(ind_IDs)
    return masked_matrix, ind_IDs, rs_IDs
def process_labels_weights(labels_file, masked_matrix, ind_IDs, average_parents, is_weighted, save_masked_matrix, masked_matrix_filename):
    """Align sample labels and weights with the rows of masked_matrix.

    Reads a tab-separated file with columns 'indID', 'label' and, when
    is_weighted, 'weight'.  Rows are restricted and reordered to the
    labelled samples.  Weight semantics: > 0 keeps the row as-is, 0 drops
    it, and -k assigns the row to combined group k, whose members are
    averaged into one synthetic row with weight 1.
    Returns (masked_matrix, ind_IDs, labels, weights).
    """
    labels_df = pd.read_csv(labels_file, sep='\t')
    if average_parents:
        labels = np.array(labels_df['label'][labels_df['indID'].isin(ind_IDs)])
        label_ind_IDs = np.array(labels_df['indID'][labels_df['indID'].isin(ind_IDs)])
    else:
        # Haplotype rows carry '_A'/'_B' suffixes; match the labels file on
        # the base ID, then duplicate each label for the two haplotypes.
        temp_ind_IDs = remove_AB_indIDs(ind_IDs)
        labels = np.array(labels_df['label'][labels_df['indID'].isin(temp_ind_IDs)])
        labels = np.repeat(labels, 2)
        label_ind_IDs = np.array(labels_df['indID'][labels_df['indID'].isin(temp_ind_IDs)])
        label_ind_IDs = add_AB_indIDs(label_ind_IDs)
    # Restrict (and reorder) the matrix rows to the labelled samples.
    keep_indices = [ind_IDs.tolist().index(x) for x in label_ind_IDs]
    ind_IDs = ind_IDs[keep_indices]
    masked_matrix = masked_matrix[keep_indices]
    if not is_weighted:
        weights = np.ones(len(labels))
    else:
        if average_parents:
            weights = np.array(labels_df['weight'][labels_df['indID'].isin(ind_IDs)])
        else:
            temp_ind_IDs = remove_AB_indIDs(ind_IDs)
            weights = np.array(labels_df['weight'][labels_df['indID'].isin(temp_ind_IDs)])
            weights = np.repeat(weights, 2)
    # Positive-weight rows are kept untouched.
    non_combined_indices = np.where(weights > 0)
    masked_matrix_new = masked_matrix[non_combined_indices]
    ind_IDs_new = ind_IDs[non_combined_indices]
    labels_new = labels[non_combined_indices]
    weights_new = weights[non_combined_indices]
    # The most negative weight -k determines the number of combined groups.
    num_groups = - min(weights)
    if num_groups > 0:
        for i in range(1, num_groups+1):
            weight = -i
            combined_indices = np.where(weights == weight)
            # Average all rows of group i into one synthetic individual;
            # it inherits the label of the group's first member.
            combined_row = [np.nanmean(masked_matrix[combined_indices], axis=0)]
            masked_matrix_new = np.append(masked_matrix_new, combined_row, axis=0)
            ind_IDs_new = np.append(ind_IDs_new, 'combined_ind_' + str(i))
            labels_new = np.append(labels_new, labels[combined_indices[0][0]])
            weights_new = np.append(weights_new, 1)
    masked_matrix = masked_matrix_new
    ind_IDs = ind_IDs_new
    labels = labels_new
    weights = weights_new
    if save_masked_matrix:
        np.save(masked_matrix_filename, masked_matrix)
    return masked_matrix, ind_IDs, labels, weights
def center_masked_matrix(masked_matrix):
    """Subtract each column's NaN-ignoring mean from the matrix, in place,
    and return the (mutated) array."""
    column_means = np.nanmean(masked_matrix, axis=0)
    masked_matrix -= column_means
    return masked_matrix
| [
"numpy.stack",
"numpy.save",
"pandas.read_csv",
"numpy.empty",
"allel.read_vcf",
"numpy.place",
"time.time",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.repeat",
"numpy.arange",
"numpy.float64",
"sys.exit",
"numpy.nanmean"
] | [((438, 481), 'pandas.read_csv', 'pd.read_csv', (['fbk_file'], {'sep': '""" """', 'header': 'None'}), "(fbk_file, sep=' ', header=None)\n", (449, 481), True, 'import pandas as pd\n'), ((955, 998), 'pandas.read_csv', 'pd.read_csv', (['tsv_file'], {'sep': '"""\t"""', 'skiprows': '(1)'}), "(tsv_file, sep='\\t', skiprows=1)\n", (966, 998), True, 'import pandas as pd\n'), ((2337, 2380), 'pandas.read_csv', 'pd.read_csv', (['tsv_file'], {'sep': '"""\t"""', 'skiprows': '(1)'}), "(tsv_file, sep='\\t', skiprows=1)\n", (2348, 2380), True, 'import pandas as pd\n'), ((3946, 3957), 'time.time', 'time.time', ([], {}), '()\n', (3955, 3957), False, 'import time\n'), ((4318, 4329), 'time.time', 'time.time', ([], {}), '()\n', (4327, 4329), False, 'import time\n'), ((4411, 4435), 'allel.read_vcf', 'allel.read_vcf', (['vcf_file'], {}), '(vcf_file)\n', (4425, 4435), False, 'import allel\n'), ((4591, 4633), 'numpy.place', 'np.place', (['gt_matrix', '(gt_matrix < 0)', 'np.nan'], {}), '(gt_matrix, gt_matrix < 0, np.nan)\n', (4599, 4633), True, 'import numpy as np\n'), ((4865, 4882), 'numpy.array', 'np.array', (['ind_IDs'], {}), '(ind_IDs)\n', (4873, 4882), True, 'import numpy as np\n'), ((5052, 5063), 'time.time', 'time.time', ([], {}), '()\n', (5061, 5063), False, 'import time\n'), ((6096, 6117), 'numpy.array', 'np.array', (['new_ind_IDs'], {}), '(new_ind_IDs)\n', (6104, 6117), True, 'import numpy as np\n'), ((6345, 6366), 'numpy.array', 'np.array', (['new_ind_IDs'], {}), '(new_ind_IDs)\n', (6353, 6366), True, 'import numpy as np\n'), ((8461, 8495), 'pandas.read_csv', 'pd.read_csv', (['labels_file'], {'sep': '"""\t"""'}), "(labels_file, sep='\\t')\n", (8472, 8495), True, 'import pandas as pd\n'), ((10743, 10776), 'numpy.nanmean', 'np.nanmean', (['masked_matrix'], {'axis': '(0)'}), '(masked_matrix, axis=0)\n', (10753, 10776), True, 'import numpy as np\n'), ((310, 338), 'numpy.stack', 'np.stack', (['vit_matrix'], {'axis': '(0)'}), '(vit_matrix, axis=0)\n', (318, 338), True, 'import 
numpy as np\n'), ((3545, 3562), 'numpy.array', 'np.array', (['ind_IDs'], {}), '(ind_IDs)\n', (3553, 3562), True, 'import numpy as np\n'), ((5145, 5224), 'numpy.empty', 'np.empty', (['(ancestry_matrix.shape[0] * ancestry_matrix.shape[1])'], {'dtype': 'np.float32'}), '(ancestry_matrix.shape[0] * ancestry_matrix.shape[1], dtype=np.float32)\n', (5153, 5224), True, 'import numpy as np\n'), ((5536, 5547), 'time.time', 'time.time', ([], {}), '()\n', (5545, 5547), False, 'import time\n'), ((5834, 5905), 'numpy.nanmean', 'np.nanmean', (['masked_matrix[2 * i:2 * i + 2, :]'], {'axis': '(0)', 'dtype': 'np.float32'}), '(masked_matrix[2 * i:2 * i + 2, :], axis=0, dtype=np.float32)\n', (5844, 5905), True, 'import numpy as np\n'), ((8848, 8868), 'numpy.repeat', 'np.repeat', (['labels', '(2)'], {}), '(labels, 2)\n', (8857, 8868), True, 'import numpy as np\n'), ((9588, 9609), 'numpy.where', 'np.where', (['(weights > 0)'], {}), '(weights > 0)\n', (9596, 9609), True, 'import numpy as np\n'), ((10582, 10628), 'numpy.save', 'np.save', (['masked_matrix_filename', 'masked_matrix'], {}), '(masked_matrix_filename, masked_matrix)\n', (10589, 10628), True, 'import numpy as np\n'), ((6816, 6914), 'sys.exit', 'sys.exit', (['"""Illegal value for beagle_or_vcf. Choose 1 for beagle file or 2 for vcf file."""'], {}), "(\n 'Illegal value for beagle_or_vcf. 
Choose 1 for beagle file or 2 for vcf file.'\n )\n", (6824, 6914), False, 'import sys\n'), ((9535, 9556), 'numpy.repeat', 'np.repeat', (['weights', '(2)'], {}), '(weights, 2)\n', (9544, 9556), True, 'import numpy as np\n'), ((263, 286), 'numpy.array', 'np.array', (['x_split[1:-1]'], {}), '(x_split[1:-1])\n', (271, 286), True, 'import numpy as np\n'), ((4274, 4285), 'time.time', 'time.time', ([], {}), '()\n', (4283, 4285), False, 'import time\n'), ((9999, 10026), 'numpy.where', 'np.where', (['(weights == weight)'], {}), '(weights == weight)\n', (10007, 10026), True, 'import numpy as np\n'), ((10148, 10198), 'numpy.append', 'np.append', (['masked_matrix_new', 'combined_row'], {'axis': '(0)'}), '(masked_matrix_new, combined_row, axis=0)\n', (10157, 10198), True, 'import numpy as np\n'), ((10307, 10360), 'numpy.append', 'np.append', (['labels_new', 'labels[combined_indices[0][0]]'], {}), '(labels_new, labels[combined_indices[0][0]])\n', (10316, 10360), True, 'import numpy as np\n'), ((10391, 10416), 'numpy.append', 'np.append', (['weights_new', '(1)'], {}), '(weights_new, 1)\n', (10400, 10416), True, 'import numpy as np\n'), ((3830, 3879), 'numpy.float64', 'np.float64', (["(rs_ID_split[0] + '.' + rs_ID_split[1])"], {}), "(rs_ID_split[0] + '.' + rs_ID_split[1])\n", (3840, 3879), True, 'import numpy as np\n'), ((5488, 5499), 'time.time', 'time.time', ([], {}), '()\n', (5497, 5499), False, 'import time\n'), ((7608, 7725), 'sys.exit', 'sys.exit', (['"""Illegal value for vit_or_fbk_or_tsv. Choose 1 for vit file or 2 for fbk file or 3 for tsv file."""'], {}), "(\n 'Illegal value for vit_or_fbk_or_tsv. 
Choose 1 for vit file or 2 for fbk file or 3 for tsv file.'\n )\n", (7616, 7725), False, 'import sys\n'), ((7826, 7858), 'numpy.arange', 'np.arange', (['(1)', '(num_ancestries + 1)'], {}), '(1, num_ancestries + 1)\n', (7835, 7858), True, 'import numpy as np\n'), ((7921, 7949), 'numpy.arange', 'np.arange', (['(0)', 'num_ancestries'], {}), '(0, num_ancestries)\n', (7930, 7949), True, 'import numpy as np\n'), ((10059, 10110), 'numpy.nanmean', 'np.nanmean', (['masked_matrix[combined_indices]'], {'axis': '(0)'}), '(masked_matrix[combined_indices], axis=0)\n', (10069, 10110), True, 'import numpy as np\n'), ((7493, 7591), 'sys.exit', 'sys.exit', (['"""Illegal value for fb_or_msp. Choose 1 for fb.tsv file or 2 for msp.tsv file."""'], {}), "(\n 'Illegal value for fb_or_msp. Choose 1 for fb.tsv file or 2 for msp.tsv file.'\n )\n", (7501, 7591), False, 'import sys\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 08:23:38 2020
@author: fabian
"""
from pathlib import Path
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
import matplotlib.pyplot as plt
def load_data(path):
    """Load tactile frames from a log file.

    Each line holds a frame number, 1024 tab-separated taxel values and a
    trailing tab before the newline.  Only the top-left 16x16 quadrant of
    the 32x32 sensor grid is kept; malformed lines are reported and
    skipped.  Returns an array of shape (n_frames, 16, 16), dtype uint16.
    """
    # Boolean mask selecting the top-left 16x16 quadrant of the flat
    # 1024-element (32x32) frame.
    mask = np.zeros((32,32))
    mask[0:16, 0:16] = 1
    # BUG FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool.
    mask = mask.reshape((1024,)).astype(bool)
    tactile_data = []
    line_count = 0
    with open(path) as f:
        # Expect data separated by \t and if it belongs to the same block.
        # Blocks are separated by \n
        for line in iter(f.readline, ''):
            fields = line.split('\t')
            fields.pop(0)   # get rid of frame number at the beginning
            fields.pop(-1)  # get rid of '\n' at the end
            if(len(fields) != 1024):
                print('Error on line', line_count)
            else:
                frame = np.array(fields)
                tactile_data.append(frame[mask])
            line_count += 1
    tactile_data = np.array(tactile_data).astype(np.uint16)
    data = tactile_data.reshape((-1, 16, 16))
    return data
# Location of the recorded tactile logs for the slip experiment.
folder = Path('../../Data_Collection/3kOhm_FB/square_sensor/Slip/')
#file = 'screwdriver.log'
file = 'coke.log'
data = load_data(folder / file)
# Show the frame sequence in a bare pyqtgraph ImageView with the
# histogram, menu and ROI controls hidden.
app = QtGui.QApplication([])
win = QtGui.QMainWindow()
imv = pg.ImageView()
imv.ui.histogram.hide()
imv.ui.menuBtn.hide()
imv.ui.roiBtn.hide()
win.setCentralWidget(imv)
win.resize(1000,1000)
win.show()
win.setWindowTitle('Slip experiment')
# Time axis in seconds: frame index / 100 (presumably a 100 Hz sampling
# rate -- TODO confirm against the acquisition setup).
imv.setImage(data, xvals=np.linspace(0., data.shape[0]/100, data.shape[0]))
#autoHistogramRange=False, levels=lev)
if __name__ == "__main__":
    QtGui.QApplication.instance().exec_()
| [
"pyqtgraph.Qt.QtGui.QMainWindow",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.zeros",
"pyqtgraph.ImageView",
"pathlib.Path",
"numpy.array",
"numpy.linspace",
"pyqtgraph.Qt.QtGui.QApplication"
] | [((1107, 1165), 'pathlib.Path', 'Path', (['"""../../Data_Collection/3kOhm_FB/square_sensor/Slip/"""'], {}), "('../../Data_Collection/3kOhm_FB/square_sensor/Slip/')\n", (1111, 1165), False, 'from pathlib import Path\n'), ((1250, 1272), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (1268, 1272), False, 'from pyqtgraph.Qt import QtGui\n'), ((1279, 1298), 'pyqtgraph.Qt.QtGui.QMainWindow', 'QtGui.QMainWindow', ([], {}), '()\n', (1296, 1298), False, 'from pyqtgraph.Qt import QtGui\n'), ((1305, 1319), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (1317, 1319), True, 'import pyqtgraph as pg\n'), ((279, 297), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {}), '((32, 32))\n', (287, 297), True, 'import numpy as np\n'), ((1509, 1561), 'numpy.linspace', 'np.linspace', (['(0.0)', '(data.shape[0] / 100)', 'data.shape[0]'], {}), '(0.0, data.shape[0] / 100, data.shape[0])\n', (1520, 1561), True, 'import numpy as np\n'), ((994, 1016), 'numpy.array', 'np.array', (['tactile_data'], {}), '(tactile_data)\n', (1002, 1016), True, 'import numpy as np\n'), ((1659, 1688), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (1686, 1688), False, 'from pyqtgraph.Qt import QtGui\n'), ((882, 896), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (890, 896), True, 'import numpy as np\n')] |
import unittest
import os
import numpy as np
from skimage.io import imread, imsave
from skimage import img_as_float64
from shutil import rmtree
from sigback.processing import measure
class MeasureTest(unittest.TestCase):
    """Tests for measure.minmax over populated and empty image directories."""

    test_data_path = './sigback/processing/tests/data'

    def setUp(self):
        """Create one populated image directory and one empty directory."""
        populated_dir = os.path.join(self.test_data_path, 'img_dir')
        empty_dir = os.path.join(self.test_data_path, 'empty_img_dir')
        for directory in (populated_dir, empty_dir):
            if not os.path.exists(directory):
                os.mkdir(directory)
        # Two images of different sizes so max and min dimensions differ.
        names_and_images = zip(['first.png', 'second.png'],
                               [np.random.rand(100, 200), np.random.rand(150, 300)])
        for img_name, img in names_and_images:
            imsave(os.path.join(populated_dir, img_name), img)

    def tearDown(self):
        """Remove any directories created by setUp."""
        for sub_dir in ('img_dir', 'empty_img_dir'):
            full_path = os.path.join(self.test_data_path, sub_dir)
            if os.path.exists(full_path):
                rmtree(full_path)

    def test_minmax(self):
        """A populated directory yields the dimensions of its images."""
        img_dir = os.path.join(self.test_data_path, 'img_dir')
        self.assertEqual((150, 300, 100, 200), measure.minmax(img_dir))

    def test_minmax_empty_dir(self):
        """An empty directory yields the (0, 0, -1, -1) sentinel."""
        img_dir = os.path.join(self.test_data_path, 'empty_img_dir')
        self.assertEqual((0, 0, -1, -1), measure.minmax(img_dir))
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"os.mkdir",
"sigback.processing.measure.minmax",
"skimage.io.imsave",
"os.path.exists",
"numpy.random.rand",
"shutil.rmtree",
"os.path.join"
] | [((1662, 1677), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1675, 1677), False, 'import unittest\n'), ((320, 364), 'os.path.join', 'os.path.join', (['self.test_data_path', '"""img_dir"""'], {}), "(self.test_data_path, 'img_dir')\n", (332, 364), False, 'import os\n'), ((390, 440), 'os.path.join', 'os.path.join', (['self.test_data_path', '"""empty_img_dir"""'], {}), "(self.test_data_path, 'empty_img_dir')\n", (402, 440), False, 'import os\n'), ((911, 955), 'os.path.join', 'os.path.join', (['self.test_data_path', '"""img_dir"""'], {}), "(self.test_data_path, 'img_dir')\n", (923, 955), False, 'import os\n'), ((981, 1031), 'os.path.join', 'os.path.join', (['self.test_data_path', '"""empty_img_dir"""'], {}), "(self.test_data_path, 'empty_img_dir')\n", (993, 1031), False, 'import os\n'), ((1045, 1069), 'os.path.exists', 'os.path.exists', (['test_dir'], {}), '(test_dir)\n', (1059, 1069), False, 'import os\n'), ((1113, 1143), 'os.path.exists', 'os.path.exists', (['empty_test_dir'], {}), '(empty_test_dir)\n', (1127, 1143), False, 'import os\n'), ((1228, 1272), 'os.path.join', 'os.path.join', (['self.test_data_path', '"""img_dir"""'], {}), "(self.test_data_path, 'img_dir')\n", (1240, 1272), False, 'import os\n'), ((1291, 1315), 'sigback.processing.measure.minmax', 'measure.minmax', (['test_dir'], {}), '(test_dir)\n', (1305, 1315), False, 'from sigback.processing import measure\n'), ((1457, 1507), 'os.path.join', 'os.path.join', (['self.test_data_path', '"""empty_img_dir"""'], {}), "(self.test_data_path, 'empty_img_dir')\n", (1469, 1507), False, 'import os\n'), ((1526, 1550), 'sigback.processing.measure.minmax', 'measure.minmax', (['test_dir'], {}), '(test_dir)\n', (1540, 1550), False, 'from sigback.processing import measure\n'), ((458, 482), 'os.path.exists', 'os.path.exists', (['test_dir'], {}), '(test_dir)\n', (472, 482), False, 'import os\n'), ((497, 515), 'os.mkdir', 'os.mkdir', (['test_dir'], {}), '(test_dir)\n', (505, 515), False, 'import os\n'), ((532, 
562), 'os.path.exists', 'os.path.exists', (['empty_test_dir'], {}), '(empty_test_dir)\n', (546, 562), False, 'import os\n'), ((577, 601), 'os.mkdir', 'os.mkdir', (['empty_test_dir'], {}), '(empty_test_dir)\n', (585, 601), False, 'import os\n'), ((667, 691), 'numpy.random.rand', 'np.random.rand', (['(100)', '(200)'], {}), '(100, 200)\n', (681, 691), True, 'import numpy as np\n'), ((693, 717), 'numpy.random.rand', 'np.random.rand', (['(150)', '(300)'], {}), '(150, 300)\n', (707, 717), True, 'import numpy as np\n'), ((795, 827), 'os.path.join', 'os.path.join', (['test_dir', 'img_name'], {}), '(test_dir, img_name)\n', (807, 827), False, 'import os\n'), ((840, 862), 'skimage.io.imsave', 'imsave', (['full_path', 'img'], {}), '(full_path, img)\n', (846, 862), False, 'from skimage.io import imread, imsave\n'), ((1084, 1100), 'shutil.rmtree', 'rmtree', (['test_dir'], {}), '(test_dir)\n', (1090, 1100), False, 'from shutil import rmtree\n'), ((1158, 1180), 'shutil.rmtree', 'rmtree', (['empty_test_dir'], {}), '(empty_test_dir)\n', (1164, 1180), False, 'from shutil import rmtree\n')] |
"""
Interactive image plots.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, Button, RadioButtons
def intimage(img, **kwargs):
    """Interactive imshow with colormap, color-limit and flip widgets.

    Parameters
    ----------
    img : array-like
        Image data handed to ``Axes.imshow``.
    **kwargs
        Extra keyword arguments forwarded to ``imshow``.

    Returns
    -------
    tuple
        ``(im, radio, smin, smax, flipxbut, flipybut)``.  Keep a reference
        to this tuple; otherwise the widgets are garbage collected and
        stop responding.
    """
    fig, ax = plt.subplots()
    plt.subplots_adjust(left=0, bottom=0.20)
    im = ax.imshow(img, **kwargs)
    cbar = fig.colorbar(im)

    # Colormap selector; preselect the image's current colormap if listed.
    rax = plt.axes([0.025, 0.025, 0.13, 0.13])
    cmap_names = [im.get_cmap().name, 'gray', 'binary']
    current = im.get_cmap().name
    active = 0
    for i, name in enumerate(cmap_names):
        if current == name:
            active = i
            break
    radio = RadioButtons(rax, cmap_names, active=active)
    def cmapfunc(label):
        im.set_cmap(label)
        fig.canvas.draw_idle()
    radio.on_clicked(cmapfunc)

    # Color-limit sliders spanning the union of the current clim and the
    # data range (NaN-safe).
    low, high = im.get_clim()
    bot = min(low, np.nanmin(img))
    top = max(high, np.nanmax(img))
    axmin = plt.axes([0.25, 0.025, 0.60, 0.03])
    axmax = plt.axes([0.25, 0.07, 0.60, 0.03])
    smin = Slider(axmin, 'Min', bot, top, valinit=low)
    smax = Slider(axmax, 'Max', bot, top, valinit=high)
    def update(val):
        im.set_clim(smin.val, smax.val)
        fig.canvas.draw_idle()
    smin.on_changed(update)
    smax.on_changed(update)

    flipxbut = Button(plt.axes([0.25, 0.12, 0.1, 0.04]), 'Flip X')
    def flipx(event):
        img = im.get_array()
        im.set_data(img[:,::-1])
        fig.canvas.draw_idle()
    flipxbut.on_clicked(flipx)

    flipybut = Button(plt.axes([0.36, 0.12, 0.1, 0.04]), 'Flip Y')
    # BUG FIX: this callback was also named `flipx` in the original,
    # shadowing the X-flip handler's name; renamed to `flipy`.
    def flipy(event):
        img = im.get_array()
        im.set_data(img[::-1,:])
        fig.canvas.draw_idle()
    flipybut.on_clicked(flipy)

    # return these so we keep a reference to them.
    # otherwise the widget will no longer be responsive
    return im, radio, smin, smax, flipxbut, flipybut
| [
"matplotlib.widgets.RadioButtons",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"numpy.nanmin",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots",
"numpy.nanmax"
] | [((237, 251), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (249, 251), True, 'import matplotlib.pyplot as plt\n'), ((256, 295), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'bottom': '(0.2)'}), '(left=0, bottom=0.2)\n', (275, 295), True, 'import matplotlib.pyplot as plt\n'), ((370, 406), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.025, 0.025, 0.13, 0.13]'], {}), '([0.025, 0.025, 0.13, 0.13])\n', (378, 406), True, 'import matplotlib.pyplot as plt\n'), ((622, 666), 'matplotlib.widgets.RadioButtons', 'RadioButtons', (['rax', 'cmap_names'], {'active': 'active'}), '(rax, cmap_names, active=active)\n', (634, 666), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((895, 929), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.025, 0.6, 0.03]'], {}), '([0.25, 0.025, 0.6, 0.03])\n', (903, 929), True, 'import matplotlib.pyplot as plt\n'), ((944, 977), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.07, 0.6, 0.03]'], {}), '([0.25, 0.07, 0.6, 0.03])\n', (952, 977), True, 'import matplotlib.pyplot as plt\n'), ((990, 1033), 'matplotlib.widgets.Slider', 'Slider', (['axmin', '"""Min"""', 'bot', 'top'], {'valinit': 'low'}), "(axmin, 'Min', bot, top, valinit=low)\n", (996, 1033), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((1045, 1089), 'matplotlib.widgets.Slider', 'Slider', (['axmax', '"""Max"""', 'bot', 'top'], {'valinit': 'high'}), "(axmax, 'Max', bot, top, valinit=high)\n", (1051, 1089), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((831, 845), 'numpy.nanmin', 'np.nanmin', (['img'], {}), '(img)\n', (840, 845), True, 'import numpy as np\n'), ((867, 881), 'numpy.nanmax', 'np.nanmax', (['img'], {}), '(img)\n', (876, 881), True, 'import numpy as np\n'), ((1262, 1295), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.12, 0.1, 0.04]'], {}), '([0.25, 0.12, 0.1, 0.04])\n', (1270, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1509), 
'matplotlib.pyplot.axes', 'plt.axes', (['[0.36, 0.12, 0.1, 0.04]'], {}), '([0.36, 0.12, 0.1, 0.04])\n', (1484, 1509), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 19 13:56:17 2020
@author: shah
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 12 13:23:58 2020
@author: shah
"""
from util import m_normal, learning_rate, get_lambda
from classes import ret
import random as random
import numpy as np
import math
def bpr_update(users, movies):
    """Run one stochastic BPR (Bayesian Personalized Ranking) update sweep.

    For every user that has at least one training movie, draw one observed
    ("positive") movie and one uniformly random candidate movie.  If the
    candidate is unobserved ("negative"), take a single SGD step on the user
    factor and on both item factors.  ``users`` and ``movies`` are mutated
    in place; nothing is returned.

    Args:
        users:  mapping id -> user object exposing ``factor`` (ndarray) and
                ``movies_train`` (dict of the user's observed movies).
        movies: mapping id -> movie object exposing ``factor`` (ndarray).
    """
    lr = learning_rate()
    lam = get_lambda()
    for u1 in users:
        u = users[u1]
        Vu = u.factor
        if len(u.movies_train) > 0:
            # random.sample()/random.choice() reject dict views on
            # Python >= 3.11, so materialize the keys before sampling.
            rand_pos = random.choice(list(u.movies_train.keys()))
            rand_neg = random.choice(list(movies.keys()))
            if rand_neg not in u.movies_train:
                Vi = movies[rand_pos].factor
                Vj = movies[rand_neg].factor
                firstterm = calculate_first_term(Vu, Vi, Vj)
                # NOTE(review): adding ``lam * ||V||`` (a scalar norm) to every
                # component is unusual -- standard BPR L2 regularization uses
                # ``-lam * V`` (the vector).  Preserved as-is; confirm intent.
                # USER FACTOR: gradient of x_uij w.r.t. Vu is (Vi - Vj).
                derivative = firstterm * (Vi - Vj)
                Vu = Vu + lr * (derivative + lam * np.linalg.norm(Vu))
                users[u1].factor = Vu
                # POSITIVE ITEM FACTOR: gradient w.r.t. Vi is Vu.
                derivative = firstterm * Vu
                Vi = Vi + lr * (derivative + lam * np.linalg.norm(Vi))
                movies[rand_pos].factor = Vi
                # NEGATIVE ITEM FACTOR: gradient w.r.t. Vj is -Vu.
                derivative = firstterm * (-Vu)
                Vj = Vj + lr * (derivative + lam * np.linalg.norm(Vj))
                movies[rand_neg].factor = Vj
def calculate_first_term(Vu, Vi, Vj):
    """Return the sigmoid factor sigma(-x_uij) shared by all three BPR gradients.

    Here x_uij = <Vu, Vi> - <Vu, Vj> is the preference-score gap between the
    positive item i and the negative item j for user u.
    """
    # The exponent is the *negated* gap: <Vu, Vj> - <Vu, Vi>.
    gap = np.dot(Vu, Vj) - np.dot(Vu, Vi)
    # Clamp so math.exp cannot overflow for extreme score gaps.
    gap = min(gap, 500)
    e = math.exp(gap)
    return e / (1 + e)
| [
"math.exp",
"util.get_lambda",
"util.learning_rate",
"numpy.linalg.norm",
"numpy.dot"
] | [((398, 413), 'util.learning_rate', 'learning_rate', ([], {}), '()\n', (411, 413), False, 'from util import m_normal, learning_rate, get_lambda\n'), ((424, 436), 'util.get_lambda', 'get_lambda', ([], {}), '()\n', (434, 436), False, 'from util import m_normal, learning_rate, get_lambda\n'), ((1662, 1676), 'numpy.dot', 'np.dot', (['Vu', 'Vi'], {}), '(Vu, Vi)\n', (1668, 1676), True, 'import numpy as np\n'), ((1696, 1710), 'numpy.dot', 'np.dot', (['Vu', 'Vj'], {}), '(Vu, Vj)\n', (1702, 1710), True, 'import numpy as np\n'), ((1817, 1834), 'math.exp', 'math.exp', (['negxuij'], {}), '(negxuij)\n', (1825, 1834), False, 'import math\n'), ((1857, 1874), 'math.exp', 'math.exp', (['negxuij'], {}), '(negxuij)\n', (1865, 1874), False, 'import math\n'), ((1071, 1089), 'numpy.linalg.norm', 'np.linalg.norm', (['Vu'], {}), '(Vu)\n', (1085, 1089), True, 'import numpy as np\n'), ((1286, 1304), 'numpy.linalg.norm', 'np.linalg.norm', (['Vi'], {}), '(Vi)\n', (1300, 1304), True, 'import numpy as np\n'), ((1542, 1560), 'numpy.linalg.norm', 'np.linalg.norm', (['Vj'], {}), '(Vj)\n', (1556, 1560), True, 'import numpy as np\n')] |
from __future__ import division
import argparse
from PIL import Image
import numpy as np
import gym
from keras.models import Model
from keras.layers import Flatten, Conv2D, Input, Dense
from keras.optimizers import Adam
from keras.regularizers import l2
import keras.backend as K
from rl.agents.dqn import DQfDAgent
from rl.policy import EpsGreedyQPolicy
from rl.memory import PartitionedMemory
from rl.core import Processor
from rl.callbacks import TrainEpisodeLogger, ModelIntervalCheckpoint
from rl.util import load_demo_data_from_file, record_demo_data
#We downsize the atari frame to 84 x 84 and feed the model 4 frames at a time for
#a sense of direction and speed.
INPUT_SHAPE = (84, 84)
WINDOW_LENGTH = 4
#Standard Atari processing
class AtariDQfDProcessor(Processor):
    """Standard Atari preprocessing for DQfD: 84x84 grayscale uint8 frames,
    [0, 1] float batches, and log-compressed rewards."""

    def process_observation(self, observation):
        """Downsample one raw RGB frame to an 84x84 uint8 grayscale image."""
        assert observation.ndim == 3
        small = Image.fromarray(observation).resize(INPUT_SHAPE).convert('L')
        frame = np.array(small)
        assert frame.shape == INPUT_SHAPE
        return frame.astype('uint8')  # uint8 keeps replay memory small

    def process_state_batch(self, batch):
        """Rescale a uint8 state batch to floats in [0, 1]."""
        return batch.astype('float32') / 255.

    def process_reward(self, reward):
        """Compress reward magnitude via sign(r) * log(1 + |r|)."""
        return np.sign(reward) * np.log(1 + abs(reward))

    def process_demo_data(self, demo_data):
        """Apply the same observation/reward processing to recorded demos."""
        # Important addition from the dqn example: demos must be preprocessed
        # exactly like live experience before being loaded into memory.
        for step in demo_data:
            step[0] = self.process_observation(step[0])
            step[2] = self.process_reward(step[2])
        return demo_data
# Command-line interface: train/test mode, Atari environment name, and an
# optional weights file to load when testing.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--env-name', type=str, default='HeroDeterministic-v4')
parser.add_argument('--weights', type=str, default=None)
args = parser.parse_args()
# Get the environment and extract the number of actions.
env = gym.make(args.env_name)
np.random.seed(231)  # seed numpy for reproducibility
env.seed(123)  # seed the environment's RNG as well
nb_actions = env.action_space.n
print("NUMBER OF ACTIONS: " + str(nb_actions))
#Standard DQN model architecture + l2 regularization to prevent overfitting on small demo sets.
# Input is a stack of WINDOW_LENGTH grayscale 84x84 frames, channels-first.
input_shape = (WINDOW_LENGTH, INPUT_SHAPE[0], INPUT_SHAPE[1])
frame = Input(shape=(input_shape))
cv1 = Conv2D(32, kernel_size=(8,8), strides=4, activation='relu', kernel_regularizer=l2(1e-4), data_format='channels_first')(frame)
cv2 = Conv2D(64, kernel_size=(4,4), strides=2, activation='relu', kernel_regularizer=l2(1e-4), data_format='channels_first')(cv1)
cv3 = Conv2D(64, kernel_size=(3,3), strides=1, activation='relu', kernel_regularizer=l2(1e-4), data_format='channels_first')(cv2)
dense= Flatten()(cv3)
dense = Dense(512, activation='relu', kernel_regularizer=l2(1e-4))(dense)
# One linear output (Q-value) per available action.
buttons = Dense(nb_actions, activation='linear', kernel_regularizer=l2(1e-4))(dense)
model = Model(inputs=frame,outputs=buttons)
model.summary()
processor = AtariDQfDProcessor()
# record_demo_data('HeroDeterministic-v4', steps=50000, data_filepath='hero_expert.npy', frame_delay=0.03)
# Load and process the demonstration data.
expert_demo_data = processor.process_demo_data(load_demo_data_from_file('hero_expert.npy'))
# Replay memory pre-loaded with the expert demonstrations (alpha/beta are the
# prioritization exponents).
memory = PartitionedMemory(limit=1000000, pre_load_data=expert_demo_data, alpha=.4, start_beta=.6, end_beta=.6, window_length=WINDOW_LENGTH)
policy = EpsGreedyQPolicy(.01)  # near-greedy: epsilon = 0.01
# Double/dueling DQfD agent; pretraining_steps updates are run before
# environment interaction begins.
dqfd = DQfDAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory,
processor=processor, enable_double_dqn=True, enable_dueling_network=True, gamma=.99, target_model_update=10000,
train_interval=4, delta_clip=1., pretraining_steps=750000, n_step=10)
lr = .00025/4  # standard Atari DQN learning rate, scaled down by 4
dqfd.compile(Adam(lr=lr), metrics=['mae'])
if args.mode == 'train':
    # Periodically checkpoint weights and log per-episode rewards while training.
    weights_filename = 'dqfd_{}_weights.h5f'.format(args.env_name)
    checkpoint_weights_filename = 'dqfd_' + args.env_name + '_weights_{step}.h5f'
    log_filename = 'dqfd_' + args.env_name + '_REWARD_DATA.txt' #uses TrainEpisodeLogger csv (optional)
    callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=500000)]
    callbacks += [TrainEpisodeLogger(log_filename)]
    dqfd.fit(env, callbacks=callbacks, nb_steps=10000000, verbose=0, nb_max_episode_steps=200000)
    dqfd.save_weights(weights_filename, overwrite=True)
elif args.mode == 'test':
    # Evaluation: load saved (or user-supplied) weights, run 10 visualized episodes.
    weights_filename = 'dqfd_{}_weights.h5f'.format(args.env_name)
    if args.weights:
        weights_filename = args.weights
    dqfd.load_weights(weights_filename)
    dqfd.test(env, nb_episodes=10, visualize=True, nb_max_start_steps=30)
| [
"rl.callbacks.TrainEpisodeLogger",
"keras.regularizers.l2",
"numpy.random.seed",
"gym.make",
"argparse.ArgumentParser",
"rl.memory.PartitionedMemory",
"rl.util.load_demo_data_from_file",
"rl.policy.EpsGreedyQPolicy",
"keras.layers.Flatten",
"keras.optimizers.Adam",
"keras.models.Model",
"rl.ca... | [((1679, 1704), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1702, 1704), False, 'import argparse\n'), ((2003, 2026), 'gym.make', 'gym.make', (['args.env_name'], {}), '(args.env_name)\n', (2011, 2026), False, 'import gym\n'), ((2027, 2046), 'numpy.random.seed', 'np.random.seed', (['(231)'], {}), '(231)\n', (2041, 2046), True, 'import numpy as np\n'), ((2307, 2331), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (2312, 2331), False, 'from keras.layers import Flatten, Conv2D, Input, Dense\n'), ((2915, 2951), 'keras.models.Model', 'Model', ([], {'inputs': 'frame', 'outputs': 'buttons'}), '(inputs=frame, outputs=buttons)\n', (2920, 2951), False, 'from keras.models import Model\n'), ((3254, 3392), 'rl.memory.PartitionedMemory', 'PartitionedMemory', ([], {'limit': '(1000000)', 'pre_load_data': 'expert_demo_data', 'alpha': '(0.4)', 'start_beta': '(0.6)', 'end_beta': '(0.6)', 'window_length': 'WINDOW_LENGTH'}), '(limit=1000000, pre_load_data=expert_demo_data, alpha=0.4,\n start_beta=0.6, end_beta=0.6, window_length=WINDOW_LENGTH)\n', (3271, 3392), False, 'from rl.memory import PartitionedMemory\n'), ((3396, 3418), 'rl.policy.EpsGreedyQPolicy', 'EpsGreedyQPolicy', (['(0.01)'], {}), '(0.01)\n', (3412, 3418), False, 'from rl.policy import EpsGreedyQPolicy\n'), ((3426, 3698), 'rl.agents.dqn.DQfDAgent', 'DQfDAgent', ([], {'model': 'model', 'nb_actions': 'nb_actions', 'policy': 'policy', 'memory': 'memory', 'processor': 'processor', 'enable_double_dqn': '(True)', 'enable_dueling_network': '(True)', 'gamma': '(0.99)', 'target_model_update': '(10000)', 'train_interval': '(4)', 'delta_clip': '(1.0)', 'pretraining_steps': '(750000)', 'n_step': '(10)'}), '(model=model, nb_actions=nb_actions, policy=policy, memory=memory,\n processor=processor, enable_double_dqn=True, enable_dueling_network=\n True, gamma=0.99, target_model_update=10000, train_interval=4,\n delta_clip=1.0, pretraining_steps=750000, 
n_step=10)\n', (3435, 3698), False, 'from rl.agents.dqn import DQfDAgent\n'), ((2733, 2742), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2740, 2742), False, 'from keras.layers import Flatten, Conv2D, Input, Dense\n'), ((3200, 3243), 'rl.util.load_demo_data_from_file', 'load_demo_data_from_file', (['"""hero_expert.npy"""'], {}), "('hero_expert.npy')\n", (3224, 3243), False, 'from rl.util import load_demo_data_from_file, record_demo_data\n'), ((3742, 3753), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (3746, 3753), False, 'from keras.optimizers import Adam\n'), ((878, 906), 'PIL.Image.fromarray', 'Image.fromarray', (['observation'], {}), '(observation)\n', (893, 906), False, 'from PIL import Image\n'), ((1025, 1038), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1033, 1038), True, 'import numpy as np\n'), ((4068, 4137), 'rl.callbacks.ModelIntervalCheckpoint', 'ModelIntervalCheckpoint', (['checkpoint_weights_filename'], {'interval': '(500000)'}), '(checkpoint_weights_filename, interval=500000)\n', (4091, 4137), False, 'from rl.callbacks import TrainEpisodeLogger, ModelIntervalCheckpoint\n'), ((4157, 4189), 'rl.callbacks.TrainEpisodeLogger', 'TrainEpisodeLogger', (['log_filename'], {}), '(log_filename)\n', (4175, 4189), False, 'from rl.callbacks import TrainEpisodeLogger, ModelIntervalCheckpoint\n'), ((1373, 1388), 'numpy.sign', 'np.sign', (['reward'], {}), '(reward)\n', (1380, 1388), True, 'import numpy as np\n'), ((2419, 2429), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2421, 2429), False, 'from keras.regularizers import l2\n'), ((2551, 2561), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2553, 2561), False, 'from keras.regularizers import l2\n'), ((2681, 2691), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2683, 2691), False, 'from keras.regularizers import l2\n'), ((2805, 2815), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2807, 2815), False, 
'from keras.regularizers import l2\n'), ((2890, 2900), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (2892, 2900), False, 'from keras.regularizers import l2\n')] |
"""
Train a Noisy Logistic Regression Classifier from Training Data
"""
import argparse
import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression
class NoisyLR(LogisticRegression):
    """Logistic regression whose predicted probabilities are randomly corrupted.

    After ``set_noise_ratio`` is called with a dict of per-group rates
    (keys ``"maj"`` and ``"min"``), ``predict_proba`` replaces each row's
    probabilities, with the corresponding probability, by a Beta(1, 4) draw.
    The group of a sample is read from its last feature column
    (1 -> majority, otherwise minority).
    """

    # Default so predict_proba is safe even when set_noise_ratio was never
    # called (the original raised AttributeError in that case).
    noise_ratio = None

    def set_noise_ratio(self, noise_ratio=None):
        """Attach the per-group noise rates; ``None`` disables corruption."""
        self.noise_ratio = noise_ratio

    def predict_proba(self, X):
        """Return class probabilities, randomly corrupted per the noise rates."""
        proba = super().predict_proba(X)
        if self.noise_ratio is not None:
            for i in range(proba.shape[0]):
                # Last feature column encodes the group membership.
                group = "maj" if int(X[i, -1]) == 1 else "min"
                if np.random.binomial(1, self.noise_ratio[group]):
                    noise = np.random.beta(1, 4)
                    proba[i, 0] = 1. - noise
                    proba[i, 1] = noise
        return proba
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_data_path", type=str, help="the input training data path")
    parser.add_argument("--lbd", type=float, help="L2 regularization parameter")
    # parser.add_argument("--C", type=float, help="L2 regularization parameter")
    parser.add_argument("--classifier_path", type=str, help="the output classifier path")
    parser.add_argument('--noise_ratio_maj', type=float, default=0., help="noise ratio of majority group")
    # A negative minority rate (the default) disables noise entirely.
    parser.add_argument('--noise_ratio_min', type=float, default=-1., help="noise ratio of minority group")
    args = parser.parse_args()
    # NOTE(review): pickle.load can execute arbitrary code; only use with
    # trusted training-data files.
    with open(args.train_data_path, "rb") as f:
        X, y = pickle.load(f)
    n = y.shape[0]
    # sklearn's C is the inverse regularization strength: C = 1 / (lambda * n).
    C = 1 / (args.lbd * n)
    # C = args.C
    if args.noise_ratio_min < 0.:
        # Noise disabled: train a plain logistic regression.
        classifier = LogisticRegression(C=C).fit(X, y)
    else:
        # Noisy variant: per-group noise rates are attached after fitting.
        classifier = NoisyLR(C=C).fit(X, y)
        noise_ratio = {}
        noise_ratio["maj"] = args.noise_ratio_maj
        noise_ratio["min"] = args.noise_ratio_min
        classifier.set_noise_ratio(noise_ratio)
    # Serialize the trained classifier to the requested path.
    with open(args.classifier_path, "wb") as f:
        pickle.dump(classifier, f)
| [
"pickle.dump",
"numpy.random.binomial",
"argparse.ArgumentParser",
"numpy.random.beta",
"sklearn.linear_model.LogisticRegression",
"pickle.load"
] | [((946, 971), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (969, 971), False, 'import argparse\n'), ((1627, 1641), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1638, 1641), False, 'import pickle\n'), ((2090, 2116), 'pickle.dump', 'pickle.dump', (['classifier', 'f'], {}), '(classifier, f)\n', (2101, 2116), False, 'import pickle\n'), ((1773, 1796), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C'}), '(C=C)\n', (1791, 1796), False, 'from sklearn.linear_model import LogisticRegression\n'), ((565, 611), 'numpy.random.binomial', 'np.random.binomial', (['(1)', "self.noise_ratio['maj']"], {}), "(1, self.noise_ratio['maj'])\n", (583, 611), True, 'import numpy as np\n'), ((669, 715), 'numpy.random.binomial', 'np.random.binomial', (['(1)', "self.noise_ratio['min']"], {}), "(1, self.noise_ratio['min'])\n", (687, 715), True, 'import numpy as np\n'), ((777, 797), 'numpy.random.beta', 'np.random.beta', (['(1)', '(4)'], {}), '(1, 4)\n', (791, 797), True, 'import numpy as np\n')] |
import numpy as np
from ndsimulator.potentials.potential import Potential
class Flat2d(Potential):
    """A featureless two-dimensional potential: zero energy and zero force."""

    ndim = 2

    def compute(self, x=None):
        """Return (energy, forces); forces are zeros shaped like the positions."""
        positions = self.atoms.positions if x is None else x
        return 0, np.zeros(positions.shape)

    def projection(self, X, Y):
        """Return the (zero) potential surface sampled on the (X, Y) grid."""
        return np.zeros(X.shape)
class Flat1d(Potential):
    """A featureless one-dimensional potential: zero energy and zero force."""

    ndim = 1

    def compute(self, x=None):
        """Return (energy, forces) -- identically zero for a flat landscape."""
        return 0, np.zeros(1)

    def projection(self, X):
        """Return the (zero) potential surface sampled on grid ``X``."""
        # Bug fix: np.zeros(X) interprets the array *values* as a shape (and
        # raises TypeError for float grids); mirror Flat2d and use X.shape.
        return np.zeros(X.shape)
| [
"numpy.zeros"
] | [((289, 306), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (297, 306), True, 'import numpy as np\n'), ((454, 465), 'numpy.zeros', 'np.zeros', (['X'], {}), '(X)\n', (462, 465), True, 'import numpy as np\n'), ((223, 240), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (231, 240), True, 'import numpy as np\n'), ((397, 408), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (405, 408), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.